max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
vedastr/datasets/lmdb_dataset.py | wu0004in/vedastr | 0 | 12766151 | # modify from clovaai
import random
import re
import lmdb
import six
from PIL import Image
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module
class LmdbDataset(BaseDataset):
    """Text-recognition dataset reading (image, label) pairs from LMDB.

    Expects the LMDB layout produced by the clovaai tools: key
    'num-samples' holds the record count and records are keyed
    'image-%09d' / 'label-%09d', with indices starting at 1.
    Filtering behaviour (``data_filter_off``, ``filter``, ``transforms``,
    ``unknown``, ``character``) is inherited from :class:`BaseDataset`.
    """

    def __init__(self, *args, **kwargs):
        super(LmdbDataset, self).__init__(*args, **kwargs)

    def get_name_list(self):
        """Open the LMDB environment and build the list of usable record indices."""
        self.env = lmdb.open(self.root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'.encode()))
            if self.data_filter_off:
                # Keep every record; lmdb record keys start at 1, not 0.
                self.filtered_index_list = [index + 1 for index in range(nSamples)]
                self.samples = nSamples
            else:
                self.filtered_index_list = []
                for index in range(nSamples):
                    index += 1  # lmdb starts with 1
                    label_key = 'label-%09d'.encode() % index
                    label = txn.get(label_key).decode('utf-8')
                    # filter() returns True for labels that should be dropped.
                    if self.filter(label):
                        continue
                    else:
                        self.filtered_index_list.append(index)
                self.samples = len(self.filtered_index_list)

    def __getitem__(self, index):
        """Return the (image, label) pair for ``index``.

        Falls back to a randomly chosen sample when the stored image is
        corrupted or a transform fails on this sample.
        """
        assert index <= len(self), 'index range error'
        index = self.filtered_index_list[index]
        with self.env.begin(write=False) as txn:
            label_key = 'label-%09d'.encode() % index
            label = txn.get(label_key).decode('utf-8')
            img_key = 'image-%09d'.encode() % index
            imgbuf = txn.get(img_key)
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = Image.open(buf).convert('RGB')  # for color image
            except IOError:
                print(f'Corrupted image for {index}')
                # make dummy image and dummy label for corrupted image.
                img, label = self.__getitem__(random.choice(range(len(self))))
                return img, label
            if self.transforms:
                try:
                    img, label = self.transforms(img, label)
                except Exception:
                    # Was a bare ``except:`` which would also swallow
                    # KeyboardInterrupt/SystemExit; resample on transform failure.
                    return self.__getitem__(random.choice(range(len(self))))
            if not self.unknown:
                # Strip characters outside the recognised alphabet.
                out_of_char = f'[^{self.character}]'
                label = re.sub(out_of_char, '', label)
            return img, label
| 1.984375 | 2 |
GalleryMan/assets/QtHelpers.py | 0xsapphir3/GalleryMan | 9 | 12766152 | from functools import partial
from PyQt5 import QtCore
from PyQt5.QtCore import QParallelAnimationGroup, QPoint, QPropertyAnimation, QRect, QTimer
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QGraphicsOpacityEffect, QHBoxLayout, QLabel, QMainWindow, QPushButton, QScrollArea, QVBoxLayout, QWidget
class Thrower:
    """Throws three particle-like labels outward from (x, y) while fading them.

    NOTE (original author): relies on a fixed font size, so it has been
    excluded from use for the time being.
    """
    def __init__(self, x, y, window) -> None:
        # Origin of the effect; x is nudged 10px left before use.
        self.x = x - 10
        self.y = y
        self.window = window
    def fade(self, widget):
        """Return a 500ms opacity animation fading ``widget`` from 1 to 0."""
        self.effect = QGraphicsOpacityEffect()
        widget.setGraphicsEffect(self.effect)
        self.animation = QtCore.QPropertyAnimation(self.effect, b"opacity")
        self.animation.setDuration(500)
        self.animation.setStartValue(1)
        self.animation.setEndValue(0)
        return self.animation
    def throw(self):
        """Spawn three labels and animate them flying apart from the origin."""
        animations = QParallelAnimationGroup()
        self.labels = []
        self.blurs = QParallelAnimationGroup()
        # End points: up-left, up-right, and below the origin.
        for left, right in zip(
            [self.x - 40, self.x + 40, self.x ],
            [self.y - 65, self.y - 65, self.y + 35],
        ):
            label = QLabel(self.window)
            label.setText(" ")
            label.setStyleSheet("background-color: transparent; font-family: SauceCodePro Nerd Font; color: #BF616A")
            label.setFixedHeight(200)
            animation = QPropertyAnimation(label, b"pos")
            animation.setStartValue(QPoint(self.x, self.y))
            animation.setEndValue(QPoint(left, right))
            animation.setDuration(500)
            animations.addAnimation(animation)
            blur_animation = self.fade(label)
            self.blurs.addAnimation(blur_animation)
            label.show()
            self.labels.append(label)
        # NOTE(review): start is triggered via a programmatic button click,
        # presumably to route through Qt's signal queue -- confirm intent.
        q = QPushButton(self.window)
        q.clicked.connect(partial(self.start, animations))
        q.click()
    def start(self, animations: QParallelAnimationGroup):
        """Start the move and fade groups; hide all labels when movement ends."""
        animations.start()
        def callback():
            for i in self.labels:
                i.hide()
        animations.finished.connect(callback)
        self.blurs.start()
class QCustomButton:
    """Creates a flat QPushButton with a pointing-hand cursor and optional styling."""
    def __init__(self, text, window , setStyle = False , addtext = None) -> None:
        # NOTE(review): ``addtext`` is accepted but never stored or used;
        # kept for backward compatibility with existing callers.
        self.text = text
        self.window = window
        self.setStyle = setStyle
    def create(self):
        """Build and return the configured QPushButton."""
        self.button = QPushButton(self.window)
        if(self.setStyle):
            # Opt-in oversized label styling.
            self.button.setStyleSheet('font-size: 80px')
        self.button.setFlat(True)
        self.button.setText(self.text)
        self.button.setCursor(QCursor(Qt.PointingHandCursor))
        return self.button
class PopUpMessage:
    """Bottom-anchored toast label that slides into a window and auto-dismisses."""

    def new_msg(self , window , msg , duration):
        """Create and animate a popup showing ``msg`` on ``window``.

        ``duration`` is the slide-in time in milliseconds; the popup slides
        back out after a fixed 2 seconds. Returns the QLabel so callers can
        keep a handle on it.
        """
        self.window = window
        if(type(self.window) != QMainWindow):
            # Climb out of nested child widgets up to the top-level window.
            self.window = self.window.parent().parent().parent().parent()
        try:
            self.popup_window.hide()
        except (AttributeError, RuntimeError):
            # First call: no previous popup exists yet (AttributeError), or
            # its underlying Qt object was already deleted (RuntimeError).
            # Previously a bare ``except:`` which hid unrelated errors.
            pass
        self.popup_window = QLabel(self.window)
        # Rough width scaling with the message length.
        self.popup_window.setFixedWidth(len(msg) * 20)
        self.popup_window.setFixedHeight(60)
        self.popup_window.setAlignment(Qt.AlignCenter | Qt.AlignCenter)
        self.popup_window.setText(msg)
        self.popup_window.setStyleSheet(
            """
            QLabel{
                background-color: #4C566A;
                font-size: 20px;
                font-family: "Comfortaa"
            }
            """
        )
        # Slide up from just below the window into view.
        self.animation = QPropertyAnimation(self.popup_window, b"pos")
        self.animation.setStartValue(QPoint(20, self.window.height() + 100))
        self.animation.setEndValue(QPoint(20, self.window.height() - 100))
        self.animation.setDuration(duration)
        self.popup_window.show()
        # Auto-dismiss after 2 seconds regardless of the slide-in duration.
        timer = QTimer(self.window)
        timer.timeout.connect(self.remove)
        timer.start(2000)
        self.start()
        return self.popup_window

    def start(self):
        """Start the slide-in animation."""
        self.animation.start()

    def updateText(self , text):
        """Replace the text of the currently shown popup."""
        self.popup_window.setText(text)

    def remove(self):
        """Slide the popup off-screen, then hide it."""
        self.an = QPropertyAnimation(self.popup_window, b"pos")
        self.an.setStartValue(self.popup_window.pos())
        self.an.setEndValue(QPoint(20, 1000))
        self.an.setDuration(200)
        self.an.start()
        self.an.finished.connect(self.popup_window.hide)
class QContinueButton:
    """Flat "Continue"-style button whose arrow glyph slides on hover."""
    def __init__(self , window) -> None:
        self.window = window
    def start(self , text="Continue"):
        """Build and return the button with its hover/leave animations wired."""
        self.button = QPushButton(self.window)
        self.button.setFlat(True)
        self.button.setCursor(QCursor(Qt.PointingHandCursor))
        self.layout = QHBoxLayout()
        first_text = QLabel(text)
        # Arrow glyph (Nerd Font codepoint) that slides on hover.
        second_text = QLabel(text=" ")
        animation = QPropertyAnimation(second_text , b"pos")
        animation.setDuration(200)
        animation.setStartValue(QPoint(self.button.pos().x() + 110 , self.button.y() + 10))
        # NOTE(review): the end point mixes pos().y() and pos().x() (swapped
        # axes vs. the start point) -- looks like a coordinate-swap bug that
        # is masked while the button sits at (0, 0); confirm intended offset.
        animation.setEndValue(QPoint(self.button.pos().y() + 130 , self.button.pos().x() + 10))
        leave_ani = QPropertyAnimation(second_text , b"pos")
        leave_ani.setDuration(200)
        # Reverse animation: same two points with start/end exchanged.
        leave_ani.setEndValue(QPoint(self.button.pos().x() + 110 , self.button.y() + 10))
        leave_ani.setStartValue(QPoint(self.button.pos().y() + 130 , self.button.pos().x() + 10))
        self.layout.addWidget(first_text)
        self.layout.addWidget(second_text)
        self.button.setLayout(self.layout)
        # Replace the widget's enter/leave event handlers to drive the slide.
        onhover = lambda x : animation.start()
        leave = lambda x: leave_ani.start()
        self.button.enterEvent = onhover
        self.button.leaveEvent = leave
        return self.button
class Animation:
    """Factory helpers for commonly used QPropertyAnimation configurations."""

    def movingAnimation(self , widget , endValue , duration):
        """Return an animation moving ``widget`` from its current pos to ``endValue``.

        ``duration`` is in milliseconds. The animation is returned unstarted.
        """
        animation = QPropertyAnimation(widget , b"pos")
        animation.setStartValue(widget.pos())
        animation.setEndValue(endValue)
        animation.setDuration(duration)
        return animation

    def fadingAnimation(self , widget: QWidget , duration, reverse=False , startValue = 0, endValue = 0):
        """Return an opacity animation for ``widget`` (unstarted).

        With ``reverse=False`` the widget fades out from 1 to ``endValue``;
        with ``reverse=True`` it fades in from ``startValue`` to 1.
        A fresh QGraphicsOpacityEffect is installed on the widget each call.
        """
        opacity = QGraphicsOpacityEffect()
        widget.setGraphicsEffect(opacity)
        animation = QPropertyAnimation(opacity , b"opacity")
        if(not reverse):
            animation.setStartValue(1)
            animation.setEndValue(endValue)
        else:
            animation.setStartValue(startValue)
            animation.setEndValue(1)
        animation.setDuration(duration)
        return animation
class QLayoutMaker:
    """Builds a horizontal layout of icon buttons wired to callback functions."""

    def __init__(self , icons: list[list[str]] , functions: list) -> None:
        # ``icons`` entries are (glyph, color, font_size, font_family);
        # ``functions`` pairs positionally with ``icons``.
        self.icons = icons
        self.functions = functions

    def make(self) -> QHBoxLayout:
        """Return a QHBoxLayout containing one styled button per icon spec.

        Icon specs and callbacks are paired positionally; construction stops
        (best-effort, as in the original) at the first malformed spec.
        """
        layout = QHBoxLayout()
        try:
            # zip() pairs each icon spec with its callback, replacing the
            # original manually-maintained index counter.
            for (icon, icon_color, icon_font_size, icon_family), callback in zip(self.icons, self.functions):
                item = QCustomButton(icon, None).create()
                item.setStyleSheet(
                    "color: {}; font-size: {}px; font-family: {}".format(
                        icon_color, icon_font_size, icon_family
                    )
                )
                item.clicked.connect(callback)
                layout.addWidget(item)
        except Exception:
            # Narrowed from a bare ``except:``; a malformed spec aborts the
            # remaining buttons, mirroring the original silent behaviour.
            pass
        return layout
class QSliderMenu(QLabel):
    """Bordered side panel (400px wide) holding a scrollable stack of sub-menus.

    Created off-screen at x=2000; the caller is expected to animate it into
    view. Sub-menus are appended with :meth:`addMenu`.
    """
    def __init__(self , parent) -> None:
        super().__init__(parent)
        self.head = parent
        # ``class`` property targets the stylesheet selector below.
        self.setProperty("class" , "need")
        # Start off-screen to the right; 400x1000 panel.
        self.setGeometry(QRect(2000, 0, 400, 1000))
        self.show()
        layout = QVBoxLayout(self)
        self.scrollArea = QScrollArea(self)
        layout.addWidget(self.scrollArea)
        # Container widget holding all menu entries inside the scroll area.
        self.buttons = QWidget(self)
        self.buttons.setGeometry(QRect(100, 0, 400, 50))
        self.scrollArea.setWidget(self.buttons)
        self.second_layout = QVBoxLayout(self.buttons)
        self.buttons.setLayout(self.second_layout)
        self.setStyleSheet("""QLabel[class="need"] { border: 3px solid #3B4252 }""")
    def addMenu(self , name , widget , addAsLayout = False):
        """Append a titled section; ``widget`` may be a widget or a layout.

        Pass ``addAsLayout=True`` when ``widget`` is a QLayout. An empty
        ``name`` skips the title label. The container grows a fixed 135px
        per section to keep the scroll area usable.
        """
        childLayout = QVBoxLayout()
        if(name != ""):
            nameLabel = QLabel()
            nameLabel.setText(name)
            nameLabel.setFixedHeight(80)
            nameLabel.setStyleSheet("color: white; font-size: 20px; font-family: Comfortaa")
            childLayout.addWidget(nameLabel)
        if(addAsLayout):
            childLayout.addLayout(widget)
        else:
            childLayout.addWidget(widget)
            widget.setGeometry(self.geometry())
        self.second_layout.addLayout(childLayout)
        self.buttons.setFixedHeight(self.buttons.height() + 135)
| 2.625 | 3 |
users/forms.py | arthtyagi/gettit | 6 | 12766153 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class UserRegisterForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""
    email = forms.EmailField()
    class Meta:
        model = User
        # Password fields come from UserCreationForm itself.
        fields = ['username', 'email', 'first_name']
class UserUpdateForm(forms.ModelForm):
    """Profile-edit form for the built-in User model (no password handling)."""
    email = forms.EmailField()
    class Meta:
        model = User
        fields = ['username', 'email', 'first_name']
class ProfileUpdateForm(forms.ModelForm):
    """Form for updating the user's Profile avatar image."""
    class Meta:
        model = Profile
        fields = ['image']
        labels = {'image': 'Image'}
        # Plain FileInput avoids the default ClearableFileInput's "clear" checkbox.
        widgets = {'image': forms.FileInput()}
| 2.265625 | 2 |
tests/test_gm.py | ghost-devs/GhostMarketContractN3 | 1 | 12766154 | <reponame>ghost-devs/GhostMarketContractN3
from boa3 import constants
from pathlib import Path
from boa3.boa3 import Boa3
from boa3.neo import to_script_hash
from boa3.neo.cryptography import hash160
from boa3.neo.vm.type.String import String
from boa3.neo.smart_contract.VoidType import VoidType
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.TestExecutionException import TestExecutionException
from boa3_test.tests.test_classes.testengine import TestEngine
from boa3.builtin.type import UInt160
from boa3.builtin.interop.iterator import Iterator
from boa3_test.tests.test_classes.TestExecutionException import TestExecutionException
from boa3.neo.core.types.InteropInterface import InteropInterface
CONTRACT_BUILT = False
class TestNEP17(BoaTest):
    """NEP-17 (fungible token) behaviour tests for the GhostMarketToken contract."""
    # Paths are derived relative to this test file's location in the repo.
    p = Path(__file__)
    GHOST_ROOT = str(p.parents[1])
    PRJ_ROOT = str(p.parents[2])
    CONTRACT_PATH_JSON = GHOST_ROOT+ '/contracts/NEP17/GhostMarketToken.manifest.json'
    CONTRACT_PATH_NEF = GHOST_ROOT + '/contracts/NEP17/GhostMarketToken.nef'
    CONTRACT_PATH_PY = GHOST_ROOT + '/contracts/NEP17/GhostMarketToken.py'
    # NOTE(review): absolute path to one developer's machine -- these tests
    # cannot run elsewhere without editing it; should come from config/env.
    TEST_ENGINE_PATH = '/home/merl/source/onblock/n3_gm/neo-devpack-dotnet/src/Neo.TestEngine/bin/Debug/net5.0/'
    BOA_PATH = PRJ_ROOT + '/neo3-boa/boa3'
    # NOTE(review): literal contains a redaction placeholder ("<KEY>") -- the
    # original script-hash bytes were lost; restore before running.
    DEPLOYER_ACCOUNT = UInt160(b'\x9c\xa5/\x04"{\<KEY>')
    OWNER_SCRIPT_HASH = UInt160(to_script_hash(b'NZcuGiwRu1QscpmCyxj5XwQBUf6sk7dJJN'))
    OTHER_ACCOUNT_1 = UInt160(to_script_hash(b'NiNmXL8FjEUEs1nfX9uHFBNaenxDHJtmuB'))
    OTHER_ACCOUNT_2 = bytes(range(20))
    def print_notif(self, notifications):
        """Debug helper: dump contract notifications to stdout."""
        print('\n=========================== NOTIFICATIONS START ===========================\n')
        for notif in notifications:
            print(f"{str(notif.name)}: {str(notif.arguments)}")
        print('\n=========================== NOTIFICATIONS END ===========================\n')
    def deploy_contract(self, engine):
        """Add the NEF to the engine and invoke _deploy as the owner."""
        engine.add_contract(self.CONTRACT_PATH_NEF)
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, '_deploy', None, False,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH])
        self.assertEqual(VoidType, result)
    def prepare_testengine(self, preprocess=False) -> TestEngine:
        """Return a freshly reset TestEngine (``preprocess`` is currently unused)."""
        engine = TestEngine(self.TEST_ENGINE_PATH)
        engine.reset_engine()
        return engine
    def test_gm_compile(self):
        """The contract compiles and its manifest declares supported standards."""
        output, manifest = self.compile_and_save(self.CONTRACT_PATH_PY)
        self.assertIn('supportedstandards', manifest)
        self.assertIsInstance(manifest['supportedstandards'], list)
    def test_gm_symbol(self):
        """symbol() returns the token ticker 'GM'."""
        engine = self.prepare_testengine()
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'symbol', expected_result_type=str)
        self.assertEqual('GM', result)
    def test_gm_decimals(self):
        """decimals() returns 8."""
        engine = self.prepare_testengine()
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'decimals')
        self.assertEqual(8, result)
    def test_gm_total_supply(self):
        """totalSupply() equals 100M tokens expressed in base units (10^8)."""
        total_supply = 100_000_000 * 10 ** 8
        engine = self.prepare_testengine()
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'totalSupply')
        self.assertEqual(total_supply, result)
    def test_gm_total_balance_of(self):
        """balanceOf() returns the full supply for the deployer and rejects bad hashes."""
        total_supply = 100_000_000 * 10 ** 8
        engine = self.prepare_testengine()
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.DEPLOYER_ACCOUNT,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH])
        self.print_notif(engine.notifications)
        self.assertEqual(total_supply, result)
        # should fail when the script length is not 20
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', bytes(10))
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', bytes(30))
        self.print_notif(engine.notifications)
    def test_gm_total_transfer(self):
        """transfer() enforces signing/balance/argument rules and fires Transfer events."""
        transferred_amount = 10 * 10 ** 8  # 10 tokens
        engine = self.prepare_testengine()
        # should fail if the sender doesn't sign
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                         self.OWNER_SCRIPT_HASH, self.OTHER_ACCOUNT_1, transferred_amount, None,
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should fail if the sender doesn't have enough balance
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                         self.OTHER_ACCOUNT_1, self.OWNER_SCRIPT_HASH, transferred_amount, None,
                                         signer_accounts=[self.OTHER_ACCOUNT_1],
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should fail when any of the scripts' length is not 20
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                    self.OWNER_SCRIPT_HASH, bytes(10), transferred_amount, "")
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                    bytes(10), self.OTHER_ACCOUNT_1, transferred_amount, "")
        # should fail when the amount is less than 0
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                    self.OTHER_ACCOUNT_1, self.OWNER_SCRIPT_HASH, -10, None)
        # fire the transfer event when transferring to yourself
        balance_before = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.DEPLOYER_ACCOUNT)
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                         self.DEPLOYER_ACCOUNT, self.DEPLOYER_ACCOUNT, transferred_amount, None,
                                         signer_accounts=[self.DEPLOYER_ACCOUNT],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(2, len(transfer_events))
        self.assertEqual(3, len(transfer_events[1].arguments))
        sender, receiver, amount = transfer_events[1].arguments
        # Event args may surface as str; normalise to bytes for comparison.
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.DEPLOYER_ACCOUNT, sender)
        self.assertEqual(self.DEPLOYER_ACCOUNT, receiver)
        self.assertEqual(transferred_amount, amount)
        # transferring to yourself doesn't change the balance
        balance_after = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.DEPLOYER_ACCOUNT)
        self.assertEqual(balance_before, balance_after)
        # does fire the transfer event
        balance_sender_before = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.DEPLOYER_ACCOUNT)
        balance_receiver_before = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.OTHER_ACCOUNT_1)
        result = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'transfer',
                                         self.DEPLOYER_ACCOUNT, self.OTHER_ACCOUNT_1, transferred_amount, None,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(3, len(transfer_events))
        self.assertEqual(3, len(transfer_events[2].arguments))
        sender, receiver, amount = transfer_events[2].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.DEPLOYER_ACCOUNT, sender)
        self.assertEqual(self.OTHER_ACCOUNT_1, receiver)
        self.assertEqual(transferred_amount, amount)
        # transferring to someone other than yourself does change the balance
        balance_sender_after = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.DEPLOYER_ACCOUNT)
        balance_receiver_after = self.run_smart_contract(engine, self.CONTRACT_PATH_NEF, 'balanceOf', self.OTHER_ACCOUNT_1)
        self.assertEqual(balance_sender_before - transferred_amount, balance_sender_after)
        self.assertEqual(balance_receiver_before + transferred_amount, balance_receiver_after)
| 1.625 | 2 |
mayan/apps/sources/management/commands/bulk_upload.py | Dave360-crypto/mayan-edms | 3 | 12766155 | <gh_stars>1-10
from __future__ import absolute_import
from json import loads
from optparse import make_option
import os
import sys
from django.core.management.base import CommandError, LabelCommand
from documents.models import DocumentType
from metadata.api import convert_dict_to_dict_list
from ...models import OutOfProcess
from common.compressed_files import NotACompressedFile
class Command(LabelCommand):
    """Management command: bulk-upload documents from a compressed file.

    NOTE: this module is Python 2 code (print statements, raw_input).
    """
    args = '<filename>'
    help = 'Upload documents from a compressed file in to the database.'
    option_list = LabelCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive',
            default=True, help='Do not ask the user for confirmation before '
            'starting.'),
        make_option('--metadata', action='store', dest='metadata',
            help='A metadata dictionary list to apply to the documents.'),
        make_option('--document_type', action='store', dest='document_type_name',
            help='The document type to apply to the uploaded documents.'),
    )
    def handle_label(self, label, **options):
        """Validate options, confirm with the user, and upload ``label``.

        ``label`` is the path of the compressed file; exits via sys.exit()
        on bad metadata JSON or an unknown document type.
        """
        if not os.access(label, os.R_OK):
            raise CommandError("File '%s' is not readable." % label)
        if options['metadata']:
            try:
                # --metadata is a JSON dict; converted to the list-of-dicts
                # form the metadata API expects.
                metadata_dict = loads(options['metadata'])
                metadata_dict_list = convert_dict_to_dict_list(metadata_dict)
            except Exception as exception:
                sys.exit('Metadata error: %s' % exception)
        else:
            metadata_dict_list = None
        if options['document_type_name']:
            try:
                document_type = DocumentType.objects.get(name=options['document_type_name'])
            except DocumentType.DoesNotExist:
                sys.exit('Unknown document type')
        else:
            document_type = None
        if _confirm(options['interactive']) == 'yes':
            print 'Beginning upload...'
            if metadata_dict_list:
                print 'Using the metadata values:'
                for key, value in metadata_dict.items():
                    print '%s: %s' % (key, value)
            if document_type:
                print 'Uploaded document will be of type: %s' % options['document_type_name']
            source = OutOfProcess()
            fd = open(label)
            try:
                # expand=True unpacks the archive into individual documents.
                result = source.upload_file(fd, filename=None, use_file_name=False, document_type=document_type, expand=True, metadata_dict_list=metadata_dict_list, user=None, document=None, new_version_data=None, command_line=True)
                # NOTE(review): ``result`` is unused and this ``pass`` is dead code.
                pass
            except NotACompressedFile:
                print '%s is not a compressed file.' % label
            else:
                print 'Finished.'
            # NOTE(review): fd is not closed if upload_file raises something
            # other than NotACompressedFile; a try/finally would be safer.
            fd.close()
        else:
            print 'Cancelled.'
def _confirm(interactive):
if not interactive:
return 'yes'
return raw_input('You have requested to bulk upload a number of documents from a compressed file.\n'
'Are you sure you want to do this?\n'
'Type \'yes\' to continue, or any other value to cancel: ')
| 2.046875 | 2 |
fa_expects/models.py | shimayu22/fa_expects_app | 0 | 12766156 | from django.db import models
from django.core import validators
# Create your models here.
class Players(models.Model):
    """An NPB player eligible for free agency."""
    # Choice tuples: first entry is the empty "please select" placeholder;
    # the remaining values 1..N happen to equal their tuple index.
    POSITION_CHOICES = (
        ('', '選択'),
        (1, '投'),
        (2, '捕'),
        (3, '一'),
        (4, '二'),
        (5, '三'),
        (6, '遊'),
        (7, '外'),
    )
    DOMINANT_HAND_CHOICES = (
        ('', '選択'),
        (1, '右投右打'),
        (2, '右投左打'),
        (3, '右投両打'),
        (4, '左投左打'),
    )
    NPB_TEAM_CHOICES = (
        ('', '選択'),
        (1, '西武'),
        (2, 'ソフトバンク'),
        (3, '楽天'),
        (4, 'ロッテ'),
        (5, '日本ハム'),
        (6, 'オリックス'),
        (7, '巨人'),
        (8, 'DeNA'),
        (9, '阪神'),
        (10, '広島'),
        (11, '中日'),
        (12, 'ヤクルト'),
    )
    name = models.CharField(
        verbose_name="選手名",
        max_length=10,
    )
    # Age constrained to 26-60 (FA eligibility window).
    age = models.PositiveSmallIntegerField(
        verbose_name="年齢",
        validators=[validators.MinValueValidator(26),validators.MaxValueValidator(60)],
    )
    position = models.IntegerField(
        verbose_name="メインポジション",
        choices=POSITION_CHOICES,
        default=0,
    )
    dominant_hand = models.IntegerField(
        verbose_name="利き手",
        choices=DOMINANT_HAND_CHOICES,
        default=0,
    )
    department = models.IntegerField(
        verbose_name="現所属球団",
        choices=NPB_TEAM_CHOICES,
        default=0,
    )
    created_at = models.DateTimeField(
        verbose_name="登録日",
        auto_now_add=True,
    )
    def __str__(self):
        # NOTE(review): indexing CHOICES by stored value only works because
        # each value equals its tuple index; get_position_display() would be
        # the robust Django idiom -- confirm before relying on this.
        return f'{self.name} : {self.POSITION_CHOICES[self.position][1]} : {self.NPB_TEAM_CHOICES[self.department][1]}'
    class Meta:
        verbose_name = "選手情報"
        verbose_name_plural = "選手情報"
        ordering = ['department', 'position']
class RequestedConditions(models.Model):
    """A search condition row; value 0 in any field means "no restriction"."""
    POSITION_CHOICES = (
        (0, 'なし'),
        (1, '投手'),
        (2, '捕手'),
        (3, '一塁手'),
        (4, '二塁手'),
        (5, '三塁手'),
        (6, '遊撃手'),
        (7, '外野手'),
    )
    DOMINANT_HAND_CHOICES = (
        (0, 'なし'),
        (1, '右投右打'),
        (2, '右投左打'),
        (3, '右投両打'),
        (4, '左投左打'),
    )
    # 0 means "any age"; see set_players_condition().
    age = models.PositiveSmallIntegerField(
        verbose_name="年齢",
        default=0,
    )
    position = models.IntegerField(
        verbose_name="メインポジション",
        choices=POSITION_CHOICES,
        default=0,
    )
    dominant_hand = models.IntegerField(
        verbose_name="利き手",
        choices=DOMINANT_HAND_CHOICES,
        default=0,
    )
    created_at = models.DateTimeField(
        verbose_name="登録日",
        auto_now_add=True,
    )
    def __str__(self):
        # Value-equals-index holds for these choice tuples (0..N contiguous).
        return f'{self.age} : {self.POSITION_CHOICES[self.position][1]} : {self.DOMINANT_HAND_CHOICES[self.dominant_hand][1]}'
    class Meta:
        verbose_name = "要望"
        verbose_name_plural = "要望"
def set_players_condition():
    """Return queryset-filter kwargs built from the newest RequestedConditions row.

    Each condition field greater than 0 contributes one lookup
    (age maps to ``age__lt``); 0 means "no restriction" and is skipped.
    """
    latest = RequestedConditions.objects.latest('pk')
    lookup_map = (
        ('age', 'age__lt'),
        ('position', 'position'),
        ('dominant_hand', 'dominant_hand'),
    )
    return {
        lookup: getattr(latest, attr)
        for attr, lookup in lookup_map
        if getattr(latest, attr) > 0
    }
class FaExpects(models.Model):
    """A predicted free-agency destination (team + priority) for a player."""
    PRIORITY_CHOICES = (
        ('', '選択'),
        (1, '第一希望'),
        (2, '第二希望'),
        (3, '第三希望'),
        (4, '第四希望以降'),
    )
    NPB_TEAM_CHOICES = (
        ('', '選択'),
        (1, '西武'),
        (2, 'ソフトバンク'),
        (3, '楽天'),
        (4, 'ロッテ'),
        (5, '日本ハム'),
        (6, 'オリックス'),
        (7, '巨人'),
        (8, 'DeNA'),
        (9, '阪神'),
        (10, '広島'),
        (11, '中日'),
        (12, 'ヤクルト'),
    )
    team = models.IntegerField(
        verbose_name="球団",
        choices=NPB_TEAM_CHOICES,
        default=0,
    )
    # Choice list in forms is restricted dynamically by the latest
    # RequestedConditions row via set_players_condition.
    player_id = models.ForeignKey(
        Players,
        on_delete=models.CASCADE,
        verbose_name="選手",
        limit_choices_to=set_players_condition,
    )
    priority = models.IntegerField(
        verbose_name="優先度",
        choices=PRIORITY_CHOICES,
        default=0,
    )
    created_at = models.DateTimeField(
        verbose_name="登録日",
        auto_now_add=True,
    )
    def __str__(self):
        return f'{self.player_id} : {self.PRIORITY_CHOICES[self.priority][1]}'
    class Meta:
        verbose_name = "FA予想"
        verbose_name_plural = "FA予想"
        # Dataset-export residue ("| 2.21875 | 2 |") that was fused onto this
        # line has been removed; it would have OR-ed the list with a float.
        ordering = ['priority']
tests/core/usecase/stream/test_get_streams.py | thepabloaguilar/kamui | 5 | 12766157 | from unittest.mock import Mock
from uuid import uuid4
import pytest
from returns.result import Failure, Result, Success
from kamui.core.entity.source import SourceType
from kamui.core.entity.stream import Stream
from kamui.core.use_case.failure import FailureDetails, BusinessFailureDetails
from kamui.core.use_case.stream.get_streams import FindStreams, GetStreamsUseCase
@pytest.fixture(scope="function")
def find_streams() -> Mock:
return Mock(spec=FindStreams)
@pytest.fixture(scope="function")
def get_streams_use_case(find_streams: Mock) -> GetStreamsUseCase:
return GetStreamsUseCase(find_streams)
def test_should_return_streams_correctly(
    get_streams_use_case: GetStreamsUseCase, find_streams: Mock
) -> None:
    """Happy path: the use case unwraps and returns what FindStreams yields."""
    streams_list = [
        Stream(
            stream_id=uuid4(),
            name="STREAM_ONE",
            source_type=SourceType.TOPIC,
            source_name="some_topic",
        ),
        Stream(
            stream_id=uuid4(),
            name="STREAM_TWO",
            source_type=SourceType.STREAM,
            source_name="STREAM_ONE",
        ),
    ]
    find_streams.return_value = Success(streams_list)
    actual = get_streams_use_case()
    find_streams.assert_called_once()
    # Result should be a Success wrapping the exact list from the gateway.
    assert isinstance(actual, Result.success_type)
    assert isinstance(actual.unwrap(), list)
    assert streams_list == actual.unwrap()
def test_should_return_failure_when_find_streams_fails(
    get_streams_use_case: GetStreamsUseCase, find_streams: Mock
) -> None:
    """Gateway failures are wrapped as non-business failures carrying the cause."""
    failure = FailureDetails(reason="TEST_FIND_STREAMS_FAIL")
    find_streams.return_value = Failure(failure)
    actual = get_streams_use_case()
    find_streams.assert_called_once()
    assert isinstance(actual, Result.failure_type)
    assert isinstance(actual.failure(), BusinessFailureDetails)
    # The use case tags infrastructure errors with this fixed reason and
    # preserves the original failure as the cause.
    assert "NON_BUSINESS_RULE_CAUSE" == actual.failure().reason
    assert failure == actual.failure().failure_due
| 2.125 | 2 |
algo-c2d.py | AlgoveraAI/DeFi | 0 | 12766158 | <reponame>AlgoveraAI/DeFi
import sys
def get_input(local=False):
    """Stub: resolve the algorithm's input file path.

    NOTE(review): not implemented -- always returns None. ``local``
    presumably switches between compute-to-data and local paths; confirm.
    """
    pass
def run_algo(local=False):
    """Stub entry point for the compute-to-data job.

    Currently only fetches the (unimplemented) input filename and performs
    no computation.
    """
    filename = get_input(local)  # NOTE(review): unused until the algorithm body exists
if __name__ == "__main__":
local = (len(sys.argv) == 2 and sys.argv[1] == "local")
run_algo(local) | 2.015625 | 2 |
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/aio/_operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 1 | 12766159 | <reponame>vincenttran-msft/azure-sdk-for-python
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ..._operations._operations import (
build_add_connection_to_group_request,
build_add_user_to_group_request,
build_close_all_connections_request,
build_close_connection_request,
build_close_group_connections_request,
build_close_user_connections_request,
build_connection_exists_request,
build_get_client_access_token_request,
build_grant_permission_request,
build_group_exists_request,
build_has_permission_request,
build_remove_connection_from_group_request,
build_remove_user_from_all_groups_request,
build_remove_user_from_group_request,
build_revoke_permission_request,
build_send_to_all_request,
build_send_to_connection_request,
build_send_to_group_request,
build_send_to_user_request,
build_user_exists_request,
)
from .._vendor import MixinABC
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
# Alias for deserialized JSON response bodies returned by the operations below.
JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
T = TypeVar("T")
# Optional custom-deserializer callback accepted by every operation via `cls=`.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebPubSubServiceClientOperationsMixin(MixinABC):
    @distributed_trace_async
    async def get_client_access_token(
        self,
        *,
        user_id: Optional[str] = None,
        roles: Optional[List[str]] = None,
        minutes_to_expire: Optional[int] = 60,
        **kwargs: Any
    ) -> JSON:
        """Generate token for the client to connect Azure Web PubSub service.

        NOTE: AutoRest-generated code -- edits here are lost on regeneration.

        :keyword user_id: User Id. Default value is None.
        :paramtype user_id: str
        :keyword roles: Roles that the connection with the generated token will have. Default value is
         None.
        :paramtype roles: list[str]
        :keyword minutes_to_expire: The expire time of the generated token. Default value is 60.
        :paramtype minutes_to_expire: int
        :return: JSON object, e.g. ``{"token": "..."}`` on status 200.
        :rtype: JSON
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        # Callers may extend/override the status-to-exception mapping.
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]
        request = build_get_client_access_token_request(
            hub=self._config.hub,
            api_version=api_version,
            user_id=user_id,
            roles=roles,
            minutes_to_expire=minutes_to_expire,
            headers=_headers,
            params=_params,
        )
        # skip_quote keeps the account endpoint verbatim in the URL.
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None
        if cls:
            # Custom deserializer hook takes precedence over the default JSON body.
            return cls(pipeline_response, cast(JSON, deserialized), {})
        return cast(JSON, deserialized)
    @distributed_trace_async
    async def close_all_connections(  # pylint: disable=inconsistent-return-statements
        self, *, excluded: Optional[List[str]] = None, reason: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Close the connections in the hub.

        NOTE: AutoRest-generated code -- edits here are lost on regeneration.

        :keyword excluded: Exclude these connectionIds when closing the connections in the hub. Default
         value is None.
        :paramtype excluded: list[str]
        :keyword reason: The reason closing the client connection. Default value is None.
        :paramtype reason: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_close_all_connections_request(
            hub=self._config.hub,
            api_version=api_version,
            excluded=excluded,
            reason=reason,
            headers=_headers,
            params=_params,
        )
        # skip_quote keeps the account endpoint verbatim in the URL.
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Service signals success with 204 No Content only.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        if cls:
            return cls(pipeline_response, None, {})
@distributed_trace_async
async def send_to_all(  # pylint: disable=inconsistent-return-statements
    self,
    message: Union[IO, str],
    *,
    excluded: Optional[List[str]] = None,
    content_type: Optional[str] = "application/json",
    **kwargs: Any
) -> None:
    """Broadcast content inside request body to all the connected client connections.

    Broadcast content inside request body to all the connected client connections.

    :param message: The payload body.
    :type message: IO or str
    :keyword excluded: Excluded connection Ids. Default value is None.
    :paramtype excluded: list[str]
    :keyword content_type: Media type of the body sent to the API. Known values are:
     "application/json", "application/octet-stream", and "text/plain". Default value is
     "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    # Route the payload by media type: JSON bodies are passed via ``json`` so the
    # transport serializes them; octet-stream/plain-text bodies go via ``content`` as-is.
    # Any ";charset=..." suffix is ignored when matching the media type.
    _json = None
    _content = None
    content_type = content_type or ""
    if content_type.split(";")[0] in ["application/json"]:
        _json = message
    elif content_type.split(";")[0] in ["application/octet-stream", "text/plain"]:
        _content = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )

    request = build_send_to_all_request(
        hub=self._config.hub,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        excluded=excluded,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 Accepted is the only success status; anything else raises.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def connection_exists(self, connection_id: str, **kwargs: Any) -> bool:
    """Check if the connection with the given connectionId exists.

    Check if the connection with the given connectionId exists.

    :param connection_id: The connection Id.
    :type connection_id: str
    :return: bool
    :rtype: bool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_connection_exists_request(
        hub=self._config.hub,
        connection_id=connection_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 404 here means "connection does not exist" — an expected answer, not an error.
    if response.status_code not in [200, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
    # True for any 2xx (exists); the 404 case falls through to False.
    return 200 <= response.status_code <= 299
@distributed_trace_async
async def close_connection(  # pylint: disable=inconsistent-return-statements
    self, connection_id: str, *, reason: Optional[str] = None, **kwargs: Any
) -> None:
    """Close the client connection.

    Close the client connection.

    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword reason: The reason closing the client connection. Default value is None.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_close_connection_request(
        hub=self._config.hub,
        connection_id=connection_id,
        api_version=api_version,
        reason=reason,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def send_to_connection(  # pylint: disable=inconsistent-return-statements
    self,
    connection_id: str,
    message: Union[IO, str],
    *,
    content_type: Optional[str] = "application/json",
    **kwargs: Any
) -> None:
    """Send content inside request body to the specific connection.

    Send content inside request body to the specific connection.

    :param connection_id: The connection Id.
    :type connection_id: str
    :param message: The payload body.
    :type message: IO or str
    :keyword content_type: Media type of the body sent to the API. Known values are:
     "application/json", "application/octet-stream", and "text/plain". Default value is
     "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    # Route the payload by media type: JSON bodies are passed via ``json`` so the
    # transport serializes them; octet-stream/plain-text bodies go via ``content`` as-is.
    # Any ";charset=..." suffix is ignored when matching the media type.
    _json = None
    _content = None
    content_type = content_type or ""
    if content_type.split(";")[0] in ["application/json"]:
        _json = message
    elif content_type.split(";")[0] in ["application/octet-stream", "text/plain"]:
        _content = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )

    request = build_send_to_connection_request(
        hub=self._config.hub,
        connection_id=connection_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 Accepted is the only success status; anything else raises.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def group_exists(self, group: str, **kwargs: Any) -> bool:
    """Check if there are any client connections inside the given group.

    Check if there are any client connections inside the given group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :return: bool
    :rtype: bool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_group_exists_request(
        hub=self._config.hub,
        group=group,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 404 here means "no connections in the group" — an expected answer, not an error.
    if response.status_code not in [200, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
    # True for any 2xx (exists); the 404 case falls through to False.
    return 200 <= response.status_code <= 299
@distributed_trace_async
async def close_group_connections(  # pylint: disable=inconsistent-return-statements
    self, group: str, *, excluded: Optional[List[str]] = None, reason: Optional[str] = None, **kwargs: Any
) -> None:
    """Close connections in the specific group.

    Close connections in the specific group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :keyword excluded: Exclude these connectionIds when closing the connections in the group.
     Default value is None.
    :paramtype excluded: list[str]
    :keyword reason: The reason closing the client connection. Default value is None.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_close_group_connections_request(
        hub=self._config.hub,
        group=group,
        api_version=api_version,
        excluded=excluded,
        reason=reason,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def send_to_group(  # pylint: disable=inconsistent-return-statements
    self,
    group: str,
    message: Union[IO, str],
    *,
    excluded: Optional[List[str]] = None,
    content_type: Optional[str] = "application/json",
    **kwargs: Any
) -> None:
    """Send content inside request body to a group of connections.

    Send content inside request body to a group of connections.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param message: The payload body.
    :type message: IO or str
    :keyword excluded: Excluded connection Ids. Default value is None.
    :paramtype excluded: list[str]
    :keyword content_type: Media type of the body sent to the API. Known values are:
     "application/json", "application/octet-stream", and "text/plain". Default value is
     "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    # Route the payload by media type: JSON bodies are passed via ``json`` so the
    # transport serializes them; octet-stream/plain-text bodies go via ``content`` as-is.
    # Any ";charset=..." suffix is ignored when matching the media type.
    _json = None
    _content = None
    content_type = content_type or ""
    if content_type.split(";")[0] in ["application/json"]:
        _json = message
    elif content_type.split(";")[0] in ["application/octet-stream", "text/plain"]:
        _content = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )

    request = build_send_to_group_request(
        hub=self._config.hub,
        group=group,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        excluded=excluded,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 Accepted is the only success status; anything else raises.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def add_connection_to_group(  # pylint: disable=inconsistent-return-statements
    self, group: str, connection_id: str, **kwargs: Any
) -> None:
    """Add a connection to the target group.

    Add a connection to the target group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_add_connection_to_group_request(
        hub=self._config.hub,
        group=group,
        connection_id=connection_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200 OK is the only success status; anything else raises.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def remove_connection_from_group(  # pylint: disable=inconsistent-return-statements
    self, group: str, connection_id: str, **kwargs: Any
) -> None:
    """Remove a connection from the target group.

    Remove a connection from the target group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_remove_connection_from_group_request(
        hub=self._config.hub,
        group=group,
        connection_id=connection_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def user_exists(self, user_id: str, **kwargs: Any) -> bool:
    """Check if there are any client connections connected for the given user.

    Check if there are any client connections connected for the given user.

    :param user_id: Target user Id.
    :type user_id: str
    :return: bool
    :rtype: bool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_user_exists_request(
        hub=self._config.hub,
        user_id=user_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 404 here means "no connections for this user" — an expected answer, not an error.
    if response.status_code not in [200, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
    # True for any 2xx (exists); the 404 case falls through to False.
    return 200 <= response.status_code <= 299
@distributed_trace_async
async def close_user_connections(  # pylint: disable=inconsistent-return-statements
    self, user_id: str, *, excluded: Optional[List[str]] = None, reason: Optional[str] = None, **kwargs: Any
) -> None:
    """Close connections for the specific user.

    Close connections for the specific user.

    :param user_id: The user Id.
    :type user_id: str
    :keyword excluded: Exclude these connectionIds when closing the connections for the user.
     Default value is None.
    :paramtype excluded: list[str]
    :keyword reason: The reason closing the client connection. Default value is None.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_close_user_connections_request(
        hub=self._config.hub,
        user_id=user_id,
        api_version=api_version,
        excluded=excluded,
        reason=reason,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def send_to_user(  # pylint: disable=inconsistent-return-statements
    self, user_id: str, message: Union[IO, str], *, content_type: Optional[str] = "application/json", **kwargs: Any
) -> None:
    """Send content inside request body to the specific user.

    Send content inside request body to the specific user.

    :param user_id: The user Id.
    :type user_id: str
    :param message: The payload body.
    :type message: IO or str
    :keyword content_type: Media type of the body sent to the API. Known values are:
     "application/json", "application/octet-stream", and "text/plain". Default value is
     "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    # Route the payload by media type: JSON bodies are passed via ``json`` so the
    # transport serializes them; octet-stream/plain-text bodies go via ``content`` as-is.
    # Any ";charset=..." suffix is ignored when matching the media type.
    _json = None
    _content = None
    content_type = content_type or ""
    if content_type.split(";")[0] in ["application/json"]:
        _json = message
    elif content_type.split(";")[0] in ["application/octet-stream", "text/plain"]:
        _content = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )

    request = build_send_to_user_request(
        hub=self._config.hub,
        user_id=user_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 Accepted is the only success status; anything else raises.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def add_user_to_group(  # pylint: disable=inconsistent-return-statements
    self, group: str, user_id: str, **kwargs: Any
) -> None:
    """Add a user to the target group.

    Add a user to the target group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_add_user_to_group_request(
        hub=self._config.hub,
        group=group,
        user_id=user_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200 OK is the only success status; anything else raises.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def remove_user_from_group(  # pylint: disable=inconsistent-return-statements
    self, group: str, user_id: str, **kwargs: Any
) -> None:
    """Remove a user from the target group.

    Remove a user from the target group.

    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_remove_user_from_group_request(
        hub=self._config.hub,
        group=group,
        user_id=user_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def remove_user_from_all_groups(  # pylint: disable=inconsistent-return-statements
    self, user_id: str, **kwargs: Any
) -> None:
    """Remove a user from all groups.

    Remove a user from all groups.

    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_remove_user_from_all_groups_request(
        hub=self._config.hub,
        user_id=user_id,
        api_version=api_version,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def grant_permission(  # pylint: disable=inconsistent-return-statements
    self, permission: str, connection_id: str, *, target_name: Optional[str] = None, **kwargs: Any
) -> None:
    """Grant permission to the connection.

    Grant permission to the connection.

    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Known values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group name.
     Default value is None.
    :paramtype target_name: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_grant_permission_request(
        hub=self._config.hub,
        permission=permission,
        connection_id=connection_id,
        api_version=api_version,
        target_name=target_name,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200 OK is the only success status; anything else raises.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def revoke_permission(  # pylint: disable=inconsistent-return-statements
    self, permission: str, connection_id: str, *, target_name: Optional[str] = None, **kwargs: Any
) -> None:
    """Revoke permission for the connection.

    Revoke permission for the connection.

    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Known values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group name.
     Default value is None.
    :paramtype target_name: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Default status→exception mapping; callers may extend/override it via an ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_revoke_permission_request(
        hub=self._config.hub,
        permission=permission,
        connection_id=connection_id,
        api_version=api_version,
        target_name=target_name,
        headers=_headers,
        params=_params,
    )
    # Fill the {Endpoint} placeholder of the URL template with the configured endpoint.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 204 No Content is the only success status; anything else raises.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # ``cls`` is an optional caller-supplied response-processing hook (azure-core convention).
    if cls:
        return cls(pipeline_response, None, {})
@distributed_trace_async
async def has_permission(
    self, permission: str, connection_id: str, *, target_name: Optional[str] = None, **kwargs: Any
) -> bool:
    """Check if a connection has permission to the specified action.

    Check if a connection has permission to the specified action.

    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Known values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group name.
     Default value is None.
    :paramtype target_name: str
    :return: bool
    :rtype: bool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Map well-known HTTP status codes to typed azure-core exceptions; callers
    # may extend/override the mapping through the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01")) # type: str
    cls = kwargs.pop("cls", None) # type: ClsType[None]
    request = build_has_permission_request(
        hub=self._config.hub,
        permission=permission,
        connection_id=connection_id,
        api_version=api_version,
        target_name=target_name,
        headers=_headers,
        params=_params,
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response
    # 200 -> has permission, 404 -> does not; anything else is an error.
    if response.status_code not in [200, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})
    # A 404 falls through here and yields False; 200 yields True.
    return 200 <= response.status_code <= 299
| 1.734375 | 2 |
text_summarize/text_summarize.py | anshul2807/Automation-scripts | 496 | 12766160 | <filename>text_summarize/text_summarize.py
import argparse
from summarizer import Summarizer
def text_summarize(text, **kwargs):
    """Run extractive summarization on *text* and return the summary string.

    All keyword arguments (e.g. ``ratio``) are forwarded unchanged to the
    ``Summarizer`` callable.
    """
    summarizer = Summarizer()
    return summarizer(text, **kwargs)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Summarize the given text')
    parser.add_argument('-t', '--text', help="Text to summarize",
                        type=str)
    parser.add_argument('-f', '--file', help="Filename to read text from",
                        type=str)
    parser.add_argument('-r', '--ratio',
                        help="Given the ratio of the summarized text "
                             "(default: 0.2)",
                        type=float, default=0.2)
    parser.add_argument('-o', '--output',
                        help="Given the path to an output file. "
                             "Otherwise stdout will be used",
                        type=str, default=None)
    args = parser.parse_args()

    # Exactly one of --text / --file must be supplied.
    if not (args.text or args.file):
        parser.error("Either --text or --file is required")
    if args.text and args.file:
        parser.error("The arguments --text and --file are not "
                     "allowed together")

    if args.file:
        # BUGFIX: the original used readlines() + "\n".join(...), which kept
        # each line's trailing newline AND inserted another one, doubling
        # every line break fed to the summarizer. read() preserves the text.
        with open(args.file, 'r') as infile:
            text = infile.read()
    if args.text:
        text = args.text

    summary = text_summarize(text, ratio=args.ratio)

    if args.output:
        with open(args.output, 'w') as outfile:
            outfile.write(summary)
            outfile.write("\n")
    else:
        print(summary)
| 3.984375 | 4 |
src/mave/train.py | goodarzilab/ciberatac | 3 | 12766161 | from apex import amp
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime
import scipy.sparse as sp_sparse
import tables
from itertools import chain
from model import loss_function
from model import VAE
import numpy as np
import os
import pandas as pd
from sklearn.metrics import accuracy_score
# from train_multitask_ccle import read_tsv
import torch
opt_level = 'O1'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def read_tsv(nparpath, genes, outdir, gmtmat, normalize_vals=True):
    """Load a bulk expression table (.gct or plain .tsv) as cells x genes.

    The matrix is restricted to genes shared with *genes*, cached to
    <outdir>/cellByGeneMatrix.npz, and *gmtmat* rows are subset in lockstep
    so gene order stays aligned with the returned matrix.

    :param nparpath: path to a gzipped .gct or .tsv expression table
    :param genes: array of gene symbols defining the target gene universe
    :param outdir: directory for the cached .npz matrix
    :param gmtmat: gene-by-TF membership matrix aligned with *genes*
    :param normalize_vals: if True, scale each sample to 1000 total counts
    :return: (cells x genes array, barcodes, subset gmtmat, kept gene names)
    """
    h5outpath = os.path.join(
        outdir, "cellByGeneMatrix.npz")
    if "gct" in nparpath:
        # GCT format: two header rows, symbols in a "Description" column.
        rnadf = pd.read_csv(
            nparpath, sep="\t", index_col=0,
            compression="gzip", skiprows=2)
        rnadf.drop_duplicates(subset=["Description"], inplace=True)
        rnadf = rnadf[rnadf["Description"].isin(genes)]
        npar = np.array(rnadf.iloc[:, 1:])
        ar_genes = np.array(rnadf["Description"])
        barcodes = np.array(rnadf.columns[1:])
    else:
        # Plain TSV: genes on the index, samples on the columns.
        rnadf = pd.read_csv(
            nparpath, sep="\t", index_col=0,
            compression="gzip")
        npar = np.array(rnadf)
        ar_genes = rnadf.index
        barcodes = np.array(rnadf.columns)
    # Divide by max
    # arsum = np.matrix.sum(npar, axis=0)
    if normalize_vals:
        # Library-size normalization: each sample (column) sums to 1000.
        arsum = np.apply_along_axis(np.sum, 0, npar)
        npar = (npar * 1000) / arsum
    # Keep only genes present in both the requested universe and the table.
    _, idx_g1, idx_g2 = np.intersect1d(genes, ar_genes, return_indices=True)
    npar = npar[idx_g2, :]
    gmtmat = gmtmat[idx_g1, :]
    out_genes = genes[idx_g1]
    # Transpose to cells x genes before caching.
    npar = np.transpose(npar)
    np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes,
                        genes=ar_genes)
    return npar, barcodes, gmtmat, out_genes
def make_plot_umap(mudf, metadf, outdir, numlvs=10):
    """Embed latent means with UMAP and save cell-type colored scatter plots.

    For each (n_neighbors, min_dist) combination, writes the 2D embedding to
    a .tsv.gz plus .pdf/.png scatter plots under *outdir*.

    :param mudf: dataframe whose first *numlvs* columns are latent means,
        indexed by barcode
    :param metadf: metadata with "Barcode" and "CellType" columns
    :param outdir: directory for embedding tables and figures
    :param numlvs: number of latent-variable columns to embed
    """
    metadf.index = metadf["Barcode"]
    # Imported lazily: umap/seaborn are only required when plotting.
    import umap
    import seaborn as sns
    mumat = np.array(mudf.iloc[:, :numlvs])
    for n_neighbors in [10, 100]:
        for min_dist in [0.45]:
            adname = "UMAP_dist-{}_nNeigh-{}".format(
                min_dist, n_neighbors)
            print(adname)
            reducer = umap.UMAP(
                n_neighbors=n_neighbors,
                min_dist=min_dist)
            embedding = reducer.fit_transform(mumat)
            umap_output = pd.DataFrame(embedding)
            umap_output.columns = ["UMAP1", "UMAP2"]
            # Color points by the cell type recorded in the metadata.
            umap_output["CellType"] = list(metadf.loc[mudf.index, "CellType"])
            umap_output.index = mudf.index
            umap_output.to_csv(
                os.path.join(outdir, adname + ".tsv.gz"),
                sep="\t", compression="gzip")
            sns_plot = sns.relplot(
                x="UMAP1", y="UMAP2", hue="CellType", data=umap_output,
                height=6, aspect=1.5)
            sns_plot.savefig(
                os.path.join(outdir, adname + ".pdf"))
            sns_plot.savefig(
                os.path.join(outdir, adname + ".png"))
def make_args():
    """Return a hard-coded argument list for interactive/debug runs.

    The paths point at a specific cluster layout (scratch volumes on the
    author's machines) and are not portable. Also creates the output
    directory as a side effect.

    :return: [gmtpath, nparpaths, outdir, numlvs, genepath, metapaths,
        existingmodelpath, use_connections, loss_scalers,
        predict_celltypes, num_celltypes]
    """
    metapaths = [
        "/scratch/hdd001/home/mkarimza/" +
        "ciberAtac/10x/raw/scRNA-seq_10XPBMC" +
        "_metadataWithCellType.tsv",
        "/scratch/ssd001/home/mkarimza/" +
        "data/ciberatac/models/vae202012/" +
        "SW480Files/metadata_for_vae_visualization.tsv"]
    nparpaths = [
        "/scratch/hdd001/home/mkarimza/" +
        "ciberAtac/10x/raw/pbmc_unsorted_10k" +
        "_filtered_feature_bc_matrix.h5",
        "/scratch/hdd001/home/mkarimza/" +
        "johnny/A06/10X/outs/" +
        "filtered_feature_bc_matrix.h5"]
    genepath = "/scratch/ssd001/home/mkarimza/" +\
        "data/ciberatac/models/vae202101/" +\
        "scviVersusCustomized/customizedScvi" +\
        "FullTrainScaled1000/genes.txt"
    gmtpath = "../c3.tft.v7.2.symbols.gmt"
    # NOTE: this second assignment overrides the genepath above.
    genepath = "/scratch/ssd001/home/mkarimza/" +\
        "data/ciberatac/models/vae202012/" +\
        "commonGenes/Genes_passing_40p.txt"
    outdir = "/scratch/ssd001/home/mkarimza/" +\
        "data/ciberatac/models/vae202101/" +\
        "customScviAppliedOnPbmcAndSw480"
    numlvs = 10
    os.makedirs(outdir, exist_ok=True)
    existingmodelpath = "/scratch/ssd001/home/mkarimza/" +\
        "data/ciberatac/models/vae202101/" +\
        "scviVersusCustomized/customized" +\
        "ScviFullTrainScaled1000/VAE_10LVS.pt"
    use_connections = True
    loss_scalers = [1, 1, 1]
    predict_celltypes = True
    num_celltypes = 11
    argslist = [gmtpath, nparpaths, outdir,
                numlvs, genepath, metapaths,
                existingmodelpath,
                use_connections,
                loss_scalers,
                predict_celltypes,
                num_celltypes]
    return argslist
def get_matrix_from_h5(filename):
    """Read a feature-barcode matrix from an HDF5 file via pytables.

    Expects a "matrix" group holding CSC components (data/indices/indptr/
    shape), barcodes, and a "features" subgroup — the layout written by
    CellRanger-style filtered_feature_bc_matrix.h5 files (see read_h5's
    caller; TODO confirm exact producer version).

    :param filename: path to the .h5 file
    :return: (feature_ref dict, barcodes array, scipy CSC matrix)
    """
    with tables.open_file(filename, 'r') as f:
        mat_group = f.get_node(f.root, 'matrix')
        barcodes = f.get_node(mat_group, 'barcodes').read()
        # Reassemble the sparse matrix from its raw CSC components.
        data = getattr(mat_group, 'data').read()
        indices = getattr(mat_group, 'indices').read()
        indptr = getattr(mat_group, 'indptr').read()
        shape = getattr(mat_group, 'shape').read()
        matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)
        # Collect per-feature annotations (id, name, type, plus extra tags).
        feature_ref = {}
        feature_group = f.get_node(mat_group, 'features')
        feature_ids = getattr(feature_group, 'id').read()
        feature_names = getattr(feature_group, 'name').read()
        feature_types = getattr(feature_group, 'feature_type').read()
        feature_ref['id'] = feature_ids
        feature_ref['name'] = feature_names
        feature_ref['feature_type'] = feature_types
        tag_keys = getattr(feature_group, '_all_tag_keys').read()
        for key in tag_keys:
            feature_ref[key] = getattr(feature_group, key.decode()).read()
        return feature_ref, barcodes, matrix
def read_npz(nparpath, genes, outdir, gmtmat):
    """Load a cached cells-x-genes matrix from an .npz archive.

    The archive is expected to contain "arr" (expression), "rows" (genes)
    and "cols" (barcodes). The matrix is reoriented to cells x genes,
    restricted to genes shared with *genes*, and re-cached under *outdir*.

    :return: (cells x genes array, barcodes, subset gmtmat, kept gene names)
    """
    h5outpath = os.path.join(
        outdir, "cellByGeneMatrix.npz")
    npobj = np.load(nparpath, allow_pickle=True)
    npar = npobj["arr"]
    # Heuristic orientation fix: assumes more genes than cells — TODO confirm.
    if npar.shape[0] > npar.shape[1]:
        npar = np.transpose(npar)
    ar_genes = npobj["rows"]
    barcodes = npobj["cols"]
    _, idx_g1, idx_g2 = np.intersect1d(genes, ar_genes, return_indices=True)
    # arsum = np.matrix.sum(npar, axis=0)
    # arsum = np.apply_along_axis(np.sum, 0, npar)
    npar = npar[:, idx_g2]
    gmtmat = gmtmat[idx_g1, :]
    out_genes = genes[idx_g1]
    np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes)
    return npar, barcodes, gmtmat, out_genes
def read_h5(h5path, genes, outdir, gmtmat):
    """Load a 10x-style .h5 feature matrix and align it to *genes*.

    Only "Gene Expression" features are kept. The result is a dense
    cells x len(genes) array with zeros for genes absent from the file.

    :param h5path: path to a filtered feature-barcode matrix .h5 file
    :param genes: gene universe that defines the output column order
    :param outdir: directory where the raw matrix is cached as .npz
    :param gmtmat: gene-by-TF membership matrix (returned unchanged)
    :return: (cells x genes array, barcodes, gmtmat, genes)
    """
    h5outpath = os.path.join(
        outdir, "cellByGeneMatrix.npz")
    # Must be in form of filtered feature matrix
    feature_ref, barcodes, matrix = get_matrix_from_h5(h5path)
    # Limit the array to gene expression
    idx_gexp = np.where(
        np.array(feature_ref["feature_type"] == b'Gene Expression'))[0]
    npar = matrix.toarray()
    npar = np.transpose(npar[idx_gexp, :])
    # Normalize npar by dividing by sum of the reads then multiplying by 1000)
    # arsum = np.apply_along_axis(np.sum, 0, npar)
    # arsum2d = np.zeros((1, npar.shape[1]))
    # arsum2d[0, :] = arsum
    # npar_scaled = (npar / arsum) * 1000
    # tmat = np.transpose(npar_scaled)
    # Scatter the measured genes into the fixed gene-universe layout.
    expar = np.zeros((len(barcodes), len(genes)), dtype=float)
    gene_names = np.array(
        feature_ref["name"], dtype="|U64")
    _, idx_g1, idx_g2 = np.intersect1d(genes, gene_names, return_indices=True)
    expar[:, idx_g1] = npar[:, idx_g2]
    np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes, genes=genes)
    # return npar, barcodes
    return expar, barcodes, gmtmat, genes
def get_genes_from_txt(genepath):
    """Read a one-gene-per-line text file into a fixed-width string array.

    :param genepath: path to the gene list file
    :return: numpy array of gene names (dtype "|U64")
    """
    return np.loadtxt(genepath, dtype="|U64")
def make_gmtmat(gmtpath, outdir, genepath):
    """Build (or load from cache) a boolean gene-by-TF membership matrix.

    Parses an MSigDB-style .gmt file (one TF gene set per line: name,
    description, then member genes) into a genes x TFs boolean matrix.
    The result is cached in <outdir>/gmt_conv_matrix.npz and reloaded on
    subsequent calls.

    :param gmtpath: path to the .gmt gene-set file
    :param outdir: directory for the cached matrix
    :param genepath: optional .txt file restricting the gene universe
    :return: (genes x TFs bool array, sorted TF names, sorted gene names)
    """
    gmtoutpath = os.path.join(
        outdir, "gmt_conv_matrix.npz")
    # Fast path: reuse the cached conversion matrix if it exists.
    if os.path.exists(gmtoutpath):
        npobj = np.load(gmtoutpath)
        npar = npobj["arr"]
        all_tfs = npobj["tfs"]
        all_genes = npobj["genes"]
        return npar, all_tfs, all_genes
    gmtdict = {}
    with open(gmtpath, "r") as gmtlink:
        for gmtline in gmtlink:
            gmtlist = gmtline.rstrip().split("\t")
            # Field 0 is the set (TF) name; fields 2+ are the member genes.
            gmtdict[gmtlist[0]] = gmtlist[2:]
    all_tfs = np.array(list(gmtdict.keys()))
    all_tfs = np.sort(all_tfs)
    all_genes = list(gmtdict.values())
    all_genes = list(chain.from_iterable(all_genes))
    all_genes = np.unique(all_genes)
    if genepath != "NA" and os.path.exists(genepath):
        select_genes = get_genes_from_txt(genepath)
        print("Limiting to {} genes found in {}".format(
            len(select_genes), genepath))
        all_genes = np.intersect1d(all_genes, select_genes)
    print("Found {} TFs and {} genes in {}".format(
        len(all_tfs), len(all_genes),
        gmtpath))
    npar = np.zeros((len(all_genes), len(all_tfs)), dtype=bool)
    for tf in all_tfs:
        # idx_tf is a 1-element index array, not a scalar; the modulo test
        # below relies on single-element array truthiness.
        idx_tf = np.where(all_tfs == tf)[0]
        genes = gmtdict[tf]
        # add index and +1 for the array
        for gene in genes:
            idx_gene = np.where(all_genes == gene)[0]
            npar[idx_gene, idx_tf] = True
        if idx_tf % 100 == 0:
            print("{}/{} TFs added".format(idx_tf[0], len(all_tfs)))
    np.savez_compressed(
        gmtoutpath, arr=npar, tfs=all_tfs, genes=all_genes)
    return npar, all_tfs, all_genes
def get_n_params(model):
    """Return the total number of scalar values across all parameters of *model*.

    :param model: a torch module exposing ``parameters()``
    :return: total parameter count as an int
    """
    # numel() is the product of a parameter's dimensions, matching the
    # manual size-product loop this replaces.
    return sum(param.numel() for param in model.parameters())
def get_paths(outdir, numlvs):
    """Prepare output locations for logs, the model, and its checkpoint.

    Creates <outdir>/logs. The checkpoint lives under the SLURM checkpoint
    volume when available, otherwise under <outdir>/logs/checkpoint.

    :param outdir: base output directory
    :param numlvs: number of latent variables (used in the model file name)
    :return: (logdir, modelpath, chkpath)
    """
    job_id = os.environ.get("SLURM_JOB_ID", "NA")
    model_name = "VAE_{}LVS.pt".format(numlvs)

    logdir = os.path.join(outdir, "logs")
    os.makedirs(logdir, exist_ok=True)
    modelpath = os.path.join(outdir, model_name)

    # Prefer the cluster checkpoint volume; fall back to a local directory.
    chkdir = os.path.join("/checkpoint/mkarimza", job_id)
    if not os.path.exists(chkdir):
        chkdir = os.path.join(logdir, "checkpoint")
        os.makedirs(chkdir, exist_ok=True)
    chkpath = os.path.join(chkdir, model_name)

    return logdir, modelpath, chkpath
def train_model(vae, optimizer, MINIBATCH, MAXEPOCH, expar, logdir,
                modelpath, chkpath, one_hot_ct_encoding,
                loss_scalers, predict_celltypes,
                celltypes=[], batch_idxs=None):
    """Train the VAE for MAXEPOCH epochs over shuffled minibatches of expar.

    Optimizes a scaled sum of reconstruction loss, KL divergence and
    (optionally) a cell-type cross-entropy term. Writes per-epoch losses to
    a timestamped log file, per-cell-type accuracies to a sibling file, and
    checkpoints the model every 10 epochs to both *modelpath* and *chkpath*.

    NOTE(review): ``celltypes=[]`` is a mutable default argument — harmless
    here since it is never mutated, but worth replacing with None.

    :param vae: the VAE module to train (already on `device`)
    :param optimizer: torch optimizer over the VAE parameters
    :param MINIBATCH: minibatch size
    :param MAXEPOCH: number of epochs
    :param expar: cells x genes expression array
    :param logdir: directory for the training log
    :param modelpath: primary checkpoint destination
    :param chkpath: secondary (cluster) checkpoint destination
    :param one_hot_ct_encoding: cells x celltypes one-hot tensor
    :param loss_scalers: [reconstruction, KLD, CE] divisors for loss terms
    :param predict_celltypes: whether to add the classification loss
    :param celltypes: ordered cell-type names for the accuracy log
    :param batch_idxs: optional per-cell batch indices for batch correction
    :return: the trained VAE
    """
    criterion_class = torch.nn.CrossEntropyLoss()
    # Timestamp made filesystem-safe for use in the log file name.
    time_str = str(datetime.now())
    time_str = time_str.replace(" ", "_")
    time_str = time_str.replace(":", "0")
    logpath = os.path.join(
        logdir,
        "training.log.{}.{}".format(
            os.environ["SLURM_JOB_ID"], time_str))
    accpath = logpath + "_accuracy.txt"
    loglink = open(logpath, "w")
    # header = ["Epoch", "Training.Loss", "MiniBatch.ID", "Time.Stamp"]
    header = ["Epoch", "Reconstruction.Loss", "KLD",
              "CE.Loss", "Accuracy", "MiniBatch.ID",
              "Time.Stamp"]
    loglink.write("\t".join(header) + "\n")
    loglink.close()
    if predict_celltypes:
        # Separate log: one accuracy column per cell type.
        acclink = open(accpath, "w")
        header_acc = ["Epoch"]
        for celltype in celltypes:
            header_acc.append(celltype + ".acc")
        acclink.write("\t".join(header_acc) + "\n")
        acclink.close()
    TOTBATCHIDX = int(expar.shape[0] / MINIBATCH)
    # loss_scalers = np.array([300, 1, 1])
    # One fixed shuffle of all cells, reused across every epoch.
    sampled_idxs = np.random.choice(
        np.arange(expar.shape[0]), expar.shape[0], replace=False)
    for epoch in range(MAXEPOCH):
        running_loss_reconst = 0
        running_kld = 0
        running_ce = 0
        running_loss = 0
        accval = 0
        # Per-cell predicted / true class labels, filled batch by batch.
        celltype_resps = np.zeros(
            (expar.shape[0]))
        celltype_preds = np.zeros(
            (expar.shape[0]))
        for idxbatch in range(TOTBATCHIDX):
            idxbatch_st = idxbatch * MINIBATCH
            idxbatch_end = (idxbatch + 1) * MINIBATCH
            if idxbatch_end > expar.shape[0]:
                idxbatch_end = expar.shape[0]
            cur_sidxs = sampled_idxs[idxbatch_st:idxbatch_end]
            train1 = torch.from_numpy(
                expar[cur_sidxs, :]).to(device).float()
            if batch_idxs is not None:
                batch_idxs_tensor = torch.from_numpy(
                    batch_idxs[cur_sidxs]).long().to(device).reshape(
                    -1, 1)
            # Minibatch library-size statistics used by the scVI-style loss.
            local_l_mean = np.mean(
                np.apply_along_axis(
                    np.sum, 1, expar[cur_sidxs, :]))
            local_l_var = np.var(
                np.apply_along_axis(
                    np.sum, 1, expar[cur_sidxs, :]))
            if batch_idxs is None:
                outdict = vae(train1)
            else:
                outdict = vae(train1, batch_idxs_tensor)
            ct_pred = outdict["ctpred"]
            # loss_1: reconstruction; loss_2: KL divergence.
            loss_1, loss_2 = loss_function(
                outdict['qz_m'], outdict['qz_v'], train1,
                outdict['px_rate'], outdict['px_r'],
                outdict['px_dropout'], outdict['ql_m'],
                outdict['ql_v'], True,
                local_l_mean, local_l_var)
            loss_1 = torch.mean(loss_1)
            loss_2 = torch.mean(loss_2)
            optimizer.zero_grad()
            if predict_celltypes:
                # Convert one-hot rows to class indices for CE loss/accuracy.
                one_hot_resp = torch.max(
                    one_hot_ct_encoding[cur_sidxs],
                    1)[1].to(device).long()
                one_hot_pred = torch.max(
                    ct_pred, 1)[1]
                celltype_resps[cur_sidxs] = \
                    one_hot_resp.detach().cpu().numpy()
                celltype_preds[cur_sidxs] = \
                    one_hot_pred.detach().cpu().numpy()
                adacc = accuracy_score(
                    one_hot_resp.detach().cpu().numpy(),
                    one_hot_pred.detach().cpu().numpy())
                accval += adacc
                loss_3 = criterion_class(
                    ct_pred, one_hot_resp)
            else:
                loss_3 = 0
            if idxbatch == 0:
                print(loss_1, loss_2, loss_3)
            # NOTE(review): idxbatch is never -1, so this adaptive rescaling
            # branch is dead code (presumably disabled deliberately).
            if idxbatch == -1 and epoch % 25 == 0:
                loss_scalers = np.array(
                    [loss_1.detach().cpu().numpy(),
                     loss_2.detach().cpu().numpy(),
                     loss_3.detach().cpu().numpy()])
                if np.min(loss_scalers) < 0:
                    if loss_2 < 0:
                        loss_2 = loss_2 * -1
                    else:
                        raise ValueError("One of the losses are negative")
                print(loss_1)
                print(loss_2)
                print(loss_3)
                loss_scalers = loss_scalers / np.min(loss_scalers)
            # Weighted total: each term divided by its configured scaler.
            loss = (loss_1 / torch.tensor(loss_scalers[0])) + (
                loss_2 / torch.tensor(loss_scalers[1])) + (
                loss_3 / torch.tensor(loss_scalers[2]))
            if idxbatch == 0:
                print(loss)
            if torch.isnan(loss):
                print("Losses: {} {} {}".format(loss_1, loss_2, loss_3))
                raise ValueError("NA occured in loss")
            # print(loss)
            # On GPU, backprop through apex amp's loss scaling.
            if torch.cuda.is_available():
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()
            running_loss_reconst += (loss_1 / loss_scalers[0])
            running_kld += (loss_2 / loss_scalers[1])
            running_ce += (loss_3 / loss_scalers[2])
            running_loss += loss
            del train1, outdict
            # del one_hot_temp
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        # Epoch-level averages over all minibatches.
        cur_loss = running_loss / TOTBATCHIDX
        cur_loss_reconst = running_loss_reconst / TOTBATCHIDX
        cur_kld = running_kld / TOTBATCHIDX
        cur_ce = running_ce / TOTBATCHIDX
        accval = accval / TOTBATCHIDX
        adlist_cts = [str(epoch)]
        # Per-cell-type accuracy over the one-vs-rest label masks.
        for k in range(len(celltypes)):
            pred_cell = celltype_preds == k
            resp_cell = celltype_resps == k
            cur_acc = accuracy_score(
                resp_cell, pred_cell)
            adlist_cts.append(str(round(cur_acc, 3)))
        if predict_celltypes:
            with open(accpath, "a+") as acclink:
                acclink.write("\t".join(adlist_cts) + "\n")
        print("Epoch {}, Loss {} at {}".format(
            epoch, cur_loss.item(), datetime.now()))
        with open(logpath, "a+") as loglink:
            adlist = [str(epoch), str(cur_loss_reconst.item()),
                      str(cur_kld.item()), str(cur_ce.item()),
                      str(round(accval, 3)),
                      str(idxbatch), str(datetime.now())]
            # adlist = [str(epoch), str(cur_loss.item()),
            #           str(idxbatch), str(datetime.now())]
            loglink.write("\t".join(adlist) + "\n")
        if epoch % 10 == 0:
            # Periodic checkpoint (model + optimizer, and amp state on GPU).
            checkpoint = {
                'model': vae.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            if torch.cuda.is_available():
                checkpoint["amp"] = amp.state_dict()
            for eachpath in [modelpath, chkpath]:
                torch.save(checkpoint, eachpath)
    return vae
def make_labels(metapath, expar, barcodes):
    """Align expression rows with cell-type labels from a metadata table.

    Reads the metadata TSV, derives "CellType" (falling back to
    "Site_Primary"), drops classes containing "Not" or "nan", and keeps
    only cells present in both *barcodes* and the metadata.

    :param metapath: path to the metadata .tsv (barcode-indexed)
    :param expar: cells x genes expression array aligned with *barcodes*
    :param barcodes: barcode array matching expar rows
    :return: (filtered expar, filtered metadata, kept barcodes,
        one-hot cell-type tensor)
    """
    # Bytes-typed barcodes are converted to unicode for matching.
    if "S" in str(barcodes.dtype):
        barcodes = np.array(barcodes, dtype="|U64")
    metadf = pd.read_csv(metapath, sep="\t", index_col=0)
    if "CellType" not in metadf.columns:
        if "Site_Primary" in metadf.columns:
            metadf["CellType"] = metadf["Site_Primary"]
    metadf["Barcode"] = metadf.index
    classes = np.unique(metadf["CellType"])
    # Drop placeholder classes ("Not...", "nan").
    classes = np.array(
        [each for each in classes if "Not" not in each])
    classes = np.array(
        [each for each in classes if "nan" not in each])
    metadf = metadf[metadf["CellType"].isin(classes)]
    metadf = metadf[metadf["Barcode"].isin(barcodes)]
    # Keep expression rows and metadata rows in the same (sorted) order.
    new_barcodes, idx_1, idx_2 = np.intersect1d(
        barcodes, np.array(metadf["Barcode"]),
        return_indices=True)
    outar = expar[idx_1, :]
    outdf = metadf.iloc[idx_2, :]
    out_barcodes = np.array(barcodes, dtype="|U64")[idx_1]
    one_hot_ct_encoding = pd.get_dummies(outdf["CellType"])
    one_hot_tensor = torch.from_numpy(np.array(one_hot_ct_encoding))
    return outar, outdf, out_barcodes, one_hot_tensor
def load_npar(nparpath, genes, outdir, gmtmat,
              metapath):
    """Dispatch expression loading by file extension and attach labels.

    Supports .npz caches, .gct/.tsv tables (with the CCLE-specific label
    maker imported lazily), and 10x-style .h5 files.

    :return: (labeled expression array, metadata df, barcodes, genes, gmtmat)
    """
    if ".npz" in nparpath:
        expar, barcodes, gmtmat, genes = read_npz(
            nparpath, genes, outdir, gmtmat)
        list_temp = make_labels(metapath, expar, barcodes)
    elif ".gct" in nparpath or ".tsv" in nparpath:
        expar, barcodes, gmtmat, genes = read_tsv(
            nparpath, genes, outdir, gmtmat, False)
        # Lazy import to avoid a hard dependency for non-CCLE inputs.
        from train_multitask_ccle import make_labels as tmp_fnc
        list_temp = tmp_fnc(
            metapath, expar, barcodes)
    elif ".h5" in nparpath:
        expar, barcodes, gmtmat, genes = read_h5(
            nparpath, genes, outdir, gmtmat)
        list_temp = make_labels(metapath, expar, barcodes)
    # NOTE(review): an unrecognized extension leaves list_temp unbound and
    # raises NameError here.
    expar, metadf, barcodes, _ = list_temp
    return expar, metadf, barcodes, genes, gmtmat
def filter_by_var(expar, genes, gmtmat, num_genes):
    """Keep only the *num_genes* most variable genes (columns of expar).

    Gene names and the gene-by-TF matrix are subset in the same order so
    all three outputs stay aligned.

    :param expar: cells x genes expression array
    :param genes: gene-name array aligned with expar columns
    :param gmtmat: genes x TFs membership matrix aligned with *genes*
    :param num_genes: number of top-variance genes to retain
    :return: (filtered expar, filtered genes, filtered gmtmat)
    """
    # Rank genes by column variance, highest first, and take the top slice.
    top = np.argsort(np.var(expar, axis=0))[::-1][:num_genes]
    return expar[:, top], genes[top], gmtmat[top, :]
def intersect_lists(genes_list):
    """Return the sorted common elements across all arrays in *genes_list*.

    Requires at least two arrays (matching the original contract).
    """
    common = np.intersect1d(genes_list[0], genes_list[1])
    for extra in genes_list[2:]:
        common = np.intersect1d(common, extra)
    return common
def load_inputs(nparpaths, gmtmat, outdir,
                genes, metapaths, filter_var=False,
                num_genes=2000):
    """Load one or more expression datasets into a single aligned matrix.

    Each dataset is loaded via load_npar, barcodes are prefixed with
    "File.<i>." so they stay unique, genes are intersected across datasets,
    and the per-dataset matrices are stacked into one cells x genes array.

    :param nparpaths: list of expression-file paths
    :param gmtmat: gene-by-TF membership matrix aligned with *genes*
    :param outdir: directory for per-dataset caches
    :param genes: initial gene universe
    :param metapaths: metadata paths, one per dataset
    :param filter_var: whether to keep only the most variable genes
    :param num_genes: number of genes retained when filter_var is True
    :return: dict with keys expar, metadf, barcodes, genes, gmtmat,
        cellTypes, batch_idx, one_hot
    """
    # Keep a pristine copy: load_npar mutates/rebinds gmtmat per dataset.
    GMTMAT = gmtmat
    gmtmat_genes = genes
    metadf_list = []
    expar_list = []
    barcodes_list = []
    genes_list = []
    celltypes_list = []
    num_barcodes = 0
    for i in range(len(nparpaths)):
        print("Loading {}".format(nparpaths[i]))
        expar, metadf, barcodes, genes, gmtmat = load_npar(
            nparpaths[i], genes, outdir, gmtmat, metapaths[i])
        expar_list.append(expar)
        barcodes_list.append(barcodes)
        celltypes_list.append(
            np.array(metadf["CellType"], dtype="|U64"))
        # Disambiguate barcodes across datasets with a "File.<i>." prefix.
        addf = pd.DataFrame(
            dict(OriginalBarcode=barcodes, CellType=celltypes_list[-1]))
        addf["Dataset"] = "File.{}.".format(i + 1)
        addf["Barcode"] = addf["Dataset"] + addf["OriginalBarcode"]
        addf["Batch.Index"] = i
        metadf_list.append(addf)
        genes_list.append(genes)
        num_barcodes += len(barcodes)
    metadf = pd.concat(metadf_list)
    metadf.index = metadf["Barcode"]
    if len(genes_list) > 1:
        genes = intersect_lists(genes_list)
    else:
        genes = genes_list[0]
    # Filter gmtmat
    # Subset the original (unmutated) gmt matrix to the final gene set.
    _, idx_1, idx_2 = np.intersect1d(gmtmat_genes, genes, return_indices=True)
    # gmtmat = gmtmat[idx_1, :]
    gmtmat = GMTMAT[idx_1, :]
    # Stack all datasets into one matrix over the shared gene set.
    npar = np.zeros((num_barcodes, len(genes)), dtype=int)
    i_st = 0
    i_end = 0
    for k in range(len(expar_list)):
        cur_genes = genes_list[k]
        expar = expar_list[k]
        shared_genes, idx_1, idx_2 = np.intersect1d(
            genes, cur_genes, return_indices=True)
        i_end = i_st + expar.shape[0]
        npar[i_st:i_end, idx_1] = expar[:, idx_2]
        i_st = i_end
    if filter_var:
        print("Filtering by variance")
        npar, genes, gmtmat = filter_by_var(
            npar, genes, gmtmat, num_genes)
    one_hot_ct_encoding = pd.get_dummies(metadf["CellType"])
    one_hot_tensor = torch.from_numpy(np.array(one_hot_ct_encoding))
    out_dict = dict(
        expar=npar,
        metadf=metadf,
        barcodes=np.array(metadf["Barcode"]),
        genes=genes,
        gmtmat=gmtmat,
        cellTypes=np.array(celltypes_list),
        batch_idx=np.array(metadf["Batch.Index"]),
        one_hot=one_hot_tensor)
    return out_dict
def main(gmtpath, nparpaths, outdir, numlvs, metapaths,
         dont_train=False, genepath="NA", existingmodelpath="NA",
         use_connections=True, loss_scalers=[1, 1, 1],
         predict_celltypes=True, num_celltypes=59, filter_var=False,
         num_genes=2000, include_batches=False):
    """End-to-end pipeline: load data, build/train the VAE, export results.

    Builds the gene-to-TF matrix, loads all expression datasets, trains
    (unless *dont_train*), then writes latent means, sampled Z values,
    per-latent variances, TF activations and UMAP plots under *outdir*.

    NOTE(review): ``loss_scalers=[1, 1, 1]`` is a mutable default argument;
    it is only read here, but replacing it with None would be safer.

    :param gmtpath: .gmt file mapping TFs to genes
    :param nparpaths: expression-file paths (.npz/.gct/.tsv/.h5)
    :param outdir: output directory
    :param numlvs: number of latent variables
    :param metapaths: metadata paths, one per dataset
    :param dont_train: if True, only apply an existing model
    :param genepath: optional .txt gene-universe restriction
    :param existingmodelpath: checkpoint to resume from ("NA" -> default)
    :param use_connections: enforce the TF-gene connectivity mask
    :param loss_scalers: divisors for [reconstruction, KLD, CE] losses
    :param predict_celltypes: add the cell-type classification head loss
    :param num_celltypes: size of the classification head
    :param filter_var: keep only the most variable genes
    :param num_genes: number of genes kept when filter_var is True
    :param include_batches: enable scVI-style batch correction
    """
    BATCHEFFECT_NUM = 0
    if include_batches:
        BATCHEFFECT_NUM = len(nparpaths)
    MINIBATCH = 32
    MAXEPOCH = 20
    gmtmat, tfs, genes = make_gmtmat(gmtpath, outdir, genepath)
    # expar, barcodes = read_h5(h5path, genes, outdir)
    dict_inputs = load_inputs(
        nparpaths, gmtmat, outdir, genes, metapaths, filter_var,
        num_genes)
    expar = dict_inputs["expar"]
    metadf = dict_inputs["metadf"]
    gmtmat = dict_inputs["gmtmat"]
    one_hot_ct_encoding = dict_inputs["one_hot"]
    barcodes = dict_inputs["barcodes"]
    batch_idxs = dict_inputs["batch_idx"]
    if not include_batches:
        batch_idxs = None
    # celltypes = dict_inputs["cellTypes"]
    celltypes = []
    if predict_celltypes:
        celltypes = list(pd.unique(metadf["CellType"]))
        celltypes.sort()
    # save metadf
    metadf.to_csv(
        os.path.join(outdir, "metadata.tsv.gz"),
        sep="\t", compression="gzip")
    # Save genes
    print("Shape of expar is : {}".format(expar.shape))
    save_genes(genes, outdir)
    print("Max in expar is {}".format(np.max(expar)))
    # Connectivity mask for the first encoder layer: either the TF-gene
    # membership matrix or an all-ones (fully connected) matrix.
    if use_connections:
        gmttensor = torch.from_numpy(
            np.transpose(gmtmat)).to(device).long()
    else:
        gmttensor = torch.ones(
            gmtmat.shape[1], gmtmat.shape[0]).to(device).long()
    print("Shape of expar is : {}".format(expar.shape))
    logdir, modelpath, chkpath = get_paths(outdir, numlvs)
    if existingmodelpath == "NA":
        existingmodelpath = modelpath
    vae = VAE(expar.shape[1], # num genes
              gmttensor,
              num_celltypes,
              BATCHEFFECT_NUM, # batch
              0, # labels
              gmtmat.shape[1], # hiddensize
              numlvs)
    n_params = get_n_params(vae)
    print(vae)
    print("VAE has {} parameters".format(n_params))
    vae.to(device)
    # optimizer = adabound.AdaBound(
    #     vae.parameters(), lr=0.001, final_lr=0.1)
    optimizer = torch.optim.Adam(
        vae.parameters(), lr=0.002)
    # On GPU, wrap model/optimizer with apex amp mixed precision.
    if torch.cuda.is_available():
        vae, optimizer = amp.initialize(
            vae, optimizer, opt_level=opt_level)
    vae, optimizer = load_existing_model(
        existingmodelpath, chkpath, vae, optimizer)
    if not dont_train:
        np.random.seed(42)
        # For 10 times, sample 1000 cells
        # 20 training rounds of MAXEPOCH epochs each, exporting latent
        # means and UMAPs after every round.
        for i in range(20):
            # idx_rand = np.random.choice(
            #     np.arange(expar.shape[0]), SAMPLE_IDXS)
            vae = train_model(
                vae, optimizer, MINIBATCH, MAXEPOCH,
                expar, logdir,
                modelpath, chkpath, one_hot_ct_encoding,
                loss_scalers, predict_celltypes,
                celltypes, batch_idxs)
            reconst, mumat, sd2mat, tf_act = apply_model(
                vae, expar, numlvs, MINIBATCH, batch_idxs)
            mudf = pd.DataFrame(mumat)
            mudf.columns = ["LV.mu.{}".format(each)
                            for each in range(numlvs)]
            mudf["Index"] = np.array(
                barcodes, dtype="|U64")
            mudf.index = mudf["Index"]
            mudf.to_csv(
                os.path.join(outdir, "VAE_mu-matrix.tsv.gz"),
                compression="gzip", sep="\t")
            make_plot_umap(mudf, metadf, outdir, numlvs)
    # Final application of the (possibly just-trained) model.
    reconst, mumat, sd2mat, tf_act = apply_model(
        vae, expar, numlvs, MINIBATCH, batch_idxs)
    tf_act_df = pd.DataFrame(tf_act)
    tf_act_df.index = np.array(
        barcodes, dtype="|U64")
    tf_act_df.columns = tfs
    tf_act_df["Labels"] = metadf.loc[tf_act_df.index]["CellType"]
    tf_act_df.to_csv(
        os.path.join(outdir, "VAE-TF-adjusted-weights_CellxTF.tsv.gz"),
        sep="\t", compression="gzip")
    # zmat = np_reparameterize(mumat, sd2mat)
    zmat = torch_reparameterize(mumat, sd2mat)
    zdf = pd.DataFrame(zmat)
    zdf.columns = ["LV.Z.{}".format(each)
                   for each in range(numlvs)]
    zdf["Index"] = np.array(
        barcodes, dtype="|U64")
    zdf.index = np.array(
        barcodes, dtype="|U64")
    zdf.to_csv(
        os.path.join(outdir, "VAE_Z-matrix.tsv.gz"),
        compression="gzip", sep="\t")
    outdir_full = os.path.join(
        outdir, "fullDatasetZPlot")
    os.makedirs(outdir_full, exist_ok=True)
    make_plot_umap(zdf, metadf, outdir_full, numlvs)
    mudf = pd.DataFrame(mumat)
    mudf.columns = ["LV.mu.{}".format(each)
                    for each in range(numlvs)]
    mudf["Index"] = np.array(
        barcodes, dtype="|U64")
    mudf.index = mudf["Index"]
    mudf.to_csv(
        os.path.join(outdir, "VAE_mu-matrix.tsv.gz"),
        compression="gzip", sep="\t")
    outdir_full = os.path.join(
        outdir, "fullDatasetPlot")
    os.makedirs(outdir_full, exist_ok=True)
    make_plot_umap(mudf, metadf, outdir_full, numlvs)
    sd2df = pd.DataFrame(sd2mat)
    sd2df.columns = [
        "LV.logVAR.{}".format(each)
        for each in range(numlvs)]
    sd2df["Index"] = mudf["Index"]
    sd2df.index = mudf["Index"]
    sd2df.to_csv(
        os.path.join(outdir, "VAE_variance-matrix.tsv.gz"),
        compression="gzip", sep="\t")
def np_reparameterize(mu, logvar):
    """Draw z = mu + eps * sigma (reparameterization trick) for numpy inputs.

    :param mu: array of latent means
    :param logvar: array of latent log-variances, same shape as *mu*
    :return: numpy array of sampled latent values
    """
    mean = torch.from_numpy(mu)
    std = torch.exp(0.5 * torch.from_numpy(logvar))
    noise = torch.randn_like(std)
    return (mean + noise * std).numpy()
def load_existing_model(modelpath, chkpath, vae, optimizer):
    """Restore VAE/optimizer state from the first loadable checkpoint.

    Tries *modelpath* then *chkpath*; strips DataParallel "module." prefixes
    from parameter names, and restores apex amp state when CUDA is available.
    Failures are silently ignored and the next path is tried.

    :return: (vae, optimizer), loaded when possible, unchanged otherwise
    """
    for eachpath in [modelpath, chkpath]:
        if os.path.exists(eachpath):
            try:
                checkpoint = torch.load(eachpath)
                state_dict = checkpoint['model']
                # Rename keys saved from a DataParallel-wrapped model.
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    k = k.replace('module.', '')
                    new_state_dict[k] = v
                vae.load_state_dict(new_state_dict)
                optimizer.load_state_dict(checkpoint['optimizer'])
                if torch.cuda.is_available():
                    amp.load_state_dict(checkpoint['amp'])
                print("Loaded from {}".format(eachpath))
                return vae, optimizer
            except Exception:
                # Corrupt/incompatible checkpoint: fall through to next path.
                pass
    print("Didn't load from any")
    return vae, optimizer
def save_genes(genes, outdir):
    """Write one gene name per line to <outdir>/genes.txt.

    :param genes: iterable of gene-name strings
    :param outdir: existing output directory
    """
    outpath = os.path.join(outdir, "genes.txt")
    # Context manager guarantees the handle is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(outpath, "w") as outlink:
        for gene in genes:
            outlink.write(gene + "\n")
def torch_reparameterize(mumat, varmat):
    """Sample z ~ Normal(mu, sqrt(var)) via rsample for numpy inputs.

    :param mumat: array of latent means
    :param varmat: array of latent variances, same shape as *mumat*
    :return: numpy array of sampled latent values
    """
    from torch.distributions import Normal
    location = torch.from_numpy(mumat)
    scale = torch.from_numpy(varmat).sqrt()
    sample = Normal(location, scale).rsample()
    return sample.detach().numpy()
def get_hidden_layer(vae, train1, batch_tensor=None, n_batch=0):
    """Compute the masked first-encoder-layer activations (TF activities).

    Re-applies the first fully-connected layer of the z-encoder manually,
    multiplying the weights by the TF-gene connection mask, so the output
    can be interpreted as per-cell TF activations. When batch correction is
    on, one-hot batch columns are appended to the input first.

    NOTE(review): relies on the custom layer exposing ``weights``,
    ``connections`` and ``bias`` attributes — confirm against model.py.

    :param vae: trained VAE with a masked first encoder layer
    :param train1: cells x genes input tensor (already on device)
    :param batch_tensor: optional per-cell batch indices
    :param n_batch: number of batches (0 disables batch one-hot append)
    :return: numpy array of hidden activations (cells x TFs)
    """
    if n_batch > 0 and batch_tensor is not None:
        # Append a one-hot batch-indicator block to the input features.
        batch_ar_temp = batch_tensor.reshape(-1).cpu().numpy()
        ad_mat = torch.zeros((train1.shape[0], n_batch))
        for j in range(n_batch):
            idx_j = np.where(batch_ar_temp == j)[0]
            ad_mat[idx_j, j] = 1
        train1 = torch.cat((train1, ad_mat.to(train1.device)), dim=-1)
    weight_mat = vae.z_encoder.encoder.fc_layers[0][0].weights
    connections = vae.z_encoder.encoder.fc_layers[0][0].connections
    # Zero out weights that do not correspond to TF-gene memberships.
    enforced_weights = torch.mul(
        weight_mat, connections)
    ew_times_x = torch.mm(train1, enforced_weights.detach().t())
    add_bias = vae.z_encoder.encoder.fc_layers[0][0].bias
    ew_times_x = torch.add(ew_times_x, add_bias)
    output = ew_times_x.cpu().detach().numpy()
    return output
def apply_model(vae, expar, numlvs, MINIBATCH, batch_idxs=None):
    """Run the VAE over all cells in minibatches and collect its outputs.

    :param vae: trained VAE
    :param expar: cells x genes expression array
    :param numlvs: number of latent variables
    :param MINIBATCH: minibatch size for the forward passes
    :param batch_idxs: optional per-cell batch indices
    :return: (reconstruction, latent means, latent variances,
        TF activations), all numpy arrays with one row per cell
    """
    n_batch = 0
    batch_tensor = None
    if batch_idxs is not None:
        n_batch = len(np.unique(batch_idxs))
    # Width of the masked hidden layer (number of TFs).
    conn_dim = vae.z_encoder.encoder.fc_layers[0][0].connections.shape[0]
    reconst = np.zeros(expar.shape)
    mumat = np.zeros((expar.shape[0], numlvs))
    sd2mat = np.zeros((expar.shape[0], numlvs))
    tf_activation = np.zeros((expar.shape[0], conn_dim))
    # +1 so the trailing partial minibatch is covered.
    TOTBATCHIDX = int(expar.shape[0] / MINIBATCH) + 1
    for idxbatch in range(TOTBATCHIDX):
        idxbatch_st = idxbatch * MINIBATCH
        if idxbatch_st >= expar.shape[0]:
            break
        idxbatch_end = min(
            [(idxbatch + 1) * MINIBATCH, expar.shape[0]])
        train1 = torch.from_numpy(
            expar[idxbatch_st:idxbatch_end, :]).to(device).float()
        if batch_idxs is None:
            outdict = vae(train1)
        else:
            batch_tensor = torch.from_numpy(
                batch_idxs[idxbatch_st:idxbatch_end]).to(
                device).long().reshape(-1, 1)
            outdict = vae(train1, batch_tensor)
        reconst[idxbatch_st:idxbatch_end, :] = \
            outdict["px_scale"].cpu().detach().numpy()
        mumat[idxbatch_st:idxbatch_end, :] = \
            outdict["qz_m"].cpu().detach().numpy()
        sd2mat[idxbatch_st:idxbatch_end, :] = \
            outdict["qz_v"].cpu().detach().numpy()
        tf_activation[idxbatch_st:idxbatch_end, :] = \
            get_hidden_layer(vae, train1, batch_tensor, n_batch)
        if idxbatch % 100 == 0:
            print("Applied on {}/{}".format(idxbatch, TOTBATCHIDX))
    return reconst, mumat, sd2mat, tf_activation
if __name__ == "__main__":
    # Command-line entry point: parse options and run the full pipeline.
    parser = ArgumentParser(
        description="Train VAE using "
                    "mapping of genes to TFs")
    parser.add_argument(
        "gmtpath",
        help="Path to GMT file mapping "
             "genes to TFs")
    parser.add_argument(
        "outdir",
        help="Path to output directory for "
             "saving the model and log files")
    parser.add_argument(
        "--nparpaths",
        nargs="*",
        help="Space-separated paths to scRNA-seq "
             "file npz containing arr, rows, and cols")
    parser.add_argument(
        "--numlvs",
        type=int,
        default=10,
        help="Number of latent variables")
    parser.add_argument(
        "--dont-train",
        action="store_true",
        help="Specify if you want to apply an existing "
             "model which is stored in outdir")
    parser.add_argument(
        "--genepath",
        default="NA",
        help="Path to .txt file containing "
             "one gene per line to limit the list "
             "of genes we use here")
    parser.add_argument(
        "--modelpath",
        default="NA",
        help="Specify if you don't want the "
             "model existing in <outdir>/VAE_<--numlvs>LVS.pt")
    parser.add_argument(
        "--metapaths",
        nargs="*",
        required=True,
        help="Space-separated path to metadata tsv with "
             "a column named as barcode and a "
             "column named as cell type")
    parser.add_argument(
        "--use-connections",
        action="store_true",
        help="If set, will enforce weights that don't "
             "correspong to TF-gene mappings to be zero")
    parser.add_argument(
        "--loss-scalers",
        nargs="*",
        default=[1, 1, 1],
        type=float,
        help="Specify values to divide "
             "MSE, KLD, and CE losses by: example: "
             "--loss-scalers 100 1 1")
    parser.add_argument(
        "--predict-celltypes",
        action="store_true",
        help="Specify --predict-celltypes to "
             "optimize the cell type prediction task as well")
    parser.add_argument(
        "--num-celltypes",
        default=59,
        type=int,
        help="Number of cell types to predict (must match "
             "the column CellType in metadata file)")
    parser.add_argument(
        "--filter-var",
        action="store_true",
        help="If specified, will filter by top 2000 most "
             "variant genes")
    parser.add_argument(
        "--num-genes",
        default=2000,
        type=int,
        help="Number of genes to filter by highest variance")
    parser.add_argument(
        "--include-batches",
        action="store_true",
        help="Specify if more than one h5 file is being passed "
             "and you want to allow scVI to correct the batches")
    args = parser.parse_args()
    print(args)
    # Default the resume path to the model file inside outdir.
    modelpath = args.modelpath
    if modelpath == "NA":
        modelpath = os.path.join(
            args.outdir, "VAE_{}LVS.pt".format(args.numlvs))
    main(args.gmtpath, args.nparpaths,
         args.outdir, args.numlvs, args.metapaths,
         args.dont_train, args.genepath, modelpath,
         args.use_connections, args.loss_scalers,
         args.predict_celltypes, args.num_celltypes,
         args.filter_var, args.num_genes,
         args.include_batches)
| 1.875 | 2 |
fsociety/__version__.py | kernel1337/fsociety | 1 | 12766162 | <gh_stars>1-10
# Package version, kept as a tuple so components can be compared numerically.
VERSION = (3, 2, 4)
# Dotted version string derived from the tuple, e.g. "3.2.4".
__version__ = ".".join(str(part) for part in VERSION)
| 1.4375 | 1 |
Random.random_walk_GRAPH.py | eltechno/python_course | 4 | 12766163 | # Numpy is imported, seed is set
import numpy as np

# Fixed seed so the simulated walk is reproducible.
np.random.seed(123)

# Start at height zero and take 100 dice-driven steps.
random_walk = [0]
for _ in range(100):
    position = random_walk[-1]
    roll = np.random.randint(1, 7)
    if roll <= 2:
        # 1 or 2: step down, but never below the ground floor.
        position = max(0, position - 1)
    elif roll <= 5:
        # 3, 4 or 5: step up by one.
        position = position + 1
    else:
        # 6: throw again and jump up by that amount.
        position = position + np.random.randint(1, 7)
    random_walk.append(position)

# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt

# Plot and display the walk.
plt.plot(random_walk)
plt.show()
staghunt/util.py | airibarne/msthesis | 0 | 12766164 | <filename>staghunt/util.py<gh_stars>0
"""Miscellaneous utility functions and classes"""
import gc
import numpy as np
import torch as t
def kronecker_delta(i, j):
    """Kronecker delta: 1 when the two indices coincide, 0 otherwise."""
    return int(i == j)
def _round_by_tol(x, tol):
return x if x > tol else 0
round_by_tol = np.vectorize(_round_by_tol, otypes=[np.float64])
def prod(x):
    """
    Computes the product of the elements of an iterable

    :param x: iterable
    :type x: iterable
    :return: product of the elements of x (1 for an empty iterable)
    """
    result = 1
    for factor in x:
        # Multiply on the left, matching the original accumulation order.
        result = factor * result
    return result
def memory_report():
    """Print device, dtype and shape of every live torch tensor tracked by gc."""
    live_tensors = (obj for obj in gc.get_objects() if t.is_tensor(obj))
    for tensor in live_tensors:
        print(tensor.device, tensor.dtype, tuple(tensor.shape))
def new_var(var_name, t_index, agent):
    """
    Naming of agent variables xij, where i in {1,...,T} and j in {1,...,M}

    :param var_name: variable name x, u, d,...
    :param t_index: time index
    :param agent: agent index
    :return: string with the name of the variable
    """
    return "{0}{1}_{2}".format(var_name, t_index, agent)
| 2.390625 | 2 |
scripts/zlevel.py | max-simon/master-thesis | 4 | 12766165 | #!/usr/bin/env python
from netCDF4 import Dataset # pylint: disable=no-name-in-module
import numpy as np
#########################################################
# Class for ROMS grd and clm files
# (For use in various post-processing scripts)
#########################################################
class getGrid(object):
    '''
    Read the basics of ROMS setup into class for further use in other functions
    and classes.

    Wraps a ROMS grid NetCDF file (and optionally climatology files), exposing
    the fields used by the post-processing routines in this module: land/sea
    mask, grid dimensions, topography, coordinates, cell areas and angles.
    '''
    # Read grid file
    def __init__(self,grdfile):
        '''Open *grdfile* and read the rho-point land/sea mask and grid size.'''
        # Set grd file
        self.grdfile = grdfile
        self.ncgrd = Dataset(grdfile, mode='r')
        # Read mask
        self.mask_rho = self.ncgrd.variables['mask_rho'][:]
        self.FillValue = getattr(self.ncgrd.variables['mask_rho'],'_FillValue')
        # Read dimensions (rows, columns of the rho mask)
        self.SY = self.mask_rho.shape[0]
        self.SX = self.mask_rho.shape[1]

    def getAttrs(self,clmfile):
        '''Read vertical-coordinate parameters (theta_s, theta_b, hc) and the
        number of sigma levels from the climatology file *clmfile*.'''
        # Set clm file
        self.ncclm = Dataset(clmfile, mode='r')
        # Read attributes; some files store them as global attributes,
        # others as scalar variables, hence the fallback.
        try:
            self.theta_s = getattr(self.ncclm,'theta_s')
            self.theta_b = getattr(self.ncclm,'theta_b')
            self.hc = getattr(self.ncclm,'hc')
        except AttributeError:
            self.theta_s = self.ncclm.variables['theta_s'][0]
            self.theta_b = self.ncclm.variables['theta_b'][0]
            self.hc = self.ncclm.variables['hc'][0]
        # Vertical dimension
        self.NZ = self.ncclm.dimensions['s_rho'].size

    def setClmFiles(self,clmfile,clm2file):
        '''Attach climatology files; ncclm is only (re)opened if not already set.'''
        # Set clm file
        if not hasattr(self, 'ncclm'):
            self.ncclm = Dataset(clmfile, mode='r')
        # Set clm2 file
        self.ncclm2 = Dataset(clm2file, mode='r')

    def getTopo(self):
        '''Read bathymetry h and its min/max attributes from the grid file.'''
        # Read topography
        self.h = self.ncgrd.variables['h'][:]
        self.hmin = getattr(self.ncgrd,'hmin')
        self.hmax = getattr(self.ncgrd,'hmax')

    def getLatLon(self):
        '''Read rho-point longitude/latitude arrays.'''
        # Read Lat/Lon
        self.lon_rho = self.ncgrd.variables['lon_rho'][:]
        self.lat_rho = self.ncgrd.variables['lat_rho'][:]

    def getArea(self):
        '''Compute the horizontal cell area as 1 / (pm * pn).'''
        # pm/pn are the inverse grid spacings stored in the grid file
        self.area = 1/(self.ncgrd.variables['pm'][:]*self.ncgrd.variables['pn'][:])

    def getAngle(self):
        '''Read the local grid rotation angle.'''
        # Read angle
        self.angle = self.ncgrd.variables['angle'][:]
#########################################################
# Vertical sigma level depths and spacing
#########################################################
def compute_zlev(fpin,fpin_grd,NZ,type,zeta=None,stype=3):
    """Compute the depths of ROMS sigma levels (negative below the surface).

    :param fpin: open NetCDF file (or object) providing theta_b, theta_s and
                 Tcline/hc, either as attributes or as scalar variables
    :param fpin_grd: open NetCDF file providing the bathymetry variable 'h'
    :param NZ: number of vertical (rho) levels
    :param type: 'r' for rho points (NZ levels), 'w' for w points (NZ+1 levels)
    :param zeta: optional free-surface elevation; if given, depths are
                 stretched to account for SSH (default: zero SSH)
    :param stype: type of sigma levels used:
                  1: similar to Song, Haidvogel 1994
                  2: Shchepetkin 2006
                  3: Shchepetkin 2010 (default)
    :return: array of shape (NZ[+1], SY, SX) with signed level depths
    """
    import numpy as np
    import sys
    h = fpin_grd.variables['h'][:,:]
    # theta_b/theta_s may be stored as attributes or as scalar variables.
    try:
        theta_b = fpin.theta_b
        theta_s = fpin.theta_s
    except AttributeError:
        theta_b = fpin.variables['theta_b'][0]
        theta_s = fpin.variables['theta_s'][0]
    if stype == 1:
        # BUGFIX: min(min(h)) is ambiguous for a 2-D array (raises ValueError);
        # use np.min to get the scalar minimum depth.
        hmin = np.min(h)
        try:
            Tcline = fpin.Tcline
            hc = min(hmin,Tcline)
        except AttributeError:
            hc = fpin.hc
            hc = min(hmin,hc)
    elif stype == 2 or stype == 3:
        try:
            hc = fpin.hc
        except AttributeError:
            # hc may be a variable:
            hc = fpin.variables['hc'][0]
    else:
        msg = '{}: Unknown type of sigma levels'.format(stype)
        sys.exit(msg)
    ds = 1./NZ  # float, to prevent integer division in sc
    if type == 'w':
        # w points sit at the layer interfaces: NZ+1 levels, sc in [-1, 0].
        lev = np.arange(NZ+1)
        sc = (lev - NZ) * ds
        nr_zlev = NZ+1  # number of vertical levels
    else:
        # rho points sit at layer centers: NZ levels.
        lev = np.arange(1,NZ+1)
        sc = -1 + (lev-0.5)*ds
        nr_zlev = NZ  # number of vertical levels
    # Stretching curves for the different formulations.
    Ptheta = np.sinh(theta_s*sc)/np.sinh(theta_s)
    Rtheta = np.tanh(theta_s*(sc+.5))/(2*np.tanh(.5*theta_s))-.5
    if stype <= 2:
        Cs = (1-theta_b)*Ptheta+theta_b*Rtheta
    elif stype == 3:
        if theta_s > 0:
            csrf=(1.-np.cosh(theta_s*sc))/(np.cosh(theta_s)-1.)
        else:
            csrf=-sc**2
        if theta_b > 0:
            Cs=(np.exp(theta_b*csrf)-1.)/(1.-np.exp(-theta_b))
        else:
            Cs=csrf
    # BUGFIX: np.float was deprecated and removed in NumPy >= 1.24;
    # the builtin float (== np.float64 here) is the correct dtype.
    z0 = np.zeros((nr_zlev,h.shape[0],h.shape[1]),float)
    if stype == 1:
        cff = (sc-Cs)*hc
        cff1 = Cs
        hinv = 1.0 / h
        for k in range(nr_zlev):
            z0[k,:,:] = cff[k]+cff1[k]*h
            if not (zeta is None):
                z0[k,:,:] = z0[k,:,:]+zeta*(1.+z0[k,:,:]*hinv)
    elif stype == 2 or stype == 3:
        hinv = 1.0/(h+hc)
        cff = hc*sc
        cff1 = Cs
        for k in range(nr_zlev):
            tmp1 = cff[k]+cff1[k]*h
            tmp2 = np.multiply(tmp1,hinv)
            if zeta is None:
                z0[k,:,:] = np.multiply(h,tmp2)
            else:
                z0[k,:,:] = zeta + np.multiply((zeta+h),tmp2)
    # Return
    return z0
def compute_dz(fpin,fpin_grd,NZ,zeta=None,stype=3):
    """Compute the thickness of each sigma layer at rho points.

    :param fpin: NetCDF file providing theta_b, theta_s and Tcline/hc
    :param fpin_grd: NetCDF file providing the bathymetry variable 'h'
    :param NZ: number of vertical (rho) levels
    :param zeta: optional free-surface elevation (default: zero SSH)
    :param stype: type of sigma levels used:
                  1: similar to Song, Haidvogel 1994
                  2: Shchepetkin 2006
                  3: Shchepetkin 2010 (or so)
    :return: array of shape (NZ, SY, SX) with positive layer thicknesses
    """
    # Depth of the w sigma levels (layer interfaces), made positive-down.
    # BUGFIX: forward the caller's stype instead of the hard-coded 3, which
    # silently ignored the parameter.
    depth_w = -compute_zlev(fpin,fpin_grd,NZ,type='w',zeta=zeta,stype=stype)
    # Thickness of each layer = difference between consecutive interfaces.
    dz_sigma = depth_w[:-1]-depth_w[1:]
    return dz_sigma
#########################################################
# Additions from Max Simon
# Author: <NAME>
# Year: 2020
#########################################################
def get_cell_heights(z_values, depth):
    """
    Reconstruct the vertical cell thicknesses from cell-center depths.

    If *depth* is False, z_values are rho-point depths indexed bottom-up
    (index 0 nearest the sea floor), one column per horizontal grid point:

        ------------- // surface, top second cell
              x       // rho point, idx 2
        ------------- // top first cell, bottom second cell
              x       // rho point, idx 1
        ------------- // top zero-th cell, bottom first cell
              x       // rho point, idx 0
        ------------- // ground, bottom zero-th cell

    If *depth* is True, z_values is a 1-D z-level axis indexed top-down
    (index 0 nearest the surface):

        ------------- // surface, top zero-th cell
              x       // depth point, idx 0
        ------------- // top first cell, bottom zero-th cell
              x       // depth point, idx 1
        ------------- // top second cell, bottom first cell
              x       // depth point, idx 2
        ------------- // ground, bottom second cell

    Idea:
    - loop from top to bottom (for depth=False that means last index first)
    - the distance from the current point to last_height is half the cell height
    - last_height starts at 0 and becomes _current point + half cell height_
      after each iteration
    - cell size is _2 x half the cell height_

    Note: if depth=False this runs per horizontal grid point (last_height is
    then a 2-D array), so z_values is assumed 3-D (levels, eta, xi) in that
    case — the shape[1]/shape[2] accesses below require it.
    """
    heights = np.zeros_like(z_values)
    # last_height: depth of the previously processed cell's lower interface.
    last_height = 0.0 if depth else np.zeros((z_values.shape[1], z_values.shape[2]))
    zero_edge_case = False
    for srho_idx in range(z_values.shape[0]):
        # go from top to bottom
        srho = srho_idx if depth else (z_values.shape[0] - srho_idx - 1)
        # handle edge case: a level lying exactly at the surface (z == 0)
        # is deferred and folded into the next level's computation.
        if srho == 0 and (z_values[srho] == 0).any():
            assert (z_values[srho] == 0).all()
            print('Zero Edge Case detected')
            zero_edge_case = True
            continue
        # calc dist to last height (half the current cell's thickness)
        half = np.abs(z_values[srho]) - last_height
        # handle edge case
        if srho == 1 and zero_edge_case:
            # NOTE(review): the skipped surface level gets only `half` (not
            # 2*half) as its thickness and `half` is halved for the current
            # level — presumably intentional for a level sitting exactly on
            # the surface; confirm against the calling conventions.
            half = 0.5*half
            previous_srho = 0 if depth else -1
            heights[previous_srho] = half
            zero_edge_case = False
            print('Zero Edge Case solved')
        assert np.array(half >= 0).all(), (srho_idx, srho, z_values[srho], last_height, half)
        heights[srho] = 2*half
        # update last_height
        last_height = np.abs(z_values[srho]) + half
    return heights
def create_zlevel_file(grid_path, sample_data_path, out_path):
    """
    Create a netCDF file containing the zlevels.

    Two input layouts are supported, detected from *sample_data_path*:
    - a z-sliced file (has a 'depth' dimension): level depths are read
      directly and only 1-D 'z_level'/'thickness_z' variables are written;
    - raw ROMS output: rho- and w-level depths are computed from the grid
      bathymetry and the file's sigma-coordinate parameters, and 3-D
      'z_level', 'z_level_w' and 'thickness_z' variables are written.

    :param grid_path: path to the ROMS grid file (provides 'h')
    :param sample_data_path: path to a sample data file defining dimensions
        and sigma parameters
    :param out_path: path of the netCDF file to create (overwritten)
    """
    sample_data = Dataset(sample_data_path)
    is_zslice_file = 'depth' in sample_data.dimensions
    if is_zslice_file:
        print('Sample Data is z sliced')
        z_levels = np.array(sample_data['depth'])
        z_thickness = get_cell_heights(z_levels, True)
        # Sanity check: summed thicknesses (half of the deepest cell) must
        # reproduce the deepest level depth exactly.
        assert np.sum(z_thickness[:-1]) + 0.5*z_thickness[-1] == abs(z_levels[-1]), (np.sum(z_thickness[:-1]), z_thickness[-1], z_levels[-1])
        with Dataset(out_path, mode='w') as new_dataset:
            # copy global attributes all at once via dictionary
            new_dataset.createDimension('depth', len(z_levels))
            # save zlevels
            new_dataset.createVariable('z_level', np.float32, dimensions=('depth',))
            new_dataset['z_level'][:] = np.abs(z_levels)
            new_dataset.createVariable('thickness_z', np.float32, dimensions=('depth'))
            new_dataset['thickness_z'][:] = np.abs(z_thickness)
    else:
        sample_data.close() # just make sure that we dont interfer with other routines
        print('Sample Data is raw ROMS output')
        # calculate the zlevels
        grid = Dataset(grid_path)
        sample_data = Dataset(sample_data_path)
        n_s_rho = sample_data.dimensions['s_rho'].size
        n_eta_rho = sample_data.dimensions['eta_rho'].size
        n_xi_rho = sample_data.dimensions['xi_rho'].size
        z_levels_rho = compute_zlev(sample_data, grid, n_s_rho, 'r')
        z_levels_w = compute_zlev(sample_data, grid, n_s_rho, 'w')
        z_thickness_rho = get_cell_heights(z_levels_rho, False)
        # Sanity check: summed layer thicknesses should match the bathymetry
        # to within a 5 m tolerance.
        control = np.sum(z_thickness_rho, axis=0) - np.array(grid['h'])
        assert np.max(np.abs(control)) < 5, 'Height calculation differs more than 5m'
        with Dataset(out_path, mode='w') as new_dataset:
            # copy global attributes all at once via dictionary
            new_dataset.createDimension('s_rho', n_s_rho)
            new_dataset.createDimension('eta_rho', n_eta_rho)
            new_dataset.createDimension('xi_rho', n_xi_rho)
            new_dataset.createDimension('s_w', n_s_rho + 1)
            # save zlevels
            new_dataset.createVariable('z_level', np.float32, dimensions=('s_rho', 'eta_rho', 'xi_rho'))
            new_dataset['z_level'][:] = np.abs(z_levels_rho)
            new_dataset.createVariable('z_level_w', np.float32, dimensions=('s_w', 'eta_rho', 'xi_rho'))
            new_dataset['z_level_w'][:] = np.abs(z_levels_w)
            new_dataset.createVariable('thickness_z', np.float32, dimensions=('s_rho', 'eta_rho', 'xi_rho'))
            new_dataset['thickness_z'][:] = np.abs(z_thickness_rho)
if __name__ == "__main__":
    import argparse

    # Build the command-line interface.
    cli = argparse.ArgumentParser()
    cli.add_argument('--input', type=str, required=True, help="Sample Input Path")
    cli.add_argument('--grid', type=str, required=True, help="Grid path")
    cli.add_argument('--output', type=str, help="Output path")
    parsed = cli.parse_args()

    # Generate the z-level file from the supplied paths.
    create_zlevel_file(parsed.grid, parsed.input, parsed.output)
| 2.359375 | 2 |
bridgetest/common/npm.py | Half-Shot/hs-bridge-test | 3 | 12766166 | import subprocess
import shutil
import tempfile
import logging
from time import sleep
logger = logging.getLogger(__name__)
class Npm:
    """Thin wrapper for running npm/node subprocesses in the bridge tests."""

    def __init__(self):
        # Handle of a currently running node process started with noRead=True.
        self.process = None

    def install(self, path):
        """Run ``npm install`` in *path*.

        :return: True on success
        :raises Exception: if npm exits with a non-zero status
        """
        logger.info("Installing npm packages...")
        process = subprocess.Popen(
            ["npm", "install"],
            cwd=path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        try:
            return_code = process.wait()
        except subprocess.TimeoutExpired:
            # NOTE(review): wait() without a timeout never raises this; the
            # handler is kept for safety in case a timeout is added later.
            return True
        # BUGFIX: compare the exit status with !=, not `is not` — identity
        # comparison with an int literal is unreliable and a SyntaxWarning
        # on Python 3.8+.
        if return_code != 0:
            raise Exception("Return code was non-zero")
        logger.info("Done.")
        return True

    def stop_process(self, kill_after=None):
        """Stop the tracked process, if any.

        :param kill_after: seconds to wait for output before terminating
        :return: (timedOut, returncode, (stdout, stderr)), or None if no
                 process is being tracked
        """
        if self.process is None:
            return
        outS, errS, timedOut = self.__read_process_stream(self.process, kill_after=kill_after)
        rc = self.process.returncode
        self.process = None
        return (timedOut, rc, (outS, errS))

    def start(self, path, cmd, kill_after=None, noRead=False):
        """Run ``node <cmd>`` in *path*.

        With noRead=True the process is left running and tracked on
        ``self.process`` (returns None); otherwise its output is collected
        (terminating after *kill_after* seconds) and
        (timedOut, returncode, (stdout, stderr)) is returned.
        """
        if self.process and noRead:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("Opening a process while a current one is running.")
        process = subprocess.Popen(
            ["node"] + cmd,
            cwd=path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if noRead:
            self.process = process
        else:
            outS, errS, timedOut = self.__read_process_stream(process, kill_after)
            return (timedOut, process.returncode, (outS, errS))

    def __read_process_stream(self, proc, kill_after=None):
        """Collect stdout/stderr of *proc*, killing it after *kill_after* seconds.

        :return: (stdout, stderr, timedOut)
        """
        timedOut = False
        outS = ""
        errS = ""
        try:
            outs, errs = proc.communicate(timeout=kill_after)
            outS = outs.decode()
            errS = errs.decode()
        except subprocess.TimeoutExpired as e:
            proc.terminate()
            proc.wait()
            # Salvage whatever output was produced before the timeout.
            if e.stdout is not None:
                outS = e.stdout.decode()
            if e.stderr is not None:
                errS = e.stderr.decode()
            timedOut = True
            logger.debug("%s was terminated", str(" ".join(proc.args)))
        return (outS, errS, timedOut)
| 2.59375 | 3 |
Lileb-Training/src/modules/new_head_llmodel.py | ForInLoveAting/LifeLongLearningBenchmark | 1 | 12766167 | <reponame>ForInLoveAting/LifeLongLearningBenchmark
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from src.modules.ll_model import LifelongLearningModel
from src.modules.utils import make_model, MultiHead
class NewHeadLLModel(LifelongLearningModel):
    """Lifelong learner that shares a single trunk across all tasks and
    attaches a brand-new classification head for every new task."""

    def __init__(self, *args, **kwargs):
        super(NewHeadLLModel, self).__init__(*args, **kwargs)
        # Shared feature extractor, created lazily on the first task.
        self.common_model = None

    def _new_model(self, x_dim, n_classes, **kwargs):
        if self.common_model is None:
            # First task: remember the layer sizes and build the trunk once.
            self.all_dims = [x_dim, *self.hidden_size]
            self.common_model = self.base_model_func(self.all_dims)
        assert x_dim == self.all_dims[0], (
            'SingleHead model can only be used '
            'with the same input dimensions. '
            'Was initialized with {} but got {}'
            ' on the last task'.format(self.all_dims[0], x_dim)
        )
        fresh_head = MultiHead(self.all_dims[-1], n_classes)
        return make_model(self.common_model, fresh_head)

    def finish_task(self, dataset):
        # Nothing to consolidate between tasks for this strategy.
        pass
| 2.21875 | 2 |
fairseq/models/abs_sum_roberta_transformer.py | XingxingZhang/abs_pretraining | 0 | 12766168 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import os
import json
import copy
import logging
from argparse import Namespace
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_transformers import RobertaModel, RobertaConfig
# from pytorch_transformers import BertModel, BertConfig
from fairseq import options, utils
from fairseq.modules import (
AdaptiveInput, AdaptiveSoftmax, CharacterTokenEmbedder, LayerNorm,
LearnedPositionalEmbedding, MultiheadAttention, SinusoidalPositionalEmbedding,
)
from . import (
FairseqIncrementalDecoder, FairseqEncoder, FairseqLanguageModel,
FairseqModel, register_model, register_model_architecture,
)
@register_model('roberta_transformer')
class AbsSumRobertaTransformerModel(FairseqModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Abstractive-summarization variant whose encoder wraps a pre-trained
    RoBERTa model; the decoder is either a fairseq TransformerDecoder or a
    BERT-style decoder (--roberta-decoder).

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder):
        # The FairseqModel base class wires the encoder/decoder pair together.
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-dropout', type=float, metavar='D',
                            help='decoder dropout probability')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--layer-norm-eps', type=float, metavar='D',
                            help='eps for layer norm')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        # parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
        #                     help='comma separated list of adaptive softmax cutoff points. '
        #                          'Must be used with adaptive_loss criterion'),
        # parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
        #                     help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--roberta-model', default='roberta-base',
                            help="RoBerta pre-trained model selected in the list: roberta-base, "
                                 "roberta-large.")
        parser.add_argument('--roberta-decoder', default=False, action='store_true',
                            help='if set, the decoder is built as BERT architecture, instead of Fairseq transformer')
        parser.add_argument('--roberta-decoder-initialization', default=False, action='store_true',
                            help='if set, the decoder is built as BERT architecture, instead of Fairseq transformer')
        parser.add_argument('--roberta-config-path', default=None, metavar='PRETRAINED_PATH',
                            help='roberta config json file path')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""

        # make sure all arguments are present in older models
        base_architecture(args)

        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = 1024
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = 1024

        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary

        def build_embedding(dictionary, embed_dim, path=None):
            # Fresh embedding table sized to the dictionary, optionally
            # initialised from a pre-trained embedding file.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # One embedding table shared by encoder input, decoder input and
            # decoder output; requires a joined dictionary and equal dims.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )

        encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens, left_pad=args.left_pad_source)
        if hasattr(args, 'roberta_decoder') and args.roberta_decoder:
            # BERT-style decoder configured from a RoBERTa JSON config file.
            print("Apply Bert Architecture as Decoder")
            # json_file_path = 'roberta-vocab/{0}-config.json'.format(args.roberta_model)
            json_file_path = args.roberta_config_path
            config = from_json_file(json_file_path)
            decoder_config = Namespace(**config)
            print(decoder_config)
            decoder = BertDecoder(args, decoder_config, tgt_dict, decoder_embed_tokens, left_pad=args.left_pad_target)
        else:
            decoder = TransformerDecoder(args, tgt_dict, decoder_embed_tokens, left_pad=args.left_pad_target)
        return AbsSumRobertaTransformerModel(encoder, decoder)

    def forward(self, src_tokens, segment_ids, prev_output_tokens):
        """Encode the source with the RoBERTa encoder, then decode.

        Args:
            src_tokens (LongTensor): source token ids `(batch, src_len)`
            segment_ids (LongTensor): token-type ids matching src_tokens
            prev_output_tokens (LongTensor): shifted target tokens for
                teacher forcing

        Returns:
            the decoder output
        """
        encoder_out = self.encoder(src_tokens, segment_ids)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out)
        return decoder_out

    def initilize_roberta_decoder(self):
        """Initialise the decoder with the RoBERTa encoder's parameters.

        Copies the embedding table and each encoder layer's weights into the
        corresponding decoder modules (only parameters with matching names).
        NOTE(review): the method name has a typo ("initilize") but is kept
        unchanged since external callers may rely on it.
        """
        print("Initializing the decoder with Roberta encoder parameters.")
        assert self.decoder is not None
        assert self.encoder is not None
        # Embedding
        # print(self.decoder.embeddings)
        # print(self.encoder.roberta.embeddings)
        self.decoder.embeddings = self.copy_params(self.encoder.roberta.embeddings, self.decoder.embeddings)
        # print(self.encoder.roberta.encoder.layer[0])
        # print(self.decoder.layers[0])
        # Layer list
        for i in range(len(self.encoder.roberta.encoder.layer)):
            self.decoder.layers[i] = self.copy_params(self.encoder.roberta.encoder.layer[i], self.decoder.layers[i])

    def copy_params(self, module1, module2):
        """Copy parameters from *module1* into *module2* by matching names.

        Only entries present in both state dicts are overwritten; *module2*
        is updated in place and also returned.
        """
        params1 = module1.state_dict()
        params2 = module2.state_dict()

        dict_param2 = dict(params2)
        for name1 in params1:
            # print(name1)
            # print(params1[name1].data)
            if name1 in dict_param2.keys():
                # print('before', dict_param2[name1])
                dict_param2[name1].data.copy_(params1[name1].data)
                # print('after', dict_param2[name1])
            # print('-------------------')
        module2.load_state_dict(dict_param2)
        return module2
def from_json_file(json_file):
    """Constructs a `BertConfig` from a json file of parameters."""
    with open(json_file, "r", encoding='utf-8') as reader:
        parsed = json.loads(reader.read())
    # Shallow-copy the parsed mapping into a plain dict, as before.
    return {key: value for key, value in parsed.items()}
class TransformerEncoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerEncoderLayer`.

    NOTE(review): in this variant the encoding is actually delegated to a
    pre-trained RoBERTa model; the stack of TransformerEncoderLayers is
    commented out below and only positional embeddings plus an optional
    final LayerNorm are applied on top of RoBERTa's hidden states.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
        left_pad (bool, optional): whether the input is left-padded
            (default: True).
    """

    def __init__(self, args, dictionary, embed_tokens, left_pad=False):
        super().__init__(dictionary)
        self.dropout = args.dropout
        self.n_gpu = torch.cuda.device_count()
        print('Distributed rank: ', args.distributed_rank)
        print('Number of used GPU: ', self.n_gpu)

        # if args.distributed_world_size > 1:
        #     if args.distributed_rank not in [-1, 0]:  # [1, 0]
        #         torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

        # Load pre-trained model (weights)
        config = RobertaConfig.from_pretrained(args.roberta_model)
        self.roberta = RobertaModel.from_pretrained(args.roberta_model, config=config)

        # if args.distributed_world_size > 1:
        #     if args.distributed_rank == 0:  # 1
        #         torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions

        self.embed_tokens = embed_tokens
        # embed_scale is defined but unused in forward() below
        self.embed_scale = math.sqrt(embed_dim)
        # Extra positional embeddings added on top of RoBERTa's output.
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            left_pad=left_pad,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None

        # NOTE(review): the fairseq encoder layer stack is intentionally
        # disabled; RoBERTa does the heavy lifting.
        # self.layers = nn.ModuleList([])
        # self.layers.extend([
        #     TransformerEncoderLayer(args)
        #     for i in range(args.encoder_layers)
        # ])
        self.register_buffer('version', torch.Tensor([2]))
        self.normalize = args.encoder_normalize_before
        if self.normalize:
            self.layer_norm = LayerNorm(embed_dim)

    # def forward(self, src_tokens, src_lengths):
    def forward(self, src_tokens, segment_ids):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            segment_ids (LongTensor): token-type ids of shape
                `(batch, src_len)` (expected to be all zeros here)

        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # print(src_tokens)
        # sum = src_tokens[:, 0].sum().item()
        # print(sum)
        bsz, seqlen = src_tokens.size()
        src_tokens = src_tokens.view(bsz, seqlen)
        segment_ids = segment_ids.view(bsz, seqlen)  # all fill 0
        # compute padding mask (True for real tokens, used as attention mask)
        attention_mask = src_tokens.ne(self.padding_idx)
        # print(attention_mask)
        # enc_hids, _ = self.bert(src_tokens, segment_ids, attention_mask, output_all_encoded_layers=False)
        # print(src_tokens)
        # Run RoBERTa; enc_hids is the last hidden state `(batch, seq, dim)`.
        enc_hids, _ = self.roberta(src_tokens, token_type_ids=segment_ids, attention_mask=attention_mask)
        # print('enc_hids', enc_hids.size())
        # doc_pos = self.sent_embed_positions(doc_pos_tok)
        # sent_repr = x[0].view(bsz, n_sent, -1)
        sent_repr = enc_hids
        # print( 'sent_repr', sent_repr.size() )
        # Add the extra learned/sinusoidal positional embeddings in place.
        if self.embed_positions is not None:
            sent_repr += self.embed_positions(src_tokens)

        # B x T x C -> T x B x C
        sent_repr = sent_repr.transpose(0, 1)
        # compute padding mask (True at pad positions, for the decoder)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers (disabled; see __init__)
        # for layer in self.layers:
        #     sent_repr = layer(sent_repr, encoder_padding_mask)
        if self.normalize:
            sent_repr = self.layer_norm(sent_repr)
        '''
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
        if self.normalize:
            x = self.layer_norm(x)
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }
        '''
        return {
            'encoder_out': sent_repr,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """
        Reorder encoder output according to *new_order*.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order

        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal embeddings are deterministic; drop persisted weights.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
left_pad (bool, optional): whether the input is left-padded
(default: False).
final_norm (bool, optional): apply layer norm to the output of the
final decoder layer (default: True).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, left_pad=False, final_norm=True):
super().__init__(dictionary)
self.dropout = args.decoder_dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
output_embed_dim = args.decoder_output_dim
padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, padding_idx,
left_pad=left_pad,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
self.adaptive_softmax = None
self.project_out_dim = Linear(embed_dim, output_embed_dim, bias=False) \
if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5)
self.register_buffer('version', torch.Tensor([2]))
self.normalize = args.decoder_normalize_before and final_norm
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """Run the decoder over (shifted) target tokens.

        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing.
            encoder_out (dict, optional): dict with 'encoder_out' and
                'encoder_padding_mask' entries, used for encoder-side
                attention.
            incremental_state (dict, optional): cached state for incremental
                decoding; when set, only the last target token is processed.

        Returns:
            tuple:
                - the last decoder layer's output of shape
                  `(batch, tgt_len, vocab)` (or embed dim when an adaptive
                  softmax handles the projection),
                - a dict with the last layer's attention weights ('attn') and
                  each layer's output ('inner_states').
        """
        # Embed positions over the full prefix (positional embeddings need
        # the whole prefix even when decoding incrementally).
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # Incremental decoding: only the newest token goes through the
            # stack; earlier keys/values are cached inside the layers.
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # Embed tokens (scaled by sqrt(d)) and add positions.
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C (time-major, as the layers expect)
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # decoder layers; the causal mask is only needed for full-sequence
        # (non-incremental) decoding.
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                incremental_state,
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.normalize:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            if self.share_input_output_embed:
                x = F.linear(x, self.embed_tokens.weight)
            else:
                x = F.linear(x, self.embed_out)
        return x, {'attn': attn, 'inner_states': inner_states}
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq.

        Mutates *state_dict* in place: sinusoidal position weights are
        dropped (they are recomputed), old layer-norm parameter names are
        migrated, and pre-v2 checkpoints have their final norm disabled.
        """
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal embeddings are deterministic; drop any stored
            # weights and keep only the dtype/device placeholder tensor.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms: old checkpoints indexed them 0/1/2 under
            # 'layer_norms'; map each to its current attribute name.
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        if utils.item(state_dict.get('{}.version'.format(name), torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict['{}.version'.format(name)] = torch.Tensor([1])
        return state_dict
class TransformerEncoderLayer(nn.Module):
    """Single transformer encoder layer: self-attention plus feed-forward.

    In the original paper each sub-block is post-processed with
    `dropout -> add residual -> layernorm`. The tensor2tensor variant
    instead pre-processes with layernorm and post-processes with
    `dropout -> add residual`; it is selected via
    *args.encoder_normalize_before*.

    Args:
        args (argparse.Namespace): parsed command-line arguments.
    """
    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout,
        )
        self.dropout = args.dropout
        self.relu_dropout = args.relu_dropout
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        # Index 0 norms the attention sub-block, index 1 the FFN sub-block.
        self.layer_norms = nn.ModuleList(LayerNorm(self.embed_dim) for _ in range(2))
    def forward(self, x, encoder_padding_mask):
        """Apply the layer.

        Args:
            x (Tensor): input of shape `(seq_len, batch, embed_dim)`.
            encoder_padding_mask (ByteTensor): `(batch, src_len)` mask where
                padding positions are ``1``.

        Returns:
            Tensor of shape `(seq_len, batch, embed_dim)`.
        """
        # --- self-attention sub-block ---
        shortcut = x
        x = self.maybe_layer_norm(0, x, before=True)
        x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.maybe_layer_norm(0, shortcut + x, after=True)
        # --- position-wise feed-forward sub-block ---
        shortcut = x
        x = self.maybe_layer_norm(1, x, before=True)
        x = F.dropout(F.relu(self.fc1(x)), p=self.relu_dropout, training=self.training)
        x = F.dropout(self.fc2(x), p=self.dropout, training=self.training)
        return self.maybe_layer_norm(1, shortcut + x, after=True)
    def maybe_layer_norm(self, i, x, before=False, after=False):
        """Apply layer norm i either pre- or post-sub-block, per config."""
        assert before ^ after
        apply_norm = after ^ self.normalize_before
        return self.layer_norms[i](x) if apply_norm else x
class TransformerDecoderLayer(nn.Module):
    """Decoder layer block.
    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """
    def __init__(self, args, no_encoder_attn=False):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.decoder_attention_heads,
            dropout=args.attention_dropout,
        )
        self.dropout = args.decoder_dropout
        self.relu_dropout = args.relu_dropout
        self.normalize_before = args.decoder_normalize_before
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        if no_encoder_attn:
            # Decoder-only mode (e.g. language modeling): no cross-attention.
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout,
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
        # Whether cross-attention weights are returned at inference time;
        # can be toggled via make_generation_fast_.
        self.need_attn = True
        self.onnx_trace = False
    def prepare_for_onnx_export_(self):
        # When tracing for ONNX, forward() also returns the self-attention
        # key/value buffers so the caller can thread state explicitly.
        self.onnx_trace = True
    def forward(self, x, encoder_out, encoder_padding_mask, incremental_state,
                prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None,
                self_attn_padding_mask=None):
        """Apply the layer.

        Args:
            x (Tensor): input of shape `(seq_len, batch, embed_dim)`.
            encoder_out (Tensor): encoder output for cross-attention
                (ignored when built with no_encoder_attn).
            encoder_padding_mask (ByteTensor): `(batch, src_len)` mask where
                padding elements are indicated by ``1``.
            incremental_state (dict): per-layer cached key/value state for
                incremental decoding; may be None for full-sequence decoding.
            prev_self_attn_state / prev_attn_state: externally supplied
                key/value buffers (used for ONNX-style explicit state).
            self_attn_mask (Tensor): causal mask for self-attention.
            self_attn_padding_mask (ByteTensor): target-side padding mask.

        Returns:
            `(x, attn)` — layer output and cross-attention weights (or None);
            with onnx_trace also the self-attention state as a third element.
        """
        # --- self-attention sub-block ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the attention cache from externally provided state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        attn = None
        # --- encoder (cross) attention sub-block ---
        if self.encoder_attn is not None:
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                # Attention weights are only materialized at inference time.
                need_weights=(not self.training and self.need_attn),
            )
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = residual + x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
        # --- position-wise feed-forward sub-block ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.relu_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace:
            # Also return the self-attention buffers for explicit state
            # threading during ONNX export.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply the given norm pre- or post-sub-block depending on config.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Disable cross-attention weight extraction for faster generation.
        self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding initialized N(0, d^-0.5) with a zeroed padding row."""
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(emb.weight, mean=0, std=embedding_dim ** -0.5)
    # Padding token must contribute nothing.
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
def Linear(in_features, out_features, bias=True):
    """Linear layer with Xavier-uniform weights and zero-initialized bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad, learned=False):
    """Build a learned or sinusoidal positional embedding.

    The table is oversized by `padding_idx + 1` slots because position
    numbering starts after the padding index.
    """
    table_size = num_embeddings + padding_idx + 1
    if not learned:
        return SinusoidalPositionalEmbedding(embedding_dim, padding_idx, left_pad, table_size)
    emb = LearnedPositionalEmbedding(table_size, embedding_dim, padding_idx, left_pad)
    nn.init.normal_(emb.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
@register_model_architecture('roberta_transformer', 'abs_sum_roberta_transformer_base')
def base_architecture(args):
    """Fill in every hyperparameter missing from *args* with its default.

    Entries are applied in order because later fallbacks read earlier
    attributes (e.g. decoder_embed_dim defaults to encoder_embed_dim).
    """
    # (attribute, fallback) pairs; each fallback receives args so defaults
    # can chain off previously resolved values.
    spec = (
        ('encoder_embed_path', lambda a: None),
        ('encoder_embed_dim', lambda a: 512),
        ('encoder_ffn_embed_dim', lambda a: 2048),
        ('encoder_layers', lambda a: 6),
        ('encoder_attention_heads', lambda a: 8),
        ('encoder_normalize_before', lambda a: False),
        ('encoder_learned_pos', lambda a: False),
        ('decoder_embed_path', lambda a: None),
        ('decoder_embed_dim', lambda a: a.encoder_embed_dim),
        ('decoder_ffn_embed_dim', lambda a: a.encoder_ffn_embed_dim),
        ('decoder_layers', lambda a: 6),
        ('decoder_attention_heads', lambda a: 8),
        ('decoder_normalize_before', lambda a: False),
        ('decoder_learned_pos', lambda a: False),
        ('attention_dropout', lambda a: 0.),
        ('relu_dropout', lambda a: 0.),
        ('dropout', lambda a: 0.1),
        ('decoder_dropout', lambda a: a.dropout),
        ('adaptive_softmax_cutoff', lambda a: None),
        ('adaptive_softmax_dropout', lambda a: 0),
        ('share_decoder_input_output_embed', lambda a: False),
        ('share_all_embeddings', lambda a: False),
        ('no_token_positional_embeddings', lambda a: False),
        ('adaptive_input', lambda a: False),
        ('decoder_output_dim', lambda a: a.decoder_embed_dim),
        ('decoder_input_dim', lambda a: a.decoder_embed_dim),
    )
    for name, fallback in spec:
        if not hasattr(args, name):
            setattr(args, name, fallback(args))
@register_model_architecture('roberta_transformer', 'abs_sum_roberta_transformer')
def transformer_abs_sum_roberta(args):
    """Roberta-base-sized (768-dim) summarization architecture."""
    # NOTE(review): four architecture functions in this file share this
    # function name; registration still works (the decorator runs at each
    # def), but the module attribute ends up bound to the last definition —
    # consider giving them distinct names.
    if not hasattr(args, 'encoder_embed_dim'):
        args.encoder_embed_dim = 768
    if not hasattr(args, 'encoder_normalize_before'):
        args.encoder_normalize_before = False
    if not hasattr(args, 'decoder_embed_dim'):
        args.decoder_embed_dim = 768
    base_architecture(args)
@register_model_architecture('roberta_transformer', 'abs_sum_roberta_transformer_medium')
def transformer_abs_sum_roberta(args):
    """Medium config: 768-dim, 3072 FFN, 12 heads, 12 decoder layers."""
    # NOTE(review): this function name duplicates other architecture defs in
    # this file; only the string passed to the decorator matters for lookup.
    defaults = (
        ('encoder_embed_dim', 768),
        ('encoder_ffn_embed_dim', 3072),
        ('encoder_attention_heads', 12),
        ('encoder_normalize_before', False),
        ('decoder_embed_dim', 768),
        ('decoder_ffn_embed_dim', 3072),
        ('decoder_attention_heads', 12),
        ('decoder_layers', 12),
    )
    for key, value in defaults:
        if not hasattr(args, key):
            setattr(args, key, value)
    base_architecture(args)
@register_model_architecture('roberta_transformer', 'abs_sum_roberta_transformer_large')
def transformer_abs_sum_roberta(args):
    """Large config: 1024-dim embeddings; everything else from base."""
    # NOTE(review): duplicate function name — see the other architecture
    # definitions above; the registered architecture string is what matters.
    for key, value in (('encoder_embed_dim', 1024),
                       ('encoder_normalize_before', False),
                       ('decoder_embed_dim', 1024)):
        if not hasattr(args, key):
            setattr(args, key, value)
    base_architecture(args)
@register_model_architecture('roberta_transformer', 'abs_sum_roberta_large_transformer_large')
def transformer_abs_sum_roberta(args):
    """Roberta-large encoder paired with a 1024-dim decoder."""
    # NOTE(review): duplicate function name — see the other architecture
    # definitions above; the registered architecture string is what matters.
    for key, value in (('encoder_embed_dim', 1024),
                       ('encoder_normalize_before', False),
                       ('decoder_embed_dim', 1024)):
        if not hasattr(args, key):
            setattr(args, key, value)
    base_architecture(args)
'''
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani, et al, 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.3)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, 'dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.relu_dropout = getattr(args, 'relu_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
'''
###################################################################################################
### Bert as Decoder
def gelu(x):
    """Gaussian Error Linear Unit, exact erf form: ``x * Phi(x)``.

    OpenAI GPT uses a slightly different tanh approximation:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).
    See https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish / SiLU activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
# class BertConfig(PretrainedConfig):
# r"""
# :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
# `BertModel`.
#
#
# Arguments:
# vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
# hidden_size: Size of the encoder layers and the pooler layer.
# num_hidden_layers: Number of hidden layers in the Transformer encoder.
# num_attention_heads: Number of attention heads for each attention layer in
# the Transformer encoder.
# intermediate_size: The size of the "intermediate" (i.e., feed-forward)
# layer in the Transformer encoder.
# hidden_act: The non-linear activation function (function or string) in the
# encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
# hidden_dropout_prob: The dropout probabilitiy for all fully connected
# layers in the embeddings, encoder, and pooler.
# attention_probs_dropout_prob: The dropout ratio for the attention
# probabilities.
# max_position_embeddings: The maximum sequence length that this model might
# ever be used with. Typically set this to something large just in case
# (e.g., 512 or 1024 or 2048).
# type_vocab_size: The vocabulary size of the `token_type_ids` passed into
# `BertModel`.
# initializer_range: The sttdev of the truncated_normal_initializer for
# initializing all weight matrices.
# layer_norm_eps: The epsilon used by LayerNorm.
# """
# pretrained_config_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
#
# def __init__(self,
# vocab_size_or_config_json_file=30522,
# hidden_size=768,
# num_hidden_layers=12,
# num_attention_heads=12,
# intermediate_size=3072,
# hidden_act="gelu",
# hidden_dropout_prob=0.1,
# attention_probs_dropout_prob=0.1,
# max_position_embeddings=512,
# type_vocab_size=2,
# initializer_range=0.02,
# layer_norm_eps=1e-12,
# **kwargs):
# super(BertConfig, self).__init__(**kwargs)
# # if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
# # and isinstance(vocab_size_or_config_json_file, unicode)):
# # with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
# # json_config = json.loads(reader.read())
# # for key, value in json_config.items():
# # self.__dict__[key] = value
# # elif isinstance(vocab_size_or_config_json_file, int):
# self.vocab_size = vocab_size_or_config_json_file
# self.hidden_size = hidden_size
# self.num_hidden_layers = num_hidden_layers
# self.num_attention_heads = num_attention_heads
# self.hidden_act = hidden_act
# self.intermediate_size = intermediate_size
# self.hidden_dropout_prob = hidden_dropout_prob
# self.attention_probs_dropout_prob = attention_probs_dropout_prob
# self.max_position_embeddings = max_position_embeddings
# self.type_vocab_size = type_vocab_size
# self.initializer_range = initializer_range
# self.layer_norm_eps = layer_norm_eps
# # else:
# # raise ValueError("First argument must be either a vocabulary size (int)"
# # "or the path to a pretrained model config file (str)")
class BertLayerNorm(nn.Module):
    """Layer norm in the TF style: epsilon added inside the square root."""
    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNorm, self).__init__()
        # Learnable affine parameters (gain and shift).
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps
    def forward(self, x):
        """Normalize the last dimension of *x* and apply the affine map."""
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
# try:
# from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
# except (ImportError, AttributeError) as e:
# class BertLayerNorm(nn.Module):
# def __init__(self, hidden_size, eps=1e-12):
# """Construct a layernorm module in the TF style (epsilon inside the square root).
# """
# super(BertLayerNorm, self).__init__()
# self.weight = nn.Parameter(torch.ones(hidden_size))
# self.bias = nn.Parameter(torch.zeros(hidden_size))
# self.variance_epsilon = eps
#
# def forward(self, x):
# u = x.mean(-1, keepdim=True)
# s = (x - u).pow(2).mean(-1, keepdim=True)
# x = (x - u) / torch.sqrt(s + self.variance_epsilon)
# return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings; then LayerNorm+dropout."""
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Name kept as `LayerNorm` (not snake_case) so TensorFlow checkpoint
        # variable names map onto this module without renaming.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        """Embed `(batch, seq)` token ids into `(batch, seq, hidden)`."""
        if position_ids is None:
            # Default to 0..seq_len-1 positions, broadcast over the batch.
            seq_length = input_ids.size(1)
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head attention operating on time-major `(T, B, E)` inputs.

    Unlike the stock BERT implementation, q/k/v here are reshaped fairseq
    style into `(B*heads, T, head_dim)` and combined with `bmm`.
    """
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = True # NOTE(review): hard-coded on; config.output_attentions is ignored
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        # NOTE(review): self.dropout is unused — forward() calls F.dropout
        # with attention_probs_dropout_prob instead.
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
    def transpose_for_scores(self, x):
        # Batch-major `(B, T, E)` -> `(B, heads, T, head_dim)`.
        # NOTE(review): unused by forward(), which uses the fairseq-style
        # `(B*heads, T, head_dim)` reshape instead.
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(self, query_hidden_states, key_hidden_states, value_hidden_states, attention_mask=None, head_mask=None):
        """Attend `query_hidden_states` over key/value states.

        Args:
            query_hidden_states (Tensor): `(tgt_len, batch, embed_dim)`.
            key_hidden_states / value_hidden_states (Tensor):
                `(src_len, batch, embed_dim)`.
            attention_mask (Tensor, optional): additive mask broadcast over
                the head dimension.
            head_mask (Tensor, optional): multiplicative per-head mask.

        Returns:
            `(context,)` or `(context, attention_probs)` when
            output_attentions is set; context is `(tgt_len, batch, embed_dim)`.
        """
        mixed_query_layer = self.query(query_hidden_states)
        mixed_key_layer = self.key(key_hidden_states)
        mixed_value_layer = self.value(value_hidden_states)
        tgt_len, bsz, embed_dim = query_hidden_states.size()
        # Fold heads into the batch dimension: (T, B, E) -> (B*heads, T, hd).
        query_layer = mixed_query_layer.contiguous().view(tgt_len, bsz * self.num_attention_heads, self.attention_head_size).transpose(0, 1)
        key_layer = mixed_key_layer.contiguous().view(-1, bsz * self.num_attention_heads, self.attention_head_size).transpose(0, 1)
        value_layer = mixed_value_layer.contiguous().view(-1, bsz * self.num_attention_heads, self.attention_head_size).transpose(0, 1)
        # Raw attention scores via q·k^T.
        # NOTE(review): the usual 1/sqrt(head_dim) scaling is NOT applied
        # here (it was deliberately removed, per the original commented-out
        # line) — confirm this matches the training setup.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(1, 2))
        if attention_mask is not None:
            # Additive mask (expected pre-computed with -inf at masked
            # positions), broadcast across the B*heads dimension.
            attention_mask = attention_mask.unsqueeze(0)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = utils.softmax(
            attention_scores, dim=-1
        ).type_as(attention_scores)
        # This drops entire tokens to attend to, which might seem unusual,
        # but follows the original Transformer paper.
        attention_probs = F.dropout(attention_probs, p=self.attention_probs_dropout_prob, training=self.training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.bmm(attention_probs, value_layer)
        # (B*heads, T, hd) -> (T, B, E).
        context_layer = context_layer.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class BertSelfOutput(nn.Module):
    """Project attention output, then apply residual add + LayerNorm."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        """Return LayerNorm(dropout(W·hidden_states) + input_tensor)."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Self-attention plus its output projection/residual sub-block."""
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def prune_heads(self, heads):
        """Remove the given attention heads and shrink q/k/v/output layers.

        Args:
            heads: iterable of head indices to prune.
        """
        if len(heads) == 0:
            return
        # Build a boolean keep-mask over the flattened (heads * head_size)
        # dimension, then the index of surviving columns.
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers (q/k/v prune output rows; output.dense prunes
        # input columns, hence dim=1).
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
    def forward(self, query_tensor, key_tensor, value_tensor, attention_mask=None, head_mask=None):
        """Run self-attention then the residual output block.

        Returns a tuple whose first element is the attention output; any
        extra elements are the attention probabilities from the inner module.
        """
        self_outputs = self.self(query_hidden_states=query_tensor,
                                 key_hidden_states=key_tensor,
                                 value_hidden_states=value_tensor,
                                 attention_mask=attention_mask,
                                 head_mask=head_mask)
        # Residual connection uses the (pre-attention) query tensor.
        attention_output = self.output(self_outputs[0], query_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
def prune_linear_layer(layer, index, dim=0):
    """Return a new nn.Linear keeping only the entries of ``layer`` selected
    by ``index`` along dimension ``dim`` (0 = output features, 1 = input
    features).  The new layer lives on the same device and its parameters
    have requires_grad=True.  Used to remove attention heads.
    """
    index = index.to(layer.weight.device)
    # Slice the weight matrix along the pruned dimension.
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    kept_bias = None
    if layer.bias is not None:
        # The bias only shrinks when output features (dim == 0) are pruned.
        kept_bias = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
    out_features, in_features = layer.weight.size()
    if dim == 0:
        out_features = len(index)
    else:
        in_features = len(index)
    new_layer = nn.Linear(in_features, out_features, bias=layer.bias is not None).to(layer.weight.device)
    # Copy the kept parameters in without recording the copy in autograd.
    with torch.no_grad():
        new_layer.weight.copy_(kept_weight.contiguous())
        if kept_bias is not None:
            new_layer.bias.copy_(kept_bias.contiguous())
    new_layer.weight.requires_grad = True
    if kept_bias is not None:
        new_layer.bias.requires_grad = True
    return new_layer
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion: hidden_size -> intermediate_size,
    followed by the configured activation looked up in ACT2FN."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
        #     self.intermediate_act_fn = ACT2FN[config.hidden_act]
        # else:
        #     self.intermediate_act_fn = config.hidden_act
        # config.hidden_act is assumed to be a string key into ACT2FN here.
        self.intermediate_act_fn = ACT2FN[config.hidden_act]
    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BertOutput(nn.Module):
    """Feed-forward output stage: project intermediate_size back down to
    hidden_size, apply dropout, then a residual add and LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Return LayerNorm(dropout(dense(hidden_states)) + input_tensor)."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertDecoderLayer(nn.Module):
    """One decoder block: masked BERT self-attention, encoder-decoder
    cross-attention (fairseq MultiheadAttention), then a position-wise
    feed-forward with a residual connection."""
    def __init__(self, config, args):
        super(BertDecoderLayer, self).__init__()
        self.attention = BertAttention(config)
        # self.self_intermediate = BertIntermediate(config)
        self.encoder_attention = MultiheadAttention(config.hidden_size, config.num_attention_heads,
                                                    dropout=args.attention_dropout,)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
        # When True, cross-attention weights are also returned at eval time.
        self.need_attn = True
    def forward(self, x, encoder_hidden_states, encoder_padding_mask, self_attn_mask=None, head_mask=None):
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        self_attention_outputs = self.attention(query_tensor=x, key_tensor=x, value_tensor=x,
                                                attention_mask=self_attn_mask, head_mask=head_mask)
        self_attention_output = self_attention_outputs[0]
        # self_intermediate_output = self.self_intermediate(self_attention_output)
        # Cross-attend over the (static) encoder states; key_padding_mask
        # hides encoder padding positions.
        attention_outputs = self.encoder_attention(query=self_attention_output, key=encoder_hidden_states,
                                                   value=encoder_hidden_states, key_padding_mask=encoder_padding_mask,
                                                   incremental_state=None,
                                                   static_kv=True,
                                                   need_weights=(not self.training and self.need_attn),)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs
class BertDecoder(FairseqIncrementalDecoder):
    """
    Bert decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayer`.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
        left_pad (bool, optional): whether the input is left-padded
            (default: False).
        final_norm (bool, optional): apply layer norm to the output of the
            final decoder layer (default: True).
    """
    def __init__(self, args, config, dictionary, embed_tokens, no_encoder_attn=False, left_pad=False, final_norm=True):
        super().__init__(dictionary)
        # BERT-style token (+ position) embeddings replace fairseq's own
        # embed_tokens / embed_positions machinery below.
        self.embeddings = BertEmbeddings(config)
        # self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = config.hidden_size  # embed_tokens.embedding_dim
        embed_dim = config.hidden_size  # args.decoder_embed_dim
        output_embed_dim = config.hidden_size  # args.decoder_output_dim
        # padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        # self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # self.project_in_dim = BertLinear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        # self.embed_positions = BertPositionalEmbedding(
        #     args.max_target_positions, embed_dim, padding_idx,
        #     left_pad=left_pad,
        #     learned=args.decoder_learned_pos,
        # ) if not args.no_token_positional_embeddings else None
        # Positions are handled inside BertEmbeddings, so no separate module.
        self.embed_positions = None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            BertDecoderLayer(config, args)
            for _ in range(config.num_hidden_layers)
        ])
        self.adaptive_softmax = None
        # self.project_out_dim = BertLinear(embed_dim, output_embed_dim, bias=False) \
        #     if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None
        # if args.adaptive_softmax_cutoff is not None:
        #     self.adaptive_softmax = AdaptiveSoftmax(
        #         len(dictionary),
        #         output_embed_dim,
        #         options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
        #         dropout=args.adaptive_softmax_dropout,
        #         adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
        #         factor=args.adaptive_softmax_factor,
        #         tie_proj=args.tie_adaptive_proj,
        #     )
        # Output projection matrix (vocab x hidden), used instead of tied
        # input embeddings.
        self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim))
        nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5)
        self.register_buffer('version', torch.Tensor([2]))
        self.normalize = args.decoder_normalize_before and final_norm
        if self.normalize:
            self.layer_norm = LayerNorm(embed_dim)
    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
        Returns:
            tuple:
                - the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`
                - the last decoder layer's attention weights of shape `(batch,
                  tgt_len, src_len)`
        """
        # print(encoder_out)
        # print(incremental_state)
        # exit(1)
        # embed positions
        # incremental_state = None
        # positions = self.embed_positions(
        #     prev_output_tokens,
        #     incremental_state=incremental_state,
        # ) if self.embed_positions is not None else None
        #
        # if incremental_state is not None:
        #     prev_output_tokens = prev_output_tokens[:, -1:]
        #     if positions is not None:
        #         positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embeddings(prev_output_tokens)
        # if positions is not None:
        #     x += positions
        # x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        # print('new batch')
        # print(prev_output_tokens.shape)
        # print('x', x.shape)
        inner_states = [x]
        # decoder layers
        for layer in self.layers:
            # print('=========')
            # A causal (future-blind) mask is only applied during full-sequence
            # (teacher-forced) decoding; incremental decoding never sees future tokens.
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.normalize:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        # self.project_out_dim = None
        # if self.project_out_dim is not None:
        #     x = self.project_out_dim(x)
        # self.adaptive_softmax = None
        # print(self.share_input_output_embed)
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            # if self.share_input_output_embed:
            #     x = F.linear(x, self.embed_tokens.weight)
            # else:
            x = F.linear(x, self.embed_out)
        return x, {'attn': attn, 'inner_states': inner_states}
    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())
    def buffered_future_mask(self, tensor):
        """Return (and cache) an upper-triangular -inf mask of size dim x dim
        for causal self-attention; the cache grows as longer inputs arrive."""
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        if utils.item(state_dict.get('{}.version'.format(name), torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict['{}.version'.format(name)] = torch.Tensor([1])
        return state_dict
def BertEmbedding(num_embeddings, embedding_dim, padding_idx):
    """Build an nn.Embedding initialised with N(0, d^-0.5) weights and a
    zeroed padding row."""
    embedding = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(embedding.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(embedding.weight[padding_idx], 0)
    return embedding
def BertLinear(in_features, out_features, bias=True):
    """Build an nn.Linear with Xavier-uniform weights and (if present) a
    zero-initialised bias."""
    linear = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(linear.weight)
    if bias:
        nn.init.constant_(linear.bias, 0.)
    return linear
def BertPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad, learned=False):
    """Build a positional embedding sized num_embeddings + padding_idx + 1:
    either a learned table (normal-initialised, zeroed padding row) or a
    fixed sinusoidal embedding."""
    if learned:
        m = LearnedPositionalEmbedding(num_embeddings + padding_idx + 1, embedding_dim, padding_idx, left_pad)
        nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
        nn.init.constant_(m.weight[padding_idx], 0)
    else:
        m = SinusoidalPositionalEmbedding(embedding_dim, padding_idx, left_pad, num_embeddings + padding_idx + 1)
    return m
| 2.15625 | 2 |
Data/print_data_from_mdx_view.py | wimgielis/tm1py-samples | 36 | 12766169 | """
Create MDX View on }ClientGroups cube and query data through it.
IMPORTANT: MDX Views can not be seen through Architect/Perspectives.
"""
import configparser
import uuid
from TM1py.Objects import MDXView
from TM1py.Services import TM1Service
# Read the TM1 connection settings for instance 'tm1srv01'.
config = configparser.ConfigParser()
# storing the credentials in a file is not recommended for purposes other than testing.
# it's better to setup CAM with SSO or use keyring to store credentials in the windows credential manager. Sample:
# Samples/credentials_best_practice.py
config.read(r'..\config.ini')
with TM1Service(**config['tm1srv01']) as tm1:
    # Random text
    # A random suffix keeps the temporary view name unique between runs.
    random_string = str(uuid.uuid4())
    # Create mdx view
    # All clients on rows, all groups on columns, suppressing empty cells.
    mdx = "SELECT " \
          "NON EMPTY {TM1SUBSETALL( [}Clients] )} on ROWS, " \
          "NON EMPTY {TM1SUBSETALL( [}Groups] )} ON COLUMNS " \
          "FROM [}ClientGroups]"
    mdx_view = MDXView(cube_name='}ClientGroups', view_name='TM1py_' + random_string, MDX=mdx)
    # Create mdx view on TM1 Server
    tm1.cubes.views.create(view=mdx_view)
    # Get view content
    content = tm1.cubes.cells.execute_view(cube_name=mdx_view.cube, view_name=mdx_view.name)
    # Print content
    print(content)
| 2.5 | 2 |
train_refinement.py | wuli-heyjude/Instance-Segmentation | 1 | 12766170 | import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, ConcatDataset
from argparse import ArgumentParser
from models.psp.pspnet import PSPNet
from models.sobel_op import SobelComputer
from dataset import OnlineTransformDataset
from util.logger import BoardLogger
from util.model_saver import ModelSaver
from util.hyper_para import HyperParameters
from util.log_integrator import Integrator
from util.metrics_compute import compute_loss_and_metrics, iou_hooks_to_be_used
from util.image_saver import vis_prediction
import time
import os
import datetime
# Let cuDNN search for the fastest conv algorithms (inputs have fixed shapes).
torch.backends.cudnn.benchmark = True
# Parse command line arguments
para = HyperParameters()
para.parse()
parser = ArgumentParser()
parser.add_argument('data_path', help='Image path')
args = parser.parse_args()
# Logging
# Build a unique run id from the user id plus a timestamp; an id of 'null'
# (any case) disables persistent logging.
if para['id'].lower() != 'null':
    long_id = '%s_%s' % (para['id'],datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
else:
    long_id = None
logger = BoardLogger(long_id)
logger.log_string('hyperpara', str(para))
print('CUDA Device count: ', torch.cuda.device_count())
# Construct model
model = PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50')
# NOTE(review): device_ids assumes exactly 4 visible GPUs -- confirm.
model = nn.DataParallel(
    model.cuda(), device_ids=[0,1,2,3]
)
if para['load'] is not None:
    model.load_state_dict(torch.load(para['load']))
optimizer = optim.Adam(model.parameters(), lr=para['lr'], weight_decay=para['weight_decay'])
data_dir = args.data_path
dataset = OnlineTransformDataset(data_dir, method=1, perturb=True)
print('dataset size: ', len(dataset))
#train_dataset = ConcatDataset([fss_dataset, duts_tr_dataset, duts_te_dataset, ecssd_dataset, msra_dataset])
#train_dataset = ConcatDataset([ duts_tr_dataset])
# For randomness: https://github.com/pytorch/pytorch/issues/5059
def worker_init_fn(worker_id):
    """Give every DataLoader worker a distinct, deterministic numpy seed.

    Works around https://github.com/pytorch/pytorch/issues/5059, where all
    workers would otherwise inherit identical RNG state from the parent.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
# Dataloaders, multi-process data loading
train_loader = DataLoader(dataset, para['batch_size'], shuffle=True, num_workers=8,
                          worker_init_fn=worker_init_fn, drop_last=True, pin_memory=True)
sobel_compute = SobelComputer()
# Learning rate decay scheduling
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, para['steps'], para['gamma'])
saver = ModelSaver(long_id)
report_interval = 50
save_im_interval = 800
# Translate the iteration budget into whole epochs (rounded to nearest).
total_epoch = int(para['iterations']/len(train_loader) + 0.5)
print('Actual training epoch: ', total_epoch)
train_integrator = Integrator(logger)
train_integrator.add_hook(iou_hooks_to_be_used)
total_iter = 0
last_time = 0
for e in range(total_epoch):
    np.random.seed() # reset seed
    epoch_start_time = time.time()
    # Train loop
    model = model.train()
    for im, seg, gt in train_loader:
        im, seg, gt = im.cuda(), seg.cuda(), gt.cuda()
        total_iter += 1
        # Periodic checkpoint.
        if total_iter % 5000 == 0:
            saver.save_model(model, total_iter)
        images = model(im, seg)
        images['im'] = im
        images['seg'] = seg
        images['gt'] = gt
        # Edge maps feed the gradient-based part of the loss.
        sobel_compute.compute_edges(images)
        loss_and_metrics = compute_loss_and_metrics(images, para)
        train_integrator.add_dict(loss_and_metrics)
        optimizer.zero_grad()
        (loss_and_metrics['total_loss']).backward()
        optimizer.step()
        if total_iter % report_interval == 0:
            logger.log_scalar('train/lr', scheduler.get_lr()[0], total_iter)
            train_integrator.finalize('train', total_iter)
            train_integrator.reset_except_hooks()
        # Need to put step AFTER get_lr() for correct logging, see issue #22107 in PyTorch
        scheduler.step()
        if total_iter % save_im_interval == 0:
            predict_vis = vis_prediction(images)
            logger.log_cv2('train/predict', predict_vis, total_iter)
# Final save!
saver.save_model(model, total_iter)
| 1.976563 | 2 |
tests/user_test.py | jjinno/pygerduty | 144 | 12766171 | from __future__ import absolute_import
import httpretty
import pygerduty
import pygerduty.v2
###################
# Version 1 Tests #
###################
@httpretty.activate
def test_get_user_v1():
    """v1 API: users.show() parses id/name/role from the mocked response."""
    body = open('tests/fixtures/user_v1.json').read()
    httpretty.register_uri(
        httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7",
        body=body, status=200)
    p = pygerduty.PagerDuty("contosso", "password")
    user = p.users.show("PIJ90N7")
    assert user.id == "PIJ90N7"
    assert user.name == "<NAME>"
    assert user.role == "admin"
@httpretty.activate
def test_list_user_contact_methods_v1():
    """v1 API: a user's contact methods list contains one of each type."""
    user_body = open('tests/fixtures/user_v1.json').read()
    contact_body = open('tests/fixtures/contacts_v1.json').read()
    httpretty.register_uri(
        httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7",
        body=user_body, status=200),
    httpretty.register_uri(
        httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7/contact_methods",
        body=contact_body, status=200)
    p = pygerduty.PagerDuty("contosso", "password")
    user = p.users.show("PIJ90N7")
    contact_methods = [c for c in user.contact_methods.list()]
    assert len(contact_methods) == 3
    assert len([c for c in contact_methods if c.type == "email"]) == 1
    assert len([c for c in contact_methods if c.type == "phone"]) == 1
    assert len([c for c in contact_methods if c.type == "SMS"]) == 1
###################
# Version 2 Tests #
###################
@httpretty.activate
def test_get_user_v2():
    """v2 API: users.show() parses fields and exposes 'self' as 'self_'."""
    body = open('tests/fixtures/user_v2.json').read()
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
        body=body, status=200)
    p = pygerduty.v2.PagerDuty("password")
    user = p.users.show("PXPGF42")
    assert user.id == "PXPGF42"
    assert user.name == "<NAME>"
    assert user.role == "admin"
    assert user.self_ == 'https://api.pagerduty.com/users/PXPGF42'
@httpretty.activate
def test_list_user_contact_methods_v2():
    """v2 API: contact methods are listed, one of each type, via mocked HTTP."""
    user_body = open('tests/fixtures/user_v2.json').read()
    contact_body = open('tests/fixtures/contacts_v2.json').read()
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
        body=user_body, status=200)
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/users/PXPGF42/contact_methods",
        body=contact_body, status=200)
    p = pygerduty.v2.PagerDuty("password")
    user = p.users.show("PXPGF42")
    contact_methods = [c for c in user.contact_methods.list()]
    assert len(contact_methods) == 3
    assert len([c for c in contact_methods if c.type == "email"]) == 1
    assert len([c for c in contact_methods if c.type == "phone"]) == 1
    assert len([c for c in contact_methods if c.type == "SMS"]) == 1
    assert user.self_ == 'https://api.pagerduty.com/users/PXPGF42'
@httpretty.activate
def test_user_notification_rules_v2():
    """v2 API: notification rules are listed and typed as expected."""
    user_body = open('tests/fixtures/user_v2.json').read()
    notification_body = open('tests/fixtures/notification_v2.json').read()
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
        body=user_body, status=200)
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/users/PXPGF42/notification_rules",
        body=notification_body, status=200)
    p = pygerduty.v2.PagerDuty("password")
    user = p.users.show("PXPGF42")
    notification_rules = [n for n in user.notification_rules.list()]
    assert len(notification_rules) == 1
    assert len([n for n in notification_rules if n.type == "assignment_notification_rule"]) == 1
    assert user.self_ == "https://api.pagerduty.com/users/PXPGF42"
def test_clean_response():
    """clean_response() recursively renames every 'self' key to 'self_'
    (avoiding the Python keyword), leaving all other keys/values intact."""
    mock_response = {
        "user" : {
            "id": "PHDGK84",
            "type": "user",
            "self": "https://api.pagerduty.com/users/PHDGK84",
            "name": "Snoopy",
            "contact_methods": [
                {
                    "address": "<EMAIL>",
                    "id": "PZMO0JF",
                    "self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
                    "label": "Default"
                },
                {
                    "address": "8928393498",
                    "id": "PZMN843",
                    "self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843",
                    "label": "Default"
                }
            ],
            "notification_rules": [
                {
                    "id": "P8WETWW",
                    "contact_method": {
                        "id": "PZMO0JF",
                        "self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
                    }
                }
            ]
        }
    }
    clean_response = pygerduty.common.clean_response(mock_response)
    assert clean_response == {
        "user" : {
            "id": "PHDGK84",
            "type": "user",
            "self_": "https://api.pagerduty.com/users/PHDGK84",
            "name": "Snoopy",
            "contact_methods": [
                {
                    "address": "<EMAIL>",
                    "id": "PZMO0JF",
                    "self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
                    "label": "Default"
                },
                {
                    "address": "8928393498",
                    "id": "PZMN843",
                    "self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843",
                    "label": "Default"
                }
            ],
            "notification_rules": [
                {
                    "id": "P8WETWW",
                    "contact_method": {
                        "id": "PZMO0JF",
                        "self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
                    }
                }
            ]
        }
    }
| 2.28125 | 2 |
uninove/auditoria-web-01.py | dirleif/aprendendo-python | 0 | 12766172 | <reponame>dirleif/aprendendo-python<gh_stars>0
# Script that creates the database file and the user table.
# (Originally: "script para criar o banco de dados e a tabela".)
import sqlite3

conn = sqlite3.connect('empresa.db')
cursor = conn.cursor()
# IF NOT EXISTS makes the setup script safe to run more than once.
cursor.execute(
    'CREATE TABLE IF NOT EXISTS usuario(nome TEXT NOT NULL,senha TEXT NOT NULL);'
)
# Persist the DDL and release the file handle explicitly.
conn.commit()
conn.close()
tests/test_with_simple_ontology.py | irbraun/oats | 1 | 12766173 | <reponame>irbraun/oats
import pytest
import sys
import pandas as pd
sys.path.append("../oats")
import oats
# From the test ontology file:
#! This ontology is set up to contain two branches below the root.
#!
#!
#! --> 2 --> 5
#! The first branch of the ontology goes like: 1 --> 4
#! --> 3 --> 6
#!
#!
#! The second branch of the ontology goes like: 1 --> 7 --> 8 --> 9
#!
#!
#! This is done to model different aspects of a DAG, such as one term having
#! two different parents but those parents then being subclasses of a single
#! common term. The second branch just includes a linear set of terms that
#! subclasses of one another going back up to the root term.
@pytest.fixture
def ontology():
    """Load the small hand-built test ontology (two branches under one root,
    see the diagram in the comment block above)."""
    from oats.annotation.ontology import Ontology
    ontology_filename = "tests/data/test_ontology.obo"
    ontology = Ontology(ontology_filename)
    return(ontology)
@pytest.mark.fast
def test_ontology_term_depths(ontology):
    """Are the depths of each term what are expected?
    """
    # Depth is distance from the root (term 1); matches the DAG diagram above.
    assert ontology.depth("TO:0000001") == 0
    assert ontology.depth("TO:0000002") == 1
    assert ontology.depth("TO:0000003") == 1
    assert ontology.depth("TO:0000004") == 2
    assert ontology.depth("TO:0000005") == 3
    assert ontology.depth("TO:0000006") == 3
    assert ontology.depth("TO:0000007") == 1
    assert ontology.depth("TO:0000008") == 2
    assert ontology.depth("TO:0000009") == 3
@pytest.mark.fast
def test_ontology_term_graph_based_information_content(ontology):
    """Is the information content calculated from the graph structure what is expected?
    """
    # Root has IC 0; leaves of the first branch are most informative (3.0).
    assert ontology.ic("TO:0000001", as_weight=False) == 0.000
    assert ontology.ic("TO:0000002", as_weight=False) == 0.3690702464285426
    assert ontology.ic("TO:0000003", as_weight=False) == 0.3690702464285426
    assert ontology.ic("TO:0000004", as_weight=False) == 1.000
    assert ontology.ic("TO:0000005", as_weight=False) == 3.000
    assert ontology.ic("TO:0000006", as_weight=False) == 3.000
    assert ontology.ic("TO:0000007", as_weight=False) == 0.500
    assert ontology.ic("TO:0000008", as_weight=False) == 1.3690702464285427
    assert ontology.ic("TO:0000009", as_weight=False) == 3.000
@pytest.mark.fast
def test_ontology_term_graph_based_information_content_as_weights(ontology):
    """Is the information content calculated from the graph structure what is expected?
    """
    # Weighted IC is the raw IC min-max scaled into [0, 1] (max raw IC is 3).
    assert ontology.ic("TO:0000001", as_weight=True) == 0.000
    assert ontology.ic("TO:0000002", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000
    assert ontology.ic("TO:0000003", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000
    assert ontology.ic("TO:0000004", as_weight=True) == (((1.000 - 0.000) * 1.000) / 3.000) + 0.000
    assert ontology.ic("TO:0000005", as_weight=True) == 1.000
    assert ontology.ic("TO:0000006", as_weight=True) == 1.000
    assert ontology.ic("TO:0000007", as_weight=True) == (((0.5 - 0.000) * 1.000) / 3.000) + 0.000
    assert ontology.ic("TO:0000008", as_weight=True) == (((1.3690702464285427 - 0.000) * 1.000) / 3.000) + 0.000
    assert ontology.ic("TO:0000009", as_weight=True) == 1.000
@pytest.mark.fast
def test_ontology_term_inheritance(ontology):
    """Is the number of inherited terms of each term in the graph as expected?
    """
    # inherited() includes the term itself plus all its ancestors up to the root.
    assert len(ontology.inherited("TO:0000001")) == 1
    assert len(ontology.inherited("TO:0000002")) == 2
    assert len(ontology.inherited("TO:0000003")) == 2
    assert len(ontology.inherited("TO:0000004")) == 4
    assert len(ontology.inherited("TO:0000005")) == 5
    assert len(ontology.inherited("TO:0000006")) == 5
    assert len(ontology.inherited("TO:0000007")) == 2
    assert len(ontology.inherited("TO:0000008")) == 3
    assert len(ontology.inherited("TO:0000009")) == 4
    # For a list of terms the inherited sets are unioned (no double counting).
    assert len(ontology.inherited(["TO:0000002","TO:0000003"])) == 3
    assert len(ontology.inherited(["TO:0000009","TO:0000005"])) == 8
    assert len(ontology.inherited(["TO:0000004","TO:0000003"])) == 4
    assert len(ontology.inherited(["TO:0000002"])) == 2
    assert len(ontology.inherited([])) == 0
@pytest.mark.fast
def test_ontology_ic_similarity(ontology):
    """Is the information content of the most informative common ancestor term as expected for these lists of terms?
    """
    # Terms whose only common ancestor is the root get similarity 0.
    assert ontology.similarity_ic(["TO:0000001"],["TO:0000002"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000001"],["TO:0000003"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000002"],["TO:0000003"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000003"],["TO:0000005"], inherited=False, as_weight=False) == 0.3690702464285426
    assert ontology.similarity_ic(["TO:0000007"],["TO:0000008"], inherited=False, as_weight=False) == 0.5
    assert ontology.similarity_ic(["TO:0000005"],["TO:0000009"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000001"],["TO:0000002","TO:0000001"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000003"],["TO:0000001","TO:0000009"], inherited=False, as_weight=False) == 0
    assert ontology.similarity_ic(["TO:0000002"],["TO:0000003","TO:0000002"], inherited=False, as_weight=False) == 0.3690702464285426
    assert ontology.similarity_ic(["TO:0000003"],["TO:0000005","TO:0000002"], inherited=False, as_weight=False) == 0.3690702464285426
    assert ontology.similarity_ic(["TO:0000008"],["TO:0000008","TO:0000007"], inherited=False, as_weight=False) == 1.3690702464285427
    assert ontology.similarity_ic(["TO:0000005"],["TO:0000009","TO:0000002"], inherited=False, as_weight=False) == 0.3690702464285426
@pytest.mark.fast
def test_ontology_similarity_jaccard(ontology):
    """Is the Jaccard similarity between the given lists of terms as expected?
    """
    # Jaccard = |intersection| / |union| of the inherited ancestor sets.
    assert ontology.similarity_jaccard(["TO:0000001"],["TO:0000002"], inherited=False) == 1/2
    assert ontology.similarity_jaccard(["TO:0000001"],["TO:0000003"], inherited=False) == 1/2
    assert ontology.similarity_jaccard(["TO:0000002"],["TO:0000003"], inherited=False) == 1/3
    assert ontology.similarity_jaccard(["TO:0000003"],["TO:0000005"], inherited=False) == 2/5
    assert ontology.similarity_jaccard(["TO:0000007"],["TO:0000008"], inherited=False) == 2/3
    assert ontology.similarity_jaccard(["TO:0000005"],["TO:0000009"], inherited=False) == 1/8
    assert ontology.similarity_jaccard(["TO:0000001"],["TO:0000002","TO:0000001"], inherited=False) == 1/2
    assert ontology.similarity_jaccard(["TO:0000003"],["TO:0000001","TO:0000009"], inherited=False) == 1/5
    assert ontology.similarity_jaccard(["TO:0000002"],["TO:0000003","TO:0000002"], inherited=False) == 2/3
    assert ontology.similarity_jaccard(["TO:0000003"],["TO:0000005","TO:0000002"], inherited=False) == 2/5
    assert ontology.similarity_jaccard(["TO:0000008"],["TO:0000008","TO:0000007"], inherited=False) == 3/3
    assert ontology.similarity_jaccard(["TO:0000005"],["TO:0000009","TO:0000002"], inherited=False) == 2/8
| 2.71875 | 3 |
train_sppe/src/opt.py | marcnunez/CabinMonitoringV1 | 0 | 12766174 | <reponame>marcnunez/CabinMonitoringV1
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by <NAME> (<EMAIL>)
# -----------------------------------------------------
import argparse
# Command-line options for SPPE training; parsed values are exposed via the
# module-level `opt` object imported elsewhere in the project.
parser = argparse.ArgumentParser(description='PyTorch CabinMonitoringV1 Training')

"----------------------------- General options -----------------------------"
parser.add_argument('--expID', default='default', type=str,
                    help='Experiment ID')
parser.add_argument('--dataset', default='coco', type=str,
                    help='Dataset choice: mpii | coco')
parser.add_argument('--nThreads', default=30, type=int,
                    help='Number of data loading threads')
parser.add_argument('--snapshot', default=1, type=int,
                    help='How often to take a snapshot of the model (0 = never)')

"----------------------------- CabinMonitoringV1 options -----------------------------"
parser.add_argument('--addDPG', default=False, dest='addDPG',
                    help='Train with data augmentation', action='store_true')

"----------------------------- Model options -----------------------------"
parser.add_argument('--loadModel', default=None, type=str,
                    help='Provide full path to a previously trained model')
parser.add_argument('--nClasses', default=17, type=int,
                    help='Number of output channel')

"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--LR', default=1e-3, type=float,
                    help='Learning rate')
parser.add_argument('--momentum', default=0, type=float,
                    help='Momentum')
parser.add_argument('--weightDecay', default=0, type=float,
                    help='Weight decay')
parser.add_argument('--eps', default=1e-8, type=float,
                    help='epsilon')
parser.add_argument('--crit', default='MSE', type=str,
                    help='Criterion type')
parser.add_argument('--optMethod', default='rmsprop', type=str,
                    help='Optimization method: rmsprop | sgd | nag | adadelta')

"----------------------------- Training options -----------------------------"
# Fixed: help text previously read 'Number of hourglasses to stack', a
# copy/paste error from a stacked-hourglass option set.
parser.add_argument('--nEpochs', default=50, type=int,
                    help='Number of training epochs')
parser.add_argument('--epoch', default=0, type=int,
                    help='Current epoch')
parser.add_argument('--trainBatch', default=128, type=int,
                    help='Train-batch size')
parser.add_argument('--validBatch', default=24, type=int,
                    help='Valid-batch size')
parser.add_argument('--trainIters', default=0, type=int,
                    help='Total train iters')
parser.add_argument('--valIters', default=0, type=int,
                    help='Total valid iters')

"----------------------------- Data options -----------------------------"
parser.add_argument('--inputResH', default=320, type=int,
                    help='Input image height')
parser.add_argument('--inputResW', default=256, type=int,
                    help='Input image width')
parser.add_argument('--outputResH', default=80, type=int,
                    help='Output heatmap height')
parser.add_argument('--outputResW', default=64, type=int,
                    help='Output heatmap width')
parser.add_argument('--scale', default=0.3, type=float,
                    help='Degree of scale augmentation')
parser.add_argument('--rotate', default=40, type=float,
                    help='Degree of rotation augmentation')
parser.add_argument('--hmGauss', default=1, type=int,
                    help='Heatmap gaussian size')

opt = parser.parse_args()
| 2.453125 | 2 |
src/optimizer.py | KUTuaNithid/connect4Nithid | 41 | 12766175 | <filename>src/optimizer.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 19:36:30 2018
@author: Arpit
"""
import threading
class Optimizer(threading.Thread):
    """Worker thread that repeatedly asks a brain to run optimization steps."""

    cnt = 0             # number of optimizer threads created so far
    stop_signal = False # class-level default; stop() shadows it per instance

    def __init__(self, brain):
        threading.Thread.__init__(self)
        self.brain = brain
        # Assign a sequential id to this worker and bump the class counter.
        self.number = Optimizer.cnt
        Optimizer.cnt += 1

    def run(self):
        print("Opt. thread " + str(self.number) + " started")
        # Keep optimizing until stop() flips the flag.
        while not self.stop_signal:
            self.brain.optimize()

    def stop(self):
        """Ask run() to exit after the current optimize() call finishes."""
        self.stop_signal = True
| 3.359375 | 3 |
class Solution:
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        # Greedy: the maximum total profit equals the sum of every positive
        # day-to-day price increase (buy at each valley, sell at each peak).
        # Empty or single-element lists naturally yield 0.
        return sum(max(nxt - cur, 0) for cur, nxt in zip(prices, prices[1:]))
| 3.015625 | 3 |
api.py | luccanunes/urban-dict | 1 | 12766177 | <filename>api.py
def get_term(term: list):
    """Look up a term on Urban Dictionary and return its first meaning.

    :param term: list of words making up the search term
    :return: the meaning text on success, or a dict with an "Error" key
        when no meaning element is found on the page
    """
    import requests
    from bs4 import BeautifulSoup

    # Join the words with '+' for the query string. (The original loop
    # compared each word to term[-1], which dropped separators whenever a
    # word appeared twice.)
    URL = "https://www.urbandictionary.com/define.php?term=" + '+'.join(term)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    try:
        # First element matching the meaning CSS class.
        meaning = soup.select('.meaning')[0].text
    except IndexError:
        # No .meaning element on the page (term not found).
        return {"Error": "Failed to find term meaning"}
    else:
        return meaning
milpool/models.py | knutdrand/milpool | 0 | 12766178 | <filename>milpool/models.py
import torch
from .pool import XYMILPool
class Quadratic(torch.nn.Module):
    """Scalar-output model combining an affine term with a pure quadratic term.

    forward(X) = linear(X) + quadratic(X**2), where the quadratic branch
    has no bias of its own.
    """

    def __init__(self, n_input=1):
        super().__init__()
        self.linear = torch.nn.Linear(n_input, 1)
        self.quadratic = torch.nn.Linear(n_input, 1, bias=False)

    def forward(self, X):
        affine_part = self.linear(X)
        squared_part = self.quadratic(X ** 2)
        return affine_part + squared_part
class MIL(torch.nn.Module):
    """Multiple-instance model: score each instance, then pool bag-wise.

    NOTE(review): ``n_input`` is accepted but unused here (subclasses pass
    it through); the default ``XYMILPool()`` is created once at class
    definition and shared by every MIL that relies on it — confirm the
    pooling module is stateless.
    """

    def __init__(self, n_input, instance_model, pooling=XYMILPool()):
        super().__init__()
        self.instance_model = instance_model
        self.pooling = pooling

    def forward(self, X):
        instance_scores = self.instance_model(X)
        return self.pooling(instance_scores)
class SimpleMIL(MIL):
    """MIL whose per-instance scorer is a single linear layer."""

    def __init__(self, n_input=1, pooling=XYMILPool()):
        scorer = torch.nn.Linear(n_input, 1)
        super().__init__(n_input, scorer, pooling)
class QuadraticMIL(MIL):
    """MIL whose per-instance scorer is the Quadratic model."""

    def __init__(self, n_input=1, pooling=XYMILPool()):
        scorer = Quadratic(n_input)
        super().__init__(n_input, scorer, pooling)
class ALinearMIL(MIL):
    """MIL whose per-instance scorer is a small fully connected ReLU net.

    Architecture: Linear(n_input, 5), then four ReLU/Linear stages
    (three 5->5 and a final 5->1), matching the original layer sequence.
    """

    def __init__(self, n_input, pooling=XYMILPool()):
        n_hidden = 5
        layers = [torch.nn.Linear(n_input, n_hidden)]
        for _ in range(3):
            layers.append(torch.nn.ReLU())
            layers.append(torch.nn.Linear(n_hidden, n_hidden))
        layers.append(torch.nn.ReLU())
        layers.append(torch.nn.Linear(n_hidden, 1))
        super().__init__(n_input, torch.nn.Sequential(*layers), pooling)
class AdjustedALinearMIL(ALinearMIL):
    """ALinearMIL followed by a learned affine rescaling of the bag score."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 1-D affine correction applied to the pooled log-odds.
        self.adjuster = torch.nn.Linear(1, 1)

    def forward(self, X):
        raw_logodds = super().forward(X)
        return self.adjuster(raw_logodds)
| 3.15625 | 3 |
scripts/swap_column_values.py | vkozareva/single_cell_portal | 42 | 12766179 | <filename>scripts/swap_column_values.py
"""
Swap column values, but not column headers, of a TSV file
Example:
# Swap values in first and last columns of a TSV that has two header lines
python3 swap_column_values.py --num_header_rows 2 --swap_columns 0,-1 --input_file scp_coordinates.tsv
"""
import argparse
# Build the command-line interface for the column-swapping script.
parser = argparse.ArgumentParser(
    prog='swap_column_values.py',
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
    '--input_file', dest='input_file',
    help='Path to input TSV file'
)
parser.add_argument(
    '--output_file', dest='output_file',
    help='Path to use for output TSV file. Defaults to e.g. input_file.swapped.tsv'
)
parser.add_argument(
    '--num_header_rows', default=1, type=int, dest='num_header_rows',
    help='Number of rows occupied by headers'
)
parser.add_argument(
    '--swap_columns', dest='swap_columns', default='0,-1',
    help='Indexes of two columns to swap. Defaults to first (1) and last (-1) columns.'
)

# Unpack parsed options into the module-level names used below.
opts = parser.parse_args()
input_file = opts.input_file
output_file = opts.output_file
num_header_rows = opts.num_header_rows
swap_columns = opts.swap_columns
swap_indexes = [int(index) for index in swap_columns.split(',')]
# Read the whole input file, then separate header rows from data rows.
with open(input_file) as in_tsv:
    all_lines = in_tsv.readlines()

header_block = ''.join(all_lines[:num_header_rows])
data_rows = all_lines[num_header_rows:]

# Swap the two requested columns in every data row.
idx_a, idx_b = swap_indexes
swapped_rows = []
for row in data_rows:
    fields = row.strip().split('\t')
    fields[idx_a], fields[idx_b] = fields[idx_b], fields[idx_a]
    swapped_rows.append('\t'.join(fields))

output_text = header_block + '\n'.join(swapped_rows)

if output_file is None:
    # Derive e.g. "input.swapped.tsv" from "input.tsv".
    # NOTE(review): an input path without any '.' yields "swapped.<name>" —
    # confirm whether extension-less inputs need support.
    path_pieces = input_file.split('.')
    output_file = '.'.join(path_pieces[:-1] + ['swapped', path_pieces[-1]])

with open(output_file, 'w') as out_tsv:
    out_tsv.write(output_text)
blueprints/era_postgres/base_vm.py | halsayed/calm | 0 | 12766180 | from calm.dsl.builtins import AhvVmResources, AhvVm
from calm.dsl.builtins import AhvVmDisk, AhvVmNic, AhvVmGC
from vars import NETWORK_NAME
| 1.023438 | 1 |
tests/test_multithreading.py | himaghna/molSim | 1 | 12766181 | """ Test multithreading to ensure consistent behavior with
serial implementation."""
import unittest
import warnings
from os import remove
from os.path import exists, join
import numpy as np
from molSim.chemical_datastructures import MoleculeSet
from time import time
from tabulate import tabulate
class TestMultithreading(unittest.TestCase):
"""Unit tests to ensure consistency when running molSim as a single process
or when using multiprocessing.
"""
@classmethod
def setUpClass(self):
    """Create the shared test fixtures.

    Always builds a small SMILES file and the reference (serial,
    single-thread) similarity matrix used by the consistency tests.
    When a ``.speedup-test`` marker file exists, additionally writes the
    larger molecule databases and collects serial-timing baselines for
    the speedup/efficiency tests (for both tanimoto/morgan and
    cosine/topological configurations).

    The original body repeated the same file-write/timing stanza six
    times per configuration; it is factored into a loop plus the
    ``_mean_serial_time`` helper. All attribute names
    (``_<size>_molecules_fpath``, ``_<size>_molecules_serial_time``,
    ``_<size>_molecules_serial_time_2``) are unchanged.
    """
    if not exists(".speedup-test"):
        print("Speedup and Efficiency tests DISABLED.")
        self.NO_SPEEDUP_TEST = True
    else:
        self.NO_SPEEDUP_TEST = False
        self.N_REPLICATES = 3
        warnings.warn(
            "Speedup and Efficiency tests ENABLED, expect long runtime.",
            ResourceWarning,
        )
    print(" ~ ~ Testing Multithreading ~ ~ ")
    # basic consistency tests
    self.text_fpath = "temp_multithread_smiles_seq.txt"
    print(f"Creating text file {self.text_fpath}")
    with open(self.text_fpath, "w") as file:
        for smiles in ["C", "CC", "CCC", "O", "CCCC", "CO", "CCOCC"]:
            file.write(smiles + "\n")
    test_molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=1,
        fingerprint_type="morgan_fingerprint",
    )
    self.correct_similarity_matrix = test_molecule_set.get_similarity_matrix()
    if self.NO_SPEEDUP_TEST:
        return
    with open(join("tests", "data", "combinatorial_1.txt"), "r") as file:
        data = file.readlines()
    # data used for speedup and efficiency tests
    sizes = (100, 500, 1000, 5000, 10000, 15000)
    for size in sizes:
        fpath = f"temp_multithread_speedup_{size}.txt"
        setattr(self, f"_{size}_molecules_fpath", fpath)
        print(f"Creating text file {fpath}")
        with open(fpath, "w") as file:
            # Skip the header line; slice bound matches the original
            # stanzas (data[1:size + 2]).
            for smiles in data[1:size + 2]:
                file.write(smiles)
        print(f"Running {size} molecules with 1 process.")
        setattr(
            self,
            f"_{size}_molecules_serial_time",
            self._mean_serial_time(fpath, "tanimoto", "morgan_fingerprint"),
        )
    # data used for speedup and efficiency test 2
    for size in sizes:
        fpath = getattr(self, f"_{size}_molecules_fpath")
        print(f"Running {size} molecules with 1 process.")
        setattr(
            self,
            f"_{size}_molecules_serial_time_2",
            self._mean_serial_time(fpath, "cosine", "topological_fingerprint"),
        )

@classmethod
def _mean_serial_time(self, fpath, similarity_measure, fingerprint_type):
    """Mean wall-clock time (over N_REPLICATES runs) to build a
    single-threaded MoleculeSet from *fpath*."""
    total = 0
    for _ in range(self.N_REPLICATES):
        start = time()
        MoleculeSet(
            molecule_database_src=fpath,
            molecule_database_src_type="text",
            is_verbose=False,
            similarity_measure=similarity_measure,
            n_threads=1,
            fingerprint_type=fingerprint_type,
        )
        total += (time() - start) / self.N_REPLICATES
    return total
def test_multithreading_consistency_2_threads(self):
    """Verify the 2-thread similarity matrix matches the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=2,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using two threads.",
    )
def test_multithreading_consistency_3_threads(self):
    """Verify the 3-thread similarity matrix matches the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=3,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using three threads.",
    )
def test_multithreading_consistency_4_threads(self):
    """Verify the 4-thread similarity matrix matches the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=4,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using four threads.",
    )
def test_multithreading_consistency_5_threads(self):
    """Verify the 5-thread similarity matrix matches the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=5,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using five threads.",
    )
def test_multithreading_consistency_6_threads(self):
    """Verify the 6-thread similarity matrix matches the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=6,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using six threads.",
    )
def test_multithreading_consistency_7_threads(self):
    """Verify the 7-thread matrix (one thread per molecule) matches the
    serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=7,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using seven threads (equal to the number of molecules).",
    )
def test_multithreading_consistency_10_threads(self):
    """Verify the 10-thread matrix (more threads than molecules) matches
    the serial result."""
    molecule_set = MoleculeSet(
        molecule_database_src=self.text_fpath,
        molecule_database_src_type="text",
        is_verbose=True,
        similarity_measure="tanimoto",
        n_threads=10,
        fingerprint_type="morgan_fingerprint",
    )
    parallel_matrix = molecule_set.get_similarity_matrix()
    self.assertIsNone(
        np.testing.assert_array_equal(
            parallel_matrix, self.correct_similarity_matrix
        ),
        "Similarity matrix not equal when using ten threads (more than the number of molecules).",
    )
def test_speedup_efficiency_tanimoto(self):
    """
    Evaluate the speedup and efficiency of the multiprocessing approach.

    For each database size, MoleculeSet construction (tanimoto /
    morgan_fingerprint) is timed with 2, 4 and 8 processes and compared
    against the serial baselines collected in setUpClass.

    Fixes vs. the original: efficiency is now speedup divided by the
    ACTUAL process count (the original divided by 5 and 10 while running
    4 and 8 processes), the progress prints report the real process
    count, and the "{:2f}" format typo in the timing table is corrected.
    """
    if self.NO_SPEEDUP_TEST:
        return
    print("~" * 10, "\n", "Speedup and Efficiency Test\n", "~" * 10)
    sizes = (100, 500, 1000, 5000, 10000, 15000)
    thread_counts = (2, 4, 8)
    serial_times = {}
    parallel_times = {}
    speedups = {}
    efficiencies = {}
    for size in sizes:
        fpath = getattr(self, f"_{size}_molecules_fpath")
        serial_times[size] = getattr(self, f"_{size}_molecules_serial_time")
        for n_threads in thread_counts:
            print(f"Running {size} molecules with {n_threads} processes.")
            elapsed = self._mean_parallel_time(
                fpath, n_threads, "tanimoto", "morgan_fingerprint"
            )
            parallel_times[size, n_threads] = elapsed
            speedups[size, n_threads] = serial_times[size] / elapsed
            # Efficiency = speedup / number of processes actually used.
            efficiencies[size, n_threads] = (
                speedups[size, n_threads] / n_threads
            )
    self._print_scaling_report(
        sizes, thread_counts, serial_times, parallel_times,
        speedups, efficiencies,
    )

def _mean_parallel_time(self, fpath, n_threads, similarity_measure,
                        fingerprint_type):
    """Mean wall-clock time (over N_REPLICATES runs) to build a
    MoleculeSet from *fpath* using *n_threads* processes."""
    total = 0
    for _ in range(self.N_REPLICATES):
        start = time()
        MoleculeSet(
            molecule_database_src=fpath,
            molecule_database_src_type="text",
            is_verbose=False,
            similarity_measure=similarity_measure,
            n_threads=n_threads,
            fingerprint_type=fingerprint_type,
        )
        total += (time() - start) / self.N_REPLICATES
    return total

def _print_scaling_report(self, sizes, thread_counts, serial_times,
                          parallel_times, speedups, efficiencies):
    """Print speedup, efficiency and raw-timing tables via tabulate."""
    headers = ["# mol", "", "# processes", ""]
    print("Speedup:")
    rows = [["~", *thread_counts]]
    for size in sizes:
        rows.append([size] + [speedups[size, n] for n in thread_counts])
    print(tabulate(rows, headers=headers))
    print("Efficiency:")
    rows = [["~", *thread_counts]]
    for size in sizes:
        rows.append([size] + [efficiencies[size, n] for n in thread_counts])
    print(tabulate(rows, headers=headers))
    print("Execution Time in seconds (serial/parallel):")
    rows = [["~", *thread_counts]]
    for size in sizes:
        rows.append(
            [size]
            + [
                "{:.2f}/{:.2f}".format(
                    float(serial_times[size]),
                    float(parallel_times[size, n]),
                )
                for n in thread_counts
            ]
        )
    print(tabulate(rows, headers=headers))
def test_speedup_efficiency_cosine(self):
"""
Evaluate the speedup and efficieny of the multiprocessing approach
with a more complex metric.
"""
if self.NO_SPEEDUP_TEST:
return
print("~" * 10, "\n", "Speedup and Efficiency Test 2\n", "~" * 10)
# 100 molecules
print("Running 100 molecules with 2 processes.")
_100_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._100_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_100_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_100_molecules_2_process_speedup = (
self._100_molecules_serial_time_2 / _100_molecules_2_process_time
)
_100_molecules_2_process_efficiency = _100_molecules_2_process_speedup / 2
print("Running 100 molecules with 5 processes.")
_100_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._100_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_100_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_100_molecules_5_process_speedup = (
self._100_molecules_serial_time_2 / _100_molecules_5_process_time
)
_100_molecules_5_process_efficiency = _100_molecules_5_process_speedup / 5
print("Running 100 molecules with 10 processes.")
_100_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._100_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_100_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_100_molecules_10_process_speedup = (
self._100_molecules_serial_time_2 / _100_molecules_10_process_time
)
_100_molecules_10_process_efficiency = _100_molecules_10_process_speedup / 10
# 500 molecules
print("Running 500 molecules with 2 processes.")
_500_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._500_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_500_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_500_molecules_2_process_speedup = (
self._500_molecules_serial_time_2 / _500_molecules_2_process_time
)
_500_molecules_2_process_efficiency = _500_molecules_2_process_speedup / 2
print("Running 500 molecules with 5 processes.")
_500_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._500_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_500_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_500_molecules_5_process_speedup = (
self._500_molecules_serial_time_2 / _500_molecules_5_process_time
)
_500_molecules_5_process_efficiency = _500_molecules_5_process_speedup / 5
print("Running 500 molecules with 10 processes.")
_500_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._500_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_500_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_500_molecules_10_process_speedup = (
self._500_molecules_serial_time_2 / _500_molecules_10_process_time
)
_500_molecules_10_process_efficiency = _500_molecules_10_process_speedup / 10
# 1000 molecules
print("Running 1000 molecules with 2 processes.")
_1000_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._1000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_1000_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_1000_molecules_2_process_speedup = (
self._1000_molecules_serial_time_2 / _1000_molecules_2_process_time
)
_1000_molecules_2_process_efficiency = _1000_molecules_2_process_speedup / 2
print("Running 1000 molecules with 5 processes.")
_1000_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._1000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_1000_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_1000_molecules_5_process_speedup = (
self._1000_molecules_serial_time_2 / _1000_molecules_5_process_time
)
_1000_molecules_5_process_efficiency = _1000_molecules_5_process_speedup / 5
print("Running 1000 molecules with 10 processes.")
_1000_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._1000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_1000_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_1000_molecules_10_process_speedup = (
self._1000_molecules_serial_time_2 / _1000_molecules_10_process_time
)
_1000_molecules_10_process_efficiency = _1000_molecules_10_process_speedup / 10
# 5000 molecules
print("Running 5000 molecules with 2 processes.")
_5000_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._5000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_5000_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_5000_molecules_2_process_speedup = (
self._5000_molecules_serial_time_2 / _5000_molecules_2_process_time
)
_5000_molecules_2_process_efficiency = _5000_molecules_2_process_speedup / 2
print("Running 5000 molecules with 5 processes.")
_5000_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._5000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_5000_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_5000_molecules_5_process_speedup = (
self._5000_molecules_serial_time_2 / _5000_molecules_5_process_time
)
_5000_molecules_5_process_efficiency = _5000_molecules_5_process_speedup / 5
print("Running 5000 molecules with 10 processes.")
_5000_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._5000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_5000_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_5000_molecules_10_process_speedup = (
self._5000_molecules_serial_time_2 / _5000_molecules_10_process_time
)
_5000_molecules_10_process_efficiency = _5000_molecules_10_process_speedup / 10
# 10000 molecules
print("Running 10000 molecules with 2 processes.")
_10000_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._10000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_10000_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_10000_molecules_2_process_speedup = (
self._10000_molecules_serial_time_2 / _10000_molecules_2_process_time
)
_10000_molecules_2_process_efficiency = _10000_molecules_2_process_speedup / 2
print("Running 10000 molecules with 5 processes.")
_10000_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._10000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_10000_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_10000_molecules_5_process_speedup = (
self._10000_molecules_serial_time_2 / _10000_molecules_5_process_time
)
_10000_molecules_5_process_efficiency = _10000_molecules_5_process_speedup / 5
print("Running 10000 molecules with 10 processes.")
_10000_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._10000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_10000_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_10000_molecules_10_process_speedup = (
self._10000_molecules_serial_time_2 / _10000_molecules_10_process_time
)
_10000_molecules_10_process_efficiency = (
_10000_molecules_10_process_speedup / 10
)
# 15000 molecules
print("Running 15000 molecules with 2 processes.")
_15000_molecules_2_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._15000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=2,
fingerprint_type="topological_fingerprint",
)
_15000_molecules_2_process_time += (time() - start) / self.N_REPLICATES
_15000_molecules_2_process_speedup = (
self._15000_molecules_serial_time_2 / _15000_molecules_2_process_time
)
_15000_molecules_2_process_efficiency = _15000_molecules_2_process_speedup / 2
print("Running 15000 molecules with 5 processes.")
_15000_molecules_5_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._15000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=4,
fingerprint_type="topological_fingerprint",
)
_15000_molecules_5_process_time += (time() - start) / self.N_REPLICATES
_15000_molecules_5_process_speedup = (
self._15000_molecules_serial_time_2 / _15000_molecules_5_process_time
)
_15000_molecules_5_process_efficiency = _15000_molecules_5_process_speedup / 5
print("Running 15000 molecules with 10 processes.")
_15000_molecules_10_process_time = 0
for i in range(self.N_REPLICATES):
start = time()
test_molecule_set = MoleculeSet(
molecule_database_src=self._15000_molecules_fpath,
molecule_database_src_type="text",
is_verbose=False,
similarity_measure="cosine",
n_threads=8,
fingerprint_type="topological_fingerprint",
)
_15000_molecules_10_process_time += (time() - start) / self.N_REPLICATES
_15000_molecules_10_process_speedup = (
self._15000_molecules_serial_time_2 / _15000_molecules_10_process_time
)
_15000_molecules_10_process_efficiency = (
_15000_molecules_10_process_speedup / 10
)
print("Speedup:")
print(
tabulate(
[
["~", 2, 4, 8],
[
100,
_100_molecules_2_process_speedup,
_100_molecules_5_process_speedup,
_100_molecules_10_process_speedup,
],
[
500,
_500_molecules_2_process_speedup,
_500_molecules_5_process_speedup,
_500_molecules_10_process_speedup,
],
[
1000,
_1000_molecules_2_process_speedup,
_1000_molecules_5_process_speedup,
_1000_molecules_10_process_speedup,
],
[
5000,
_5000_molecules_2_process_speedup,
_5000_molecules_5_process_speedup,
_5000_molecules_10_process_speedup,
],
[
10000,
_10000_molecules_2_process_speedup,
_10000_molecules_5_process_speedup,
_10000_molecules_10_process_speedup,
],
[
15000,
_15000_molecules_2_process_speedup,
_15000_molecules_5_process_speedup,
_15000_molecules_10_process_speedup,
],
],
headers=["# mol", "", "# processes", ""],
)
)
print("Efficiency:")
print(
tabulate(
[
["~", 2, 4, 8],
[
100,
_100_molecules_2_process_efficiency,
_100_molecules_5_process_efficiency,
_100_molecules_10_process_efficiency,
],
[
500,
_500_molecules_2_process_efficiency,
_500_molecules_5_process_efficiency,
_500_molecules_10_process_efficiency,
],
[
1000,
_1000_molecules_2_process_efficiency,
_1000_molecules_5_process_efficiency,
_1000_molecules_10_process_efficiency,
],
[
5000,
_5000_molecules_2_process_efficiency,
_5000_molecules_5_process_efficiency,
_5000_molecules_10_process_efficiency,
],
[
10000,
_10000_molecules_2_process_efficiency,
_10000_molecules_5_process_efficiency,
_10000_molecules_10_process_efficiency,
],
[
15000,
_15000_molecules_2_process_efficiency,
_15000_molecules_5_process_efficiency,
_15000_molecules_10_process_efficiency,
],
],
headers=["# mol", "", "# processes", ""],
)
)
print("Execution Time in seconds (serial/parallel):")
print(
tabulate(
[
["~", 2, 4, 8],
[
100,
"{:.2f}/{:.2f}".format(
float(self._100_molecules_serial_time_2),
float(_100_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._100_molecules_serial_time_2),
float(_100_molecules_5_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._100_molecules_serial_time_2),
float(_100_molecules_10_process_time),
),
],
[
500,
"{:.2f}/{:.2f}".format(
float(self._500_molecules_serial_time_2),
float(_500_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._500_molecules_serial_time_2),
float(_500_molecules_5_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._500_molecules_serial_time_2),
float(_500_molecules_10_process_time),
),
],
[
1000,
"{:.2f}/{:.2f}".format(
float(self._1000_molecules_serial_time_2),
float(_1000_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._1000_molecules_serial_time_2),
float(_1000_molecules_5_process_time),
),
"{:2f}/{:.2f}".format(
float(self._1000_molecules_serial_time_2),
float(_1000_molecules_10_process_time),
),
],
[
5000,
"{:.2f}/{:.2f}".format(
float(self._5000_molecules_serial_time_2),
float(_5000_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._5000_molecules_serial_time_2),
float(_5000_molecules_5_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._5000_molecules_serial_time_2),
float(_5000_molecules_10_process_time),
),
],
[
10000,
"{:.2f}/{:.2f}".format(
float(self._10000_molecules_serial_time_2),
float(_10000_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._10000_molecules_serial_time_2),
float(_10000_molecules_5_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._10000_molecules_serial_time_2),
float(_10000_molecules_10_process_time),
),
],
[
15000,
"{:.2f}/{:.2f}".format(
float(self._15000_molecules_serial_time_2),
float(_15000_molecules_2_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._15000_molecules_serial_time_2),
float(_15000_molecules_5_process_time),
),
"{:.2f}/{:.2f}".format(
float(self._15000_molecules_serial_time_2),
float(_15000_molecules_10_process_time),
),
],
],
headers=["# mol", "", "# processes", ""],
)
)
@classmethod
def tearDownClass(self):
"""Delete temporary files used in testing."""
print("Deleting smiles database files.")
remove(self.text_fpath)
if not self.NO_SPEEDUP_TEST:
remove(self._100_molecules_fpath)
remove(self._500_molecules_fpath)
remove(self._1000_molecules_fpath)
remove(self._5000_molecules_fpath)
remove(self._10000_molecules_fpath)
remove(self._15000_molecules_fpath)
print(" ~ ~ Multithreading Test Complete ~ ~ ")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 2.71875 | 3 |
util/date_util.py | hroncok/Google-Calendar-Simple-API | 0 | 12766182 | <gh_stars>0
from datetime import datetime, timedelta
def get_utc_datetime(date, *args, **kwargs):
    """Return an ISO-8601 string for *date*.

    A ``datetime`` instance is formatted directly; any other value is
    treated as a year and, together with the remaining positional/keyword
    arguments, passed to the ``datetime`` constructor.

    NOTE(review): despite the name, no timezone conversion to UTC is
    performed here — confirm whether callers rely on that.
    """
    value = date if isinstance(date, datetime) else datetime(date, *args, **kwargs)
    return value.isoformat()
def date_range(start_date, day_count):
    """Yield ``day_count`` consecutive dates, starting at ``start_date``."""
    current = start_date
    for _ in range(day_count):
        yield current
        current += timedelta(1)
| 3.015625 | 3 |
code/UI/OpenAPI/python-flask-server/swagger_server/models/response_envelope.py | ramseylab/RTX | 0 | 12766183 | <filename>code/UI/OpenAPI/python-flask-server/swagger_server/models/response_envelope.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.response import Response # noqa: F401,E501
from swagger_server import util
class ResponseEnvelope(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, response_ur_is: List[str]=None, responses: List[Response]=None, options: List[str]=None):  # noqa: E501
        """Create a ResponseEnvelope model instance.

        :param response_ur_is: list of URIs for Response objects to fetch
            and process
        :param responses: list of Response objects to process
        :param options: list of options to guide what happens with the
            Response object(s)
        """
        # Swagger metadata: python attribute -> declared type.
        self.swagger_types = {
            'response_ur_is': List[str],
            'responses': List[Response],
            'options': List[str]
        }
        # Swagger metadata: python attribute -> JSON field name.
        self.attribute_map = {
            'response_ur_is': 'responseURIs',
            'responses': 'responses',
            'options': 'options'
        }
        self._response_ur_is = response_ur_is
        self._responses = responses
        self._options = options

    @classmethod
    def from_dict(cls, dikt) -> 'ResponseEnvelope':
        """Deserialize the dict *dikt* into a ResponseEnvelope model."""
        return util.deserialize_model(dikt, cls)

    @property
    def response_ur_is(self) -> List[str]:
        """List of URIs for Response objects to fetch and process."""
        return self._response_ur_is

    @response_ur_is.setter
    def response_ur_is(self, response_ur_is: List[str]):
        """Set the list of URIs for Response objects to fetch and process."""
        self._response_ur_is = response_ur_is

    @property
    def responses(self) -> List[Response]:
        """List of Response objects to process."""
        return self._responses

    @responses.setter
    def responses(self, responses: List[Response]):
        """Set the list of Response objects to process."""
        self._responses = responses

    @property
    def options(self) -> List[str]:
        """List of options to guide what happens with the Response object(s)."""
        return self._options

    @options.setter
    def options(self, options: List[str]):
        """Set the list of options to guide what happens with the Response object(s)."""
        self._options = options
dgcrm/migrations/0004_auto_20170616_1326.py | DenysGurin/solnce | 0 | 12766184 | <filename>dgcrm/migrations/0004_auto_20170616_1326.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 13:26
from __future__ import unicode_literals
import dgcrm.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replace the ClientResultLinker through-model
    (and the Client.results field) with a direct Result.client foreign key,
    and add a photo upload field to Result.

    Operation order matters: the linker's own FK fields and Client.results
    are removed before the new fields are added and the model is deleted.
    """

    dependencies = [
        ('dgcrm', '0003_feadback_has_event'),
    ]

    operations = [
        # Tear down both sides of the old linker model first.
        migrations.RemoveField(
            model_name='clientresultlinker',
            name='client',
        ),
        migrations.RemoveField(
            model_name='clientresultlinker',
            name='result',
        ),
        migrations.RemoveField(
            model_name='client',
            name='results',
        ),
        # Link results directly to clients instead.
        migrations.AddField(
            model_name='result',
            name='client',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dgcrm.Client'),
        ),
        migrations.AddField(
            model_name='result',
            name='photo',
            field=models.FileField(blank=True, upload_to=dgcrm.models.directory_path),
        ),
        migrations.DeleteModel(
            name='ClientResultLinker',
        ),
    ]
| 1.71875 | 2 |
pages/views.py | SueyGuey/RealPolitik | 0 | 12766185 | from django.shortcuts import render, redirect
from bs4 import BeautifulSoup
from django.views.generic import DetailView, FormView, CreateView
from news.models import Article, Comment
from django.db import IntegrityError
from django.db.models import Q
from .forms import AddComment
import requests
from urllib.request import urlopen, Request
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from operator import attrgetter
# NOTE(review): this globally silences urllib3 warnings (including
# insecure-request/TLS warnings) for every scrape below — confirm this
# suppression is intentional.
requests.packages.urllib3.disable_warnings()
def refresh(request):
    """Scrape every configured news source and store new Articles.

    Each section downloads a listing page, extracts title / URL / image /
    author for every headline and saves it as an Article.  Headlines are
    iterated oldest-first (``[::-1]``) so the newest stories receive the
    most recent creation timestamps.  Duplicate articles (UNIQUE constraint
    violations) are skipped; any other database integrity error is raised
    (the original silently swallowed them).  Redirects to the parent URL
    ("../") when done.
    """

    def save_new(article):
        """Save *article*, ignoring duplicates but surfacing real errors."""
        try:
            article.save()
        except IntegrityError as e:
            # A UNIQUE violation just means we already scraped this story.
            if 'UNIQUE constraint' not in str(e.args):
                raise

    # --- Foreign Policy ---------------------------------------------------
    foreign_policy_req = requests.get("https://foreignpolicy.com/category/latest/")
    foreign_policy_soup = BeautifulSoup(foreign_policy_req.content, "html.parser")
    foreign_policy = foreign_policy_soup.find_all('div', {'class': 'excerpt-content--list content-block'})
    for headline in foreign_policy[::-1]:
        new_article = Article()
        new_article.title = headline.find_all('h3', {'class': 'hed'})[0].text
        new_article.url = headline.find_all('a', {'class': 'hed-heading -excerpt'})[0]['href']
        new_article.image_url = headline.find_all('img')[0]['data-src']
        auth = headline.find_all('a', {'class': 'author'})
        if len(auth) != 0:
            new_article.author = auth[0].text
        else:
            new_article.author = "FP"  # no byline on the listing page
        new_article.site = "Foreign Policy"
        new_article.site_url = "https://foreignpolicy.com"
        save_new(new_article)

    # --- Foreign Affairs --------------------------------------------------
    foreign_affairs_req = requests.get("https://www.foreignaffairs.com")
    foreign_affairs_soup = BeautifulSoup(foreign_affairs_req.content, "html.parser")
    foreign_affairs = foreign_affairs_soup.find_all('div', {'class': 'magazine-list-item--image-link row'})
    for headline in foreign_affairs[::-1]:
        new_article = Article()
        new_article.title = headline.find_all('h3', {'class': 'article-card-title font-weight-bold ls-0 mb-0 f-sans'})[0].text
        new_article.image_url = headline.find_all('img', {'class': 'b-lazy b-lazy-ratio magazine-list-item--image d-none d-md-block'})[0]['data-src']
        # Overly long data-src values are lazy-load placeholders, not real
        # image URLs -- fall back to the site logo.
        if len(new_article.image_url) > 199:
            new_article.image_url = 'https://subscribe.foreignaffairs.com/FAF/pub_templates/faf/images/logo.png'
        new_article.url = headline.find_all('a', {'class': 'd-block flex-grow-1'})[0]['href']
        new_article.author = headline.find_all('h4', {'class': 'magazine-author font-italic ls-0 mb-0 f-serif'})[0].text
        new_article.site = "Foreign Affairs"
        new_article.site_url = "https://www.foreignaffairs.com"
        save_new(new_article)

    # --- China Power Podcasts ---------------------------------------------
    # The site returns 403 for the default urllib user agent, so spoof one.
    china_power_req = Request("https://chinapower.csis.org/podcasts/", headers={'User-Agent': 'Mozilla/5.0'})
    china_power_page = urlopen(china_power_req).read()
    china_power_soup = BeautifulSoup(china_power_page, "html.parser")
    china_power = china_power_soup.find_all('article')
    for headline in china_power[::-1]:
        # The guest's name is whatever follows the word "with" in the title.
        disc = headline.find_all('h2', {'class': 'entry-title'})[0].text
        list_disc = disc.split()
        record = False
        list_auth = []
        for name in list_disc:
            if record:
                list_auth.append(name)
            if name == "with":
                record = True
        new_article = Article()
        new_article.title = headline.find_all('h2', {'class': 'entry-title'})[0].text
        new_article.image_url = "https://megaphone.imgix.net/podcasts/722b9c2a-e6e1-11ea-a520-3349f6671499/image/uploads_2F1598366366917-v9rdxhpawhc-bee946f884ea9a141d33af2322074d0d_2F_ART_ChinaPower.jpg?ixlib=rails-2.1.2&w=400&h=400"
        new_article.url = headline.find_all('a')[0]['href']
        if len(list_auth) != 0:
            new_article.author = " ".join(list_auth) + " & <NAME>"
        else:
            new_article.author = "<NAME>"
        new_article.site = "China Power Podcasts"
        new_article.site_url = "https://chinapower.csis.org/podcasts/"
        save_new(new_article)

    # --- War on the Rocks -------------------------------------------------
    warontherocks_req = Request("https://warontherocks.com/", headers={'User-Agent': 'Mozilla/5.0'})
    warontherocks_page = urlopen(warontherocks_req).read()
    warontherocks_soup = BeautifulSoup(warontherocks_page, "html.parser")
    warontherocks = warontherocks_soup.find_all('div', {'class': 'all-posts'})
    # The markup is flat: the i-th post's pieces sit at parallel indices in
    # these lists (each post contributes two <a> tags, hence 2*i-1 below).
    header_ = warontherocks[0].find_all('h3')
    link_ = warontherocks[0].find_all('a')
    img_ = warontherocks[0].find_all('img')
    writer_ = warontherocks[0].find_all('h4')
    for i in range(12, 1, -1):
        new_article = Article()
        new_article.title = header_[i-1].text
        new_article.image_url = img_[i-1]['src']
        new_article.url = link_[2*i-1]['href']
        new_article.author = writer_[i-1].text
        new_article.site = "War on the Rocks"
        new_article.site_url = "https://warontherocks.com"
        save_new(new_article)

    # (A disabled Associated Press scraper used to live here as a dead
    # triple-quoted string; it has been removed.)

    # --- Lowy Institute ---------------------------------------------------
    LI_req = Request("https://www.lowyinstitute.org/the-interpreter/archive", headers={'User-Agent': 'Mozilla/5.0'})
    LI_page = urlopen(LI_req).read()
    LI_soup = BeautifulSoup(LI_page, "html.parser")
    LI = LI_soup.find_all('article')
    for headline in LI[::-1]:
        img = headline.find_all('div', {'class': 'article-thumb'})[0]
        if len(img) == 0:
            img = headline.find_all('div', {'class': 'article-thumb-wrap'})[0]
        # The image URL is embedded in the inline style attribute between
        # single quotes; collect the characters after the first quote up to
        # (but excluding) the closing one.
        word = []
        record = False
        for letter in list(img['style']):
            if record:
                word.append(letter)
            if letter == "'":
                if record:
                    word.pop()  # drop the trailing quote
                    break
                record = True
        new_article = Article()
        new_article.title = headline.find_all('h2', {'class': 'article-title txt-f4 txt-s6 mv-0 pv-xs'})[0].text
        new_article.url = "https://www.lowyinstitute.org" + headline.find_all('a', {'class': 'txt-dn'})[0]['href']
        new_article.image_url = "".join(word)
        new_article.author = headline.find_all('a', {'class': 'txt-dn'})[1].text
        new_article.site = "Lowy Institute"
        new_article.site_url = "https://www.lowyinstitute.org/the-interpreter/archive"
        save_new(new_article)

    return redirect("../")
def getQuerySet(query=None):
    """Return de-duplicated Articles whose titles contain any search term.

    ``query`` is split on spaces and each term is matched case-insensitively
    against Article.title.  Fixes a crash on the default ``query=None`` (the
    original unconditionally called ``query.split``).  As before, the result
    order is unspecified because of the set-based de-duplication.
    """
    matches = set()
    for term in (query or "").split(" "):
        for post in Article.objects.filter(Q(title__icontains=term)).distinct():
            matches.add(post)
    return list(matches)
def home(request, *args, **kwargs):
    """Render the paginated, searchable article feed (home.html)."""
    query = ""
    context = {}
    if request.GET:
        query = request.GET.get('q','')
        context['query'] = str(query)  # echo the search term back to the template
    # Most recent articles first.
    articles = sorted(getQuerySet(query), key = attrgetter('time_added') , reverse = True) #gives it most recent order
    page_num = request.GET.get('page',1)
    pgntr = Paginator(articles, 10) #divides it into pages of 10 articles
    # Error checking on the requested page number.
    try:
        articles = pgntr.page(page_num)
    except EmptyPage:
        articles = pgntr.page(pgntr.num_pages)  # past the end -> clamp to the LAST page
    except PageNotAnInteger:
        articles = pgntr.page(1)  # non-integer page -> fall back to page 1
    context['articles'] = articles
    return render(request,"home.html",context)
# Viewing each article with its comments.
class HomeDetailView(DetailView):
    """Detail page for a single Article (template: detail_article.html)."""
    model = Article
    template_name = 'detail_article.html'
class CommentView(CreateView):
    """Create-view for posting a Comment on an Article."""
    model = Comment
    template_name = 'add_comment.html'
    form_class = AddComment
    def form_valid(self,form):
        # The target article's primary key comes from the URL, not the form.
        form.instance.post_id = self.kwargs['pk']
        # Stamp the comment with the logged-in user.
        form.instance.user = self.request.user
        return super().form_valid(form)
    def get_success_url(self):  # go back to the article's detail page
        return reverse('ArticleDetail', kwargs={'pk': self.kwargs['pk']})
def contact(request):
    """Render the static contact page."""
    return render(request,"contact.html")
def about(request):
    """Render the static about page."""
    return render(request,"about.html")
xmagical/base_env.py | kevinzakka/x-magical | 19 | 12766186 | import abc
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
import pymunk as pm
from gym import spaces
import xmagical.entities as en
import xmagical.render as r
from xmagical.phys_vars import PhysicsVariablesBase, PhysVar
from xmagical.style import ARENA_ZOOM_OUT, COLORS_RGB, lighten_rgb
class PhysicsVariables(PhysicsVariablesBase):
    """Default values & randomisation ranges for key physical parameters of the environment."""

    # Each PhysVar pairs a default value with a (low, high) range —
    # presumably sampled when dynamics randomisation is enabled; confirm
    # against PhysicsVariablesBase.
    robot_pos_joint_max_force = PhysVar(5, (3.2, 5.5))
    robot_rot_joint_max_force = PhysVar(1, (0.7, 1.5))
    robot_finger_max_force = PhysVar(4, (2.5, 4.5))
    shape_trans_joint_max_force = PhysVar(1.5, (1.0, 1.8))
    shape_rot_joint_max_force = PhysVar(0.1, (0.07, 0.15))
class BaseEnv(gym.Env, abc.ABC):
# Constants for all envs.
ROBOT_RAD = 0.2
ROBOT_MASS = 1.0
SHAPE_RAD = ROBOT_RAD * 0.6
SIZE = 1.1
ARENA_BOUNDS_LRBT = [-SIZE, SIZE, -SIZE, SIZE]
ARENA_SIZE_MAX = max(ARENA_BOUNDS_LRBT)
# Minimum and maximum size of goal regions used during randomisation.
RAND_GOAL_MIN_SIZE = 0.5
RAND_GOAL_MAX_SIZE = 0.8
RAND_GOAL_SIZE_RANGE = RAND_GOAL_MAX_SIZE - RAND_GOAL_MIN_SIZE
# The following are used to standardise what "jitter" means across different
# tasks.
JITTER_PCT = 0.05
JITTER_POS_BOUND = ARENA_SIZE_MAX * JITTER_PCT / 2.0
JITTER_ROT_BOUND = JITTER_PCT * np.pi
JITTER_TARGET_BOUND = JITTER_PCT * RAND_GOAL_SIZE_RANGE / 2
def __init__(
self,
*, # Subclasses can have additional args.
robot_cls: Type[en.embodiments.NonHolonomicEmbodiment],
res_hw: Tuple[int, int] = (256, 256),
fps: float = 20.0,
phys_steps: int = 10,
phys_iter: int = 10,
max_episode_steps: Optional[int] = None,
view_mode: str = "allo",
rand_dynamics: bool = False,
) -> None:
assert view_mode in [
"allo",
"ego",
], "view_mode must be one of ['allo', 'ego']."
self.robot_cls = robot_cls
self.action_dim = robot_cls.DOF
self.phys_iter = phys_iter
self.phys_steps = phys_steps
self.fps = fps
self.res_hw = res_hw
self.max_episode_steps = max_episode_steps
self.rand_dynamics = rand_dynamics
# State/rendering (see reset()).
self._entities = None
self._space = None
self._robot = None
self._episode_steps = None
self._phys_vars = None
self._renderer_func = (
self._use_allo_cam if view_mode == "allo" else self._use_ego_cam
)
# This is for rendering and displaying.
self.renderer = None
self.viewer = None
# Set observation and action spaces.
self.observation_space = spaces.Box(
low=0, high=255, shape=(*self.res_hw, 3), dtype=np.uint8
)
self.action_space = spaces.Box(
np.array([-1] * self.action_dim, dtype=np.float32),
np.array([+1] * self.action_dim, dtype=np.float32),
dtype=np.float32,
)
self.seed()
def seed(self, seed: Optional[int] = None) -> List[int]:
"""Initialise the PRNG and return seed necessary to reproduce results.
The action space should probably be seeded in a downstream RL
application.
"""
if seed is None:
seed = np.random.randint(0, (1 << 31) - 1)
self.rng = np.random.RandomState(seed=seed)
return [seed]
def _make_robot(
self,
init_pos: Union[np.ndarray, Tuple[float, float]],
init_angle: float,
) -> en.embodiments.NonHolonomicEmbodiment:
return self.robot_cls(
radius=self.ROBOT_RAD,
mass=self.ROBOT_MASS,
init_pos=init_pos,
init_angle=init_angle,
)
def _make_shape(self, **kwargs) -> en.Shape:
return en.Shape(shape_size=self.SHAPE_RAD, mass=0.01, **kwargs)
@abc.abstractmethod
def on_reset(self) -> None:
"""Set up entities necessary for this environment, and reset any other
data needed for the env. Must create a robot in addition to any
necessary entities.
"""
pass
def add_entities(self, entities: Sequence[en.Entity]) -> None:
"""Adds a list of entities to the current entities list and sets it up.
Only intended to be used from within on_reset(). Needs to be called for
every created entity or else they will not be added to the space!
"""
for entity in entities:
if isinstance(entity, self.robot_cls):
self._robot = entity
self._entities.append(entity)
entity.setup(self.renderer, self._space, self._phys_vars)
def _use_ego_cam(self) -> None:
"""Egocentric agent view."""
self.renderer.set_cam_follow(
source_xy_world=(
self._robot.body.position.x,
self._robot.body.position.y,
),
target_xy_01=(0.5, 0.15),
viewport_hw_world=(
self._arena_h * ARENA_ZOOM_OUT,
self._arena_w * ARENA_ZOOM_OUT,
),
rotation=self._robot.body.angle,
)
def _use_allo_cam(self) -> None:
"""Allocentric 'god-mode' view."""
self.renderer.set_bounds(
left=self._arena.left * ARENA_ZOOM_OUT,
right=self._arena.right * ARENA_ZOOM_OUT,
bottom=self._arena.bottom * ARENA_ZOOM_OUT,
top=self._arena.top * ARENA_ZOOM_OUT,
)
def reset(self):
self._episode_steps = 0
# Delete old entities/space.
self._entities = []
self._space = None
self._robot = None
self._phys_vars = None
if self.renderer is None:
res_h, res_w = self.res_hw
background_color = lighten_rgb(COLORS_RGB["grey"], times=4)
self.renderer = r.Viewer(res_w, res_h, background_color)
else:
# These will get added back later.
self.renderer.reset_geoms()
self._space = pm.Space()
self._space.collision_slop = 0.01
self._space.iterations = self.phys_iter
if self.rand_dynamics:
# Randomise the physics properties of objects and the robot a
# little bit.
self._phys_vars = PhysicsVariables.sample(self.rng)
else:
self._phys_vars = PhysicsVariables.defaults()
# Set up robot and arena.
arena_l, arena_r, arena_b, arena_t = self.ARENA_BOUNDS_LRBT
self._arena = en.ArenaBoundaries(
left=arena_l, right=arena_r, bottom=arena_b, top=arena_t
)
self._arena_w = arena_r - arena_l
self._arena_h = arena_t - arena_b
self.add_entities([self._arena])
reset_rv = self.on_reset()
assert reset_rv is None, (
f"on_reset method of {type(self)} returned {reset_rv}, but "
f"should return None"
)
assert isinstance(self._robot, self.robot_cls)
assert len(self._entities) >= 1
assert np.allclose(self._arena.left + self._arena.right, 0)
assert np.allclose(self._arena.bottom + self._arena.top, 0)
self._renderer_func()
return self.render(mode="rgb_array")
def _phys_steps_on_frame(self):
spf = 1 / self.fps
dt = spf / self.phys_steps
for i in range(self.phys_steps):
for ent in self._entities:
ent.update(dt)
self._space.step(dt)
@abc.abstractmethod
def score_on_end_of_traj(self) -> float:
"""Compute the score for this trajectory.
Only called at the last step of the trajectory.
Returns:
score: number in [0, 1] indicating the worst possible
performance (0), the best possible performance (1) or something
in between. Should apply to the WHOLE trajectory.
"""
pass # pytype: disable=bad-return-type
@abc.abstractclassmethod
def get_reward(self) -> float:
"""Compute the reward for the current timestep.
This is called at the end of every timestep.
"""
pass # pytype: disable=bad-return-type
def step(self, action) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
self._robot.set_action(action)
self._phys_steps_on_frame()
self._episode_steps += 1
obs = self.render(mode="rgb_array")
reward = self.get_reward()
done = False
eval_score = 0.0
info = {}
if self.max_episode_steps is not None:
if self._episode_steps >= self.max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
if done:
eval_score = self.score_on_end_of_traj()
assert (
0 <= eval_score <= 1
), f"eval score {eval_score} out of range for env {self}"
info.update(eval_score=eval_score)
return obs, reward, done, info
def render(self, mode="human") -> Optional[np.ndarray]:
for ent in self._entities:
ent.pre_draw()
self._renderer_func()
obs = self.renderer.render()
if mode == "human":
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(obs)
else:
return obs
def close(self) -> None:
if self.renderer:
self.renderer.close()
self.renderer = None
if self.viewer:
self.viewer.close()
self.viewer = None
| 2.265625 | 2 |
py_neuromodulation/nm_decode.py | neuromodulation/py_neuromodulation | 7 | 12766187 | <filename>py_neuromodulation/nm_decode.py
from numpy.core.overrides import verify_matching_signatures
from sklearn import model_selection, metrics, linear_model, discriminant_analysis, base
from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args
from skopt import gp_minimize, Optimizer
from sklearn.linear_model import ElasticNet
from sklearn.base import clone
from sklearn.utils import class_weight
from scipy.ndimage import (binary_dilation,
binary_erosion)
from scipy.ndimage import label as label_ndimage
from imblearn.over_sampling import RandomOverSampler
import pandas as pd
import os
import json
import numpy as np
import xgboost
import _pickle as cPickle
class Decoder:
    """Train and evaluate machine-learning models on extracted features.

    Supports cross-validation per channel, per projected grid point, or on
    all channels combined, optional skopt Bayesian hyperparameter
    optimization, movement-detection-rate metrics and stacking of feature
    vectors across consecutive time samples.
    """

    # --- input data and configuration ---
    features: pd.DataFrame          # feature table incl. time/label columns
    label: np.ndarray               # target vector, one entry per sample
    model: base.BaseEstimator       # estimator implementing fit/predict
    cv_method: model_selection.BaseCrossValidator
    threshold_score: bool           # clip negative scores (e.g. r2) to zero
    mov_detection_threshold: float  # threshold for movement detection rate
    TRAIN_VAL_SPLIT: bool           # split off a validation set during fit
    RUN_BAY_OPT: bool               # run Bayesian hyperparameter optimization
    save_coef: bool                 # store fitted model coefficients
    get_movement_detection_rate: bool
    min_consequent_count: int       # min consecutive samples per movement block
    STACK_FEATURES_N_SAMPLES: bool  # stack previous samples into feature vector
    time_stack_n_samples: int
    ros: RandomOverSampler = None   # oversampler, only set for LDA models
    bay_opt_param_space: list = []
    data: np.array                  # full feature matrix (samples, features)
    ch_ind_data: dict               # channel name -> feature matrix
    grid_point_ind_data: dict       # grid point -> feature matrix
    active_gridpoints: list
    feature_names: list[str]

    # --- result containers ---
    # NOTE(review): these mutable class-level defaults are shared between
    # instances.  ch_ind_results, gridpoint_ind_results and all_ch_results
    # are only ever mutated in place (never rebound in __init__), so results
    # would leak across Decoder instances -- consider initializing them in
    # __init__ instead.
    ch_ind_results: dict = {}
    gridpoint_ind_results: dict = {}
    all_ch_results: dict = {}
    score_train: list = []
    score_test: list = []
    y_test: list = []
    y_train: list = []
    y_test_pr: list = []
    y_train_pr: list = []
    X_test: list = []
    X_train: list = []
    coef: list = []
    mov_detection_rates_test: list = []
    tprate_test: list = []
    fprate_test: list = []
    mov_detection_rates_train: list = []
    tprate_train: list = []
    fprate_train: list = []
    best_bay_opt_params: list = []
    VERBOSE: bool = False

    class ClassMissingException(Exception):
        """Raised when a train or validation split contains only one class.

        The original class body contained a bare ``print`` that executed
        once at import time instead of when the exception was raised; the
        message is now carried by the exception itself.
        """

        def __init__(self, message: str = "only one class present") -> None:
            super().__init__(message)
    def __init__(self,
                 features: pd.DataFrame,
                 label: np.ndarray,
                 label_name: str,
                 used_chs: list[str]=None,
                 model=linear_model.LinearRegression(),
                 eval_method=metrics.r2_score,
                 cv_method=model_selection.KFold(n_splits=3, shuffle=False),
                 threshold_score=True,
                 mov_detection_threshold:float =0.5,
                 TRAIN_VAL_SPLIT: bool=True,
                 RUN_BAY_OPT: bool=False,
                 STACK_FEATURES_N_SAMPLES: bool=True,
                 time_stack_n_samples: int = 5,
                 save_coef:bool =False,
                 get_movement_detection_rate:bool =False,
                 min_consequent_count:int =3,
                 bay_opt_param_space: list = [],
                 VERBOSE: bool = False) -> None:
        """Initialize here a feature file for processing
        Read settings.json nm_channels.csv and features.csv
        Read target label

        NOTE(review): the estimator/CV defaults (``LinearRegression()``,
        ``KFold(...)``, ``[]``) are mutable default arguments and therefore
        shared across calls; harmless for the model since run_CV clones it,
        but worth confirming for ``bay_opt_param_space``.

        Parameters
        ----------
        features : pd.DataFrame
            feature table; columns containing 'time' or the label name are
            excluded from the feature matrix
        label : np.ndarray
            target vector, one entry per row of ``features``
        label_name : str
            substring identifying label columns in ``features``
        used_chs : list[str], optional
            channel names whose column prefixes select per-channel features
        model : machine learning model
            model that utilizes fit and predict functions
        eval_method : sklearn metrics
            evaluation scoring method
        cv_method : sklearn model_selection method
        threshold_score : boolean
            if True set lower threshold at zero (useful for r2),
        mov_detection_threshold : float
            if get_movement_detection_rate is True, find given minimum 'threshold' respective
            consecutive movement blocks, by default 0.5
        TRAIN_VAL_SPLIT (boolean):
            if true split data into additinal validation, and run class weighted CV
        RUN_BAY_OPT (boolean):
            if true run skopt Bayesian optimization per CV fold
        STACK_FEATURES_N_SAMPLES (boolean):
            if true stack the previous ``time_stack_n_samples`` feature
            vectors onto each sample before fitting
        save_coef (boolean):
            if true, save model._coef trained coefficients
        get_movement_detection_rate (boolean):
            save detection rate and tpr / fpr as well
        min_consequent_count (int):
            if get_movement_detection_rate is True, find given 'min_consequent_count' respective
            consecutive movement blocks with minimum size of 'min_consequent_count'
        VERBOSE (boolean):
            if true print fitting/optimization progress
        """
        self.features = features
        self.label = label
        self.label_name = label_name
        # Feature matrix: every column except time and label columns, with
        # NaNs replaced by zero.
        self.data = np.nan_to_num(np.array(self.features[[col for col in self.features.columns
                                  if not (('time' in col) or (self.label_name in col))]]))
        self.used_chs = used_chs
        self.model = model
        self.eval_method = eval_method
        self.cv_method = cv_method
        self.threshold_score = threshold_score
        self.mov_detection_threshold = mov_detection_threshold
        self.TRAIN_VAL_SPLIT = TRAIN_VAL_SPLIT
        self.RUN_BAY_OPT = RUN_BAY_OPT
        self.save_coef = save_coef
        self.get_movement_detection_rate = get_movement_detection_rate
        self.min_consequent_count = min_consequent_count
        self.STACK_FEATURES_N_SAMPLES = STACK_FEATURES_N_SAMPLES
        self.time_stack_n_samples = time_stack_n_samples
        self.bay_opt_param_space = bay_opt_param_space
        self.VERBOSE = VERBOSE

        # LDA cannot be class-weighted during fit, so rebalance via
        # random oversampling instead (applied in _fit_model).
        if type(self.model) is discriminant_analysis.LinearDiscriminantAnalysis:
            self.ros = RandomOverSampler(random_state=0)
def set_data_ind_channels(self):
"""specified channel individual data
"""
self.ch_ind_data = {}
for ch in self.used_chs:
self.ch_ind_data[ch] = np.nan_to_num(
np.array(
self.features[
[col for col in self.features.columns if col.startswith(ch)]
]
)
)
def set_CV_results(self, attr_name, contact_point=None):
"""set CV results in respectie nm_decode attributes
The reference is first stored in obj_set, and the used lateron
Parameters
----------
attr_name : string
is either all_ch_results, ch_ind_results, gridpoint_ind_results
contact_point : object, optional
usually an int specifying the grid_point or string, specifying the used channel,
by default None
"""
if contact_point is not None:
getattr(self, attr_name)[contact_point] = {}
obj_set = getattr(self, attr_name)[contact_point]
else:
obj_set = getattr(self, attr_name)
obj_set["score_train"] = self.score_train
obj_set["score_test"] = self.score_test
obj_set["y_test"] = self.y_test
obj_set["y_train"] = self.y_train
obj_set["y_test_pr"] = self.y_test_pr
obj_set["y_train_pr"] = self.y_train_pr
obj_set["X_train"] = self.X_train
obj_set["X_test"] = self.X_test
if self.save_coef:
obj_set["coef"] = self.coef
if self.get_movement_detection_rate:
obj_set["mov_detection_rate_test"] = self.mov_detection_rates_test
obj_set["mov_detection_rate_train"] = self.mov_detection_rates_train
obj_set["fprate_test"] = self.fprate_test
obj_set["fprate_train"] = self.fprate_train
obj_set["tprate_test"] = self.tprate_test
obj_set["tprate_train"] = self.tprate_train
if self.RUN_BAY_OPT is True:
obj_set["best_bay_opt_params"] = self.best_bay_opt_params
def run_CV_caller(self, feature_contacts: str="ind_channels"):
"""[summary]
Parameters
----------
feature_contacts : str, optional
[description], by default "ind_channels"
"""
valid_feature_contacts = ["ind_channels", "all_channels_combined", "grid_points"]
if feature_contacts not in valid_feature_contacts:
raise ValueError(f"{feature_contacts} not in {valid_feature_contacts}")
if feature_contacts == "grid_points":
for grid_point in self.active_gridpoints:
self.run_CV(self.grid_point_ind_data[grid_point], self.label)
self.set_CV_results('gridpoint_ind_results', contact_point=grid_point)
return self.gridpoint_ind_results
if feature_contacts == "ind_channels":
for ch in self.used_chs:
self.run_CV(self.ch_ind_data[ch], self.label)
self.set_CV_results('ch_ind_results', contact_point=ch)
return self.ch_ind_results
if feature_contacts == "all_channels_combined":
dat_combined = np.concatenate(list(self.ch_ind_data.values()), axis=1)
self.run_CV(dat_combined, self.label)
self.set_CV_results('all_ch_results', contact_point=None)
return self.all_ch_results
    def set_data_grid_points(self, cortex_only=False, subcortex_only=False):
        """Read the run_analysis
        Projected data has the shape (samples, grid points, features)

        Parameters
        ----------
        cortex_only : bool, optional
            keep only grid points whose name starts with "gridcortex"
        subcortex_only : bool, optional
            keep only grid points whose name starts with "gridsubcortex"
        """
        # activate_gridpoints stores cortex + subcortex data.
        # Grid point identifiers are the first two '_'-separated tokens of
        # a column name, e.g. "gridcortex_3" from "gridcortex_3_<feature>".
        self.active_gridpoints = np.unique(
            [i.split('_')[0] + "_" + i.split('_')[1]
             for i in self.features.columns
             if "grid" in i]
        )

        if cortex_only:
            self.active_gridpoints = [
                i
                for i in self.active_gridpoints
                if i.startswith("gridcortex")
            ]

        if subcortex_only:
            self.active_gridpoints = [
                i
                for i in self.active_gridpoints
                if i.startswith("gridsubcortex")
            ]

        # Feature names are taken from the first active grid point by
        # stripping its "<gridpoint>_" prefix; assumes all grid points
        # share the same feature set.
        self.feature_names = [
            i[len(self.active_gridpoints[0]+"_"):]
            for i in self.features.columns
            if self.active_gridpoints[0]+"_" in i
        ]

        # One NaN-free feature matrix per grid point, selected by column
        # name substring match.
        self.grid_point_ind_data = {}
        self.grid_point_ind_data = {
            grid_point : np.nan_to_num(self.features[
                    [i
                     for i in self.features.columns
                     if grid_point in i]
                ]
            )
            for grid_point in self.active_gridpoints
        }
def get_movement_grouped_array(self, prediction, threshold=0.5, min_consequent_count=5):
"""Return given a 1D numpy array, an array of same size with grouped consective blocks
Parameters
----------
prediction : np.array
numpy array of either predictions or labels, that is going to be grouped
threshold : float, optional
threshold to be applied to 'prediction', by default 0.5
min_consequent_count : int, optional
minimum required consective samples higher than 'threshold', by default 5
Returns
-------
labeled_array : np.array
grouped vector with incrementing number for movement blocks
labels_count : int
count of individual movement blocks
"""
mask = prediction > threshold
structure = [True] * min_consequent_count # used for erosion and dilation
eroded = binary_erosion(mask, structure)
dilated = binary_dilation(eroded, structure)
labeled_array, labels_count = label_ndimage(dilated)
return labeled_array, labels_count
def calc_movement_detection_rate(self, y_label, prediction, threshold=0.5, min_consequent_count=3):
"""Given a label and prediction, return the movement detection rate on the basis of
movements classified in blocks of 'min_consequent_count'.
Parameters
----------
y_label : [type]
[description]
prediction : [type]
[description]
threshold : float, optional
threshold to be applied to 'prediction', by default 0.5
min_consequent_count : int, optional
minimum required consective samples higher than 'threshold', by default 3
Returns
-------
mov_detection_rate : float
movement detection rate, where at least 'min_consequent_count' samples where high in prediction
fpr : np.array
sklearn.metrics false positive rate np.array
tpr : np.array
sklearn.metrics true positive rate np.array
"""
pred_grouped, _ = self.get_movement_grouped_array(prediction, threshold, min_consequent_count)
y_grouped, labels_count = self.get_movement_grouped_array(y_label, threshold, min_consequent_count)
hit_rate = np.zeros(labels_count)
pred_group_bin = np.array(pred_grouped>0)
for label_number in range(1, labels_count + 1): # labeling starts from 1
hit_rate[label_number-1] = np.sum(pred_group_bin[np.where(y_grouped == label_number)[0]])
try:
mov_detection_rate = np.where(hit_rate>0)[0].shape[0] / labels_count
except ZeroDivisionError:
print("no movements in label")
return 0, 0, 0
# calculating TPR and FPR: https://stackoverflow.com/a/40324184/5060208
CM = metrics.confusion_matrix(y_label, prediction)
TN = CM[0][0]
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
fpr = FP / (FP + TN)
tpr = TP / (TP + FN)
return mov_detection_rate, fpr, tpr
def init_cv_res(self) -> None:
self.score_train = []
self.score_test = []
self.y_test = []
self.y_train = []
self.y_test_pr = []
self.y_train_pr = []
self.X_test = []
self.X_train = []
self.coef = []
if self.get_movement_detection_rate is True:
self.mov_detection_rates_test = []
self.tprate_test = []
self.fprate_test = []
self.mov_detection_rates_train = []
self.tprate_train = []
self.fprate_train = []
if self.RUN_BAY_OPT is True:
self.best_bay_opt_params = []
@staticmethod
def append_previous_n_samples(X: np.ndarray, y: np.ndarray, n: int = 5):
"""
stack feature vector for n samples
"""
time_arr = np.zeros([X.shape[0]-n, int(n*X.shape[1])])
for time_idx, time_ in enumerate(np.arange(n, X.shape[0])):
for time_point in range(n):
time_arr[time_idx, time_point*X.shape[1]:(time_point+1)*X.shape[1]] = \
X[time_-time_point,:]
return time_arr, y[n:]
@staticmethod
def append_samples_val(X_train, y_train, X_val, y_val, n):
X_train, y_train = Decoder.append_previous_n_samples(
X_train,
y_train,
n=n
)
X_val, y_val = Decoder.append_previous_n_samples(
X_val,
y_val,
n=n
)
return X_train, y_train, X_val, y_val
    def _fit_model(self, model, X_train, y_train):
        """Fit ``model`` on the training data and return the fitted model.

        If ``TRAIN_VAL_SPLIT`` is enabled, the last 30 % of the
        (unshuffled) training data is split off as a validation set used
        for early stopping.

        Raises
        ------
        Decoder.ClassMissingException
            if either split contains no positive sample
        """
        if self.TRAIN_VAL_SPLIT is True:
            X_train, X_val, y_train, y_val = \
                model_selection.train_test_split(
                    X_train, y_train, train_size=0.7, shuffle=False)
            # NOTE(review): y_val.sum(0) is equivalent to y_val.sum() for a
            # 1-D label array -- presumably a typo; confirm.
            if y_train.sum() == 0 or y_val.sum(0) == 0:
                raise Decoder.ClassMissingException
            if type(model) is xgboost.sklearn.XGBClassifier:
                # Balance classes via per-sample weights.
                classes_weights = class_weight.compute_sample_weight(
                    class_weight='balanced',
                    y=y_train
                )
                # NOTE(review): early_stopping_rounds / eval_metric as fit()
                # arguments are deprecated in recent xgboost releases --
                # verify against the pinned xgboost version.
                model.fit(
                    X_train, y_train, eval_set=[(X_val, y_val)],
                    early_stopping_rounds=7, sample_weight=classes_weights,
                    verbose=self.VERBOSE, eval_metric="logloss")
            else:
                # might be necessary to adapt for other classifiers
                model.fit(
                    X_train, y_train, eval_set=[(X_val, y_val)])
        else:
            # check for LDA; and apply rebalancing
            if type(model) is discriminant_analysis.LinearDiscriminantAnalysis:
                X_train, y_train = self.ros.fit_resample(X_train, y_train)
            if type(model) is xgboost.sklearn.XGBClassifier:
                model.fit(X_train, y_train, eval_metric="logloss")  # to avoid warning
            else:
                model.fit(X_train, y_train)
        return model
def _set_movement_detection_rates(
self,
y_test: np.ndarray,
y_test_pr: np.ndarray,
y_train: np.ndarray,
y_train_pr: np.ndarray
):
mov_detection_rate, fpr, tpr = self.calc_movement_detection_rate(
y_test,
y_test_pr,
self.mov_detection_threshold,
self.min_consequent_count
)
self.mov_detection_rates_test.append(mov_detection_rate)
self.tprate_test.append(tpr)
self.fprate_test.append(fpr)
mov_detection_rate, fpr, tpr = self.calc_movement_detection_rate(
y_train,
y_train_pr,
self.mov_detection_threshold,
self.min_consequent_count
)
self.mov_detection_rates_train.append(mov_detection_rate)
self.tprate_train.append(tpr)
self.fprate_train.append(fpr)
def run_CV(self, data=None, label=None):
"""Evaluate model performance on the specified cross validation.
If no data and label is specified, use whole feature class attributes.
Parameters
----------
data (np.ndarray):
data to train and test with shape samples, features
label (np.ndarray):
label to train and test with shape samples, features
Returns
-------
cv_res : float
mean cross validation result
"""
self.init_cv_res()
if data is None:
print("use all channel data as features")
data = self.data
label = self.label
for train_index, test_index in self.cv_method.split(self.data):
model_train = clone(self.model)
X_train, y_train = data[train_index, :], label[train_index]
X_test, y_test = data[test_index], label[test_index]
if self.STACK_FEATURES_N_SAMPLES is True:
X_train, y_train, X_test, y_test = Decoder.append_samples_val(
X_train,
y_train,
X_test,
y_test,
n=self.time_stack_n_samples
)
if y_train.sum() == 0 or y_test.sum() == 0: # only one class present
continue
if self.RUN_BAY_OPT is True:
X_train_bo, X_test_bo, y_train_bo, y_test_bo = \
model_selection.train_test_split(
X_train, y_train, train_size=0.7, shuffle=False)
if y_train_bo.sum() == 0 or y_test_bo.sum() == 0:
print("could not start Bay. Opt. with no labels > 0")
continue
params_bo = self.run_Bay_Opt(
X_train_bo,
y_train_bo,
X_test_bo,
y_test_bo,
rounds=10
)
# set bay. opt. obtained best params to model
params_bo_dict = {}
for i in range(len(params_bo)):
setattr(
model_train,
self.bay_opt_param_space[i].name,
params_bo[i]
)
params_bo_dict[self.bay_opt_param_space[i].name] = params_bo[i]
self.best_bay_opt_params.append(params_bo_dict)
# fit model
try:
model_train = self._fit_model(model_train, X_train, y_train)
except Decoder.ClassMissingException:
continue
if self.save_coef:
self.coef.append(model_train.coef_)
y_test_pr = model_train.predict(X_test)
y_train_pr = model_train.predict(X_train)
sc_te = self.eval_method(y_test, y_test_pr)
sc_tr = self.eval_method(y_train, y_train_pr)
if self.threshold_score is True:
if sc_tr < 0:
sc_tr = 0
if sc_te < 0:
sc_te = 0
if self.get_movement_detection_rate is True:
self._set_movement_detection_rates(
y_test,
y_test_pr,
y_train,
y_train_pr
)
self.score_train.append(sc_tr)
self.score_test.append(sc_te)
self.X_train.append(X_train)
self.X_test.append(X_test)
self.y_train.append(y_train)
self.y_test.append(y_test)
self.y_train_pr.append(y_train_pr)
self.y_test_pr.append(y_test_pr)
return np.mean(self.score_test)
    def run_Bay_Opt(self,
                    X_train,
                    y_train,
                    X_test,
                    y_test,
                    rounds=30,
                    base_estimator="GP",
                    acq_func="EI",
                    acq_optimizer="sampling",
                    initial_point_generator="lhs"
                    ):
        """Run skopt bayesian optimization
        skopt.Optimizer:
        https://scikit-optimize.github.io/stable/modules/generated/skopt.Optimizer.html#skopt.Optimizer
        example:
        https://scikit-optimize.github.io/stable/auto_examples/ask-and-tell.html#sphx-glr-auto-examples-ask-and-tell-py

        Special attention needs to be made with the run_CV output,
        some metrics are minimized (MAE), some are maximized (r^2)

        Parameters
        ----------
        X_train: np.ndarray
        y_train: np.ndarray
        X_test: np.ndarray
        y_test: np.ndarray
        rounds : int, optional
            optimizing rounds, by default 10
        base_estimator : str, optional
            surrogate model, used as optimization function instead of cross validation, by default "GP"
        acq_func : str, optional
            function to minimize over the posterior distribution, by default "EI"
        acq_optimizer : str, optional
            method to minimize the acquisition function, by default "sampling"
        initial_point_generator : str, optional
            sets a initial point generator, by default "lhs"

        Returns
        -------
        skopt result parameters
        """
        def get_f_val(model_bo):
            # Fit the candidate model and score it on the held-out split.
            # NOTE(review): if fitting raises ClassMissingException the
            # exception is swallowed and predict() is called on an unfitted
            # model, which will raise -- confirm intended handling.
            try:
                model_bo = self._fit_model(model_bo, X_train, y_train)
            except Decoder.ClassMissingException:
                pass

            return self.eval_method(y_test, model_bo.predict(X_test))

        opt = Optimizer(
            self.bay_opt_param_space,
            base_estimator=base_estimator,
            acq_func=acq_func,
            acq_optimizer=acq_optimizer,
            initial_point_generator=initial_point_generator
        )

        # Ask-and-tell loop: propose a parameter point, evaluate it, feed
        # the objective value back into the optimizer.
        for _ in range(rounds):
            next_x = opt.ask()
            # set model values
            model_bo = clone(self.model)
            for i in range(len(next_x)):
                setattr(model_bo, self.bay_opt_param_space[i].name, next_x[i])
            f_val = get_f_val(model_bo)
            res = opt.tell(next_x, f_val)
            if self.VERBOSE:
                print(f_val)

        # res is here automatically appended by skopt
        return res.x
def save(self, feature_path: str, feature_file: str, str_save_add=None) -> None:
"""Save decoder object to pickle
"""
# why is the decoder not saved to a .json?
if str_save_add is None:
PATH_OUT = os.path.join(feature_path, feature_file, feature_file + "_ML_RES.p")
else:
PATH_OUT = os.path.join(feature_path, feature_file, feature_file +
"_" + str_save_add + "_ML_RES.p")
print("model being saved to: " + str(PATH_OUT))
with open(PATH_OUT, 'wb') as output: # Overwrites any existing file.
cPickle.dump(self, output)
| 2 | 2 |
Day03_AoC.py | ricardofitas/Advent-of-code-2021 | 0 | 12766188 | <filename>Day03_AoC.py<gh_stars>0
c = open("__21_d03.txt")
lin = c.readlines()
## Part 1
klin = [list([lin[i].split("\n")[0] for i in range(len(lin))][j]) for j in range(len(lin))]
k2 = [1 if [klin[jj][len(klin[0])-ii-1] for jj in range(len(lin))].count('1') > [klin[jj][len(klin[0])-ii-1] for jj in range(len(lin))].count('0') else 0 for ii in range(len(klin[0]))]
prod = sum([k2[ii]*2**ii for ii in range(len(k2))])*sum([abs(k2[ii]-1)*2**ii for ii in range(len(k2))])
print(prod)
## Part 2
ii = 0
klin22 = klin.copy()
while len(klin) > 1:
klin2 = '1' if [klin[jj][ii] for jj in range(len(klin))].count('1') >= [klin[jj][ii] for jj in range(len(klin))].count('0') else '0'
klin = [klin[jj] for jj in range(len(klin)) if klin[jj][ii] == klin2]
ii += 1
k_1 = klin
klin = klin22.copy()
ii = 0
while len(klin) > 1:
klin3 = '1' if [klin[jj][ii] for jj in range(len(klin))].count('1') < [klin[jj][ii] for jj in range(len(klin))].count(
'0') else '0'
klin = [klin[jj] for jj in range(len(klin)) if klin[jj][ii] == klin3]
ii += 1
k_1 = k_1[0]
k_2 = klin[0]
i3 = sum([int(k_1[ii])*2**(len(k_1)-ii-1) for ii in range(len(k_1))])
j3 = sum([int(k_2[ii])*2**(len(k_2)-ii-1) for ii in range(len(k_2))])
print(i3*j3)
| 2.90625 | 3 |
src/config.py | cityiot/electric-bus-data-collector | 1 | 12766189 | # -*- coding: utf-8 -*-
# Copyright 2019 Tampere University
# This software was developed as a part of the CityIoT project: https://www.cityiot.fi/english
# This source code is licensed under the 3-clause BSD license. See license.txt in the repository root directory.
# Author(s): <NAME> <<EMAIL>>
'''
Helper module for reading configuration files in the JSON format
'''
import json
import utils
def loadConfig( fileName ):
    '''
    Reads the named file from the configuration directory and converts it
    to JSON. The conversion result is returned.

    Parameters:
        fileName: name of the file inside the application's conf directory.
    '''
    confFile = utils.getAppDir() / 'conf' / fileName
    # Explicit UTF-8 so JSON parsing does not depend on the platform's
    # default locale encoding.
    with open( confFile, 'r', encoding='utf-8' ) as file:
        return json.load( file )
quiz_bot/cli/utils.py | livestreamx/quiz-bot | 1 | 12766190 | <reponame>livestreamx/quiz-bot
import io
from typing import Optional, Type
from pydantic import BaseSettings
def get_settings(file: Optional[io.StringIO], settings_type: Type[BaseSettings]) -> BaseSettings:
    """Build settings from a raw-JSON stream, or from defaults/environment.

    If ``file`` is None, ``settings_type`` is instantiated with no
    arguments; otherwise the stream contents are parsed via ``parse_raw``.
    """
    if file is None:
        return settings_type()
    return settings_type.parse_raw(file.read())
| 2.390625 | 2 |
depth_calling/utilities.py | cariaso/Cyrius | 30 | 12766191 | <gh_stars>10-100
#!/usr/bin/env python3
#
# Cyrius: CYP2D6 genotyper
# Copyright (c) 2019-2020 Illumina, Inc.
#
# Author: <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
import pysam
def parse_region_file(region_file):
    """Return the regions for depth counting, grouped by region type.

    Each line of the bed-like file is
    ``chrom start end name region_type gc``; the result maps every
    region_type to a list of ``((chrom, start, end, name), gc)`` tuples
    with start/end converted to int and gc kept as a string.
    """
    regions_by_type = {}
    with open(region_file) as read_region:
        for line in read_region:
            fields = line.strip().split()
            nchr, region_start, region_end, region_name = fields[0], fields[1], fields[2], fields[3]
            region_type, region_gc = fields[4], fields[5]
            entry = ((nchr, int(region_start), int(region_end), region_name), region_gc)
            regions_by_type.setdefault(region_type, []).append(entry)
    return regions_by_type
def parse_gmm_file(gmm_file):
    """Return the GMM parameters stored in the input file.

    Each line is ``<region> <name> key:value [key:value ...]``; the result
    maps region -> name -> list of the values (the part after the last
    ':' in each field). Existing entries are not overwritten.
    """
    parameters = {}
    with open(gmm_file) as read_gmm:
        for line in read_gmm:
            fields = line.strip().split()
            region, name = fields[0], fields[1]
            values = [field.split(":")[-1] for field in fields[2:]]
            parameters.setdefault(region, {}).setdefault(name, values)
    return parameters
def open_alignment_file(alignment_file, reference_fasta=None, index_filename=None):
    """Open a BAM or CRAM alignment file for reading with pysam.

    Files whose name ends in "cram" are opened in CRAM mode with the
    reference FASTA (needed for CRAM decoding); everything else is opened
    as BAM. An explicit index file may be supplied in either case.
    """
    open_kwargs = {"index_filename": index_filename}
    mode = "rb"
    if alignment_file.endswith("cram"):
        mode = "rc"
        open_kwargs["reference_filename"] = reference_fasta
    return pysam.AlignmentFile(alignment_file, mode, **open_kwargs)
| 2.515625 | 3 |
custom_components/senseme/version.py | bdraco/senseme-hacs | 0 | 12766192 | """Version for senseme-hacs."""
__version__ = "2.2.3"
| 0.847656 | 1 |
server_python/config.py | dkvirus/py-novel | 145 | 12766193 | # config.py
# encoding:utf-8
DEBUG = True
JSON_AS_ASCII = False
| 1.117188 | 1 |
SIRQIs/resources/modules/classes.py | chance-alvarado/SIRQIs-IBM | 0 | 12766194 | <gh_stars>0
"""Specify classes defining within and between individual dynamics.
The following classes and function define the dynamics of infection within and
between individuals in a simulated population. Time steps are progressed and
infection dynamics are simulated through the simulate_time_step function
acting on a GeneralPopulation, Quarantine, and Isolation object. Additional
class and function information is below:
Individual:
- Individual objects symbolize a unique person within a population.
Individuals have flags and timers that are updated from interactions
specified as methods within the GeneralPopulation, Quarantine, and
Isolation classes.
GeneralPopulation:
- The GeneralPopulation object holds a set of unique individuals and
is responsible for infecting, testing, and tracing contacts for individuals
based on specified parameters. Individuals are removed from general
population due to quarantine or isolation before an inevitable return.
Quarantine:
- The Quarantine object is responsible for holding individuals removed
from the general population due to contact tracing. Individuals may
or may not be infected which could result in possible transfer to
isolation. Individuals will either transfer to isolation or return
to the general population after a duration based on specified parameters.
Isolation:
- The Isolation object holds individuals removed from the general
population due to a positive test result - either through surveillance
testing or during a quarantine stay. Individuals are returned to the
general population after a duration based on specified parameters
simulate_time_step:
- Dynamics specified with classes are progressed one time step (day) per
iteration of this function. Logs of individuals within specific states
are updated after all interaction and dynamics are simulated for the
current time step.
More information can be found in this project's README file.
Explore this repository at:
https://github.com/chance-alvarado/SIRQIs-IBM/
Author:
<NAME>
LinkedIn: https://www.linkedin.com/in/chance-alvarado/
GitHub: https://github.com/chance-alvarado/
"""
# Imports
import numpy as np
# Define necessary classes
class Individual():
    """Define class for individual-level dynamics.

    An Individual carries binary state flags, timers and a predetermined
    viral-load trajectory; the flags/timers are updated externally by the
    GeneralPopulation/Quarantine/Isolation dynamics.
    """

    def __init__(self, flag_set):
        """Initialize necessary attributes.

        Parameters
        ----------
        flag_set : dict
            attribute-name -> value pairs applied on top of the defaults,
            e.g. {'susceptible': True} or {'infected': True}.
        """
        # Define infection timer (days since infection)
        self.infection_timer = 0

        # Generate predetermined viral load curve
        self.viral_load_curve = self.generate_viral_load_curve()

        # Store current viral load
        self.viral_load = 0

        # Define binary state flags
        self.susceptible = False
        self.infected = False
        self.detectable = False
        self.infectious = False
        self.recovered = False

        # Define testing timer and flags
        self.testable = True
        self.awaiting_results = False
        self.days_till_results = 0

        # Define isolation timer and flag
        self.to_be_isolated = False
        self.using_isolation_resources = False
        self.isolation_timer = 0

        # Define quarantine timer and flags
        self.to_be_quarantined = False
        self.using_quarantine_resources = False
        self.to_be_transferred = False
        self.days_till_quarantine = 0
        self.days_till_transfer = 0
        self.quarantine_timer = 0

        # Historic flags (whether the state was ever entered)
        self.ever_infected = False
        self.ever_isolated = False
        self.ever_quarantined = False

        # Initialize attributes with specified dictionary values
        for key in flag_set:
            setattr(self, key, flag_set[key])

    def set_flags(self, flag_dict):
        """Set all specified flags to those in dict."""
        for key in flag_dict.keys():
            setattr(self, key, flag_dict[key])

    def increment_timer(self, timer_dict):
        """Increment all timers by value specified.

        Each key names an existing numeric attribute; its current value is
        increased by the dict value (may be negative).
        """
        for key in timer_dict.keys():
            current_val = getattr(self, key)

            setattr(self, key, current_val + timer_dict[key])

    def generate_viral_load_curve(self):
        """Create random viral load curve and evaluate on discrete time.

        Builds a 28-day piecewise-linear log viral-load trajectory: zero
        before onset t_0, linear rise to a random peak, then linear decay
        truncated where the curve crosses load 3.

        NOTE(review): randomness comes from the global numpy RNG, so
        reproducibility depends on seeding np.random externally -- confirm.
        """
        # Array of infection progression period
        t = np.arange(0, 28, dtype='int')

        # Array to hold viral load at day of infection
        viral_load = np.zeros(len(t), dtype='float')

        # Generate random values necessary for construction:
        # onset, peak time, end time and peak magnitude.
        t_0 = np.random.uniform(2.5, 3.5)
        t_peak = t_0 + 0.2 + np.random.gamma(1.8)
        t_f = t_peak + np.random.uniform(5, 10)
        v_peak = np.random.uniform(7, 11)
        # Time where the decay line crosses load 3 (curve ends there).
        t_end = (3 - v_peak) * ((t_f - t_peak) / (6 - v_peak)) + t_peak

        # Define piecewise functions
        def viral_load_p1(t):
            # Rising segment: load 3 at onset up to v_peak at t_peak.
            return ((v_peak - 3) / (t_peak - t_0)) * (t - t_0) + 3

        def viral_load_p2(t):
            # Decaying segment: from v_peak down through load 6 at t_f.
            return ((6 - v_peak) / (t_f - t_peak)) * (t - t_peak) + v_peak

        # Define bounds for each piece
        bounds_p1 = ((t >= t_0) & (t < t_peak))
        bounds_p2 = ((t >= t_peak) & (t <= t_end))

        # Calculate viral load
        viral_load[bounds_p1] = viral_load_p1(t[bounds_p1])
        viral_load[bounds_p2] = viral_load_p2(t[bounds_p2])

        # Eliminate infection at last time step
        viral_load[-1] = 0

        # Return results
        return viral_load
class GeneralPopulation():
"""Define class for general population dynamics."""
def __init__(self, parameters):
    """Build the population from a parameter dict and log its initial state.

    Args:
        parameters: dict of simulation parameters (e.g. num_susceptible,
            num_infected, initial_infection_distribution, thresholds,
            contact distributions); every key becomes an attribute.
    """
    # Daily history logs of state counts (one entry appended per log_state)
    self.total_susceptible = []
    self.total_infected = []
    self.total_infectious = []
    self.total_recovered = []
    # Unpack dictionaries of parameters and assign attributes
    for key in parameters:
        setattr(self, key, parameters[key])
    # Initialize population
    self.initialize_population()
    # Log initial state
    self.log_state()
def initialize_population(self):
    """Create the initial susceptible and infected subpopulations.

    Builds ``self.members`` as the union of both groups and assigns each
    infected individual a randomly sampled number of days already spent
    infected, drawn from ``self.initial_infection_distribution``.
    """
    # Generate susceptible subpopulation
    susceptible_flag_dict = {'susceptible': True}
    susceptible_subpopulation = {Individual(flag_set=susceptible_flag_dict)
                                 for n in range(self.num_susceptible)}
    # Generate infected subpopulation
    infected_flag_dict = {'infected': True}
    infected_subpopulation = {Individual(flag_set=infected_flag_dict)
                              for n in range(self.num_infected)}
    # Shift initial infection back one day, because progress_infection()
    # below advances every timer by one before reading the load curve
    infection_range = list(
        range(-1, len(self.initial_infection_distribution) - 1))
    # Sample days-infected for each infected individual.
    # BUG FIX: the sample size must match the infected subpopulation; the
    # original used self.num_susceptible, which left infected individuals
    # without a timer whenever num_susceptible < num_infected.
    days_infected = np.random.choice(
        a=infection_range,
        size=self.num_infected,
        replace=True,
        p=self.initial_infection_distribution)
    # Set days infected for each individual
    for individual, num_days_infected in zip(infected_subpopulation,
                                             days_infected):
        setattr(individual, 'infection_timer', num_days_infected)
    # Assign individuals to members attribute
    self.members = susceptible_subpopulation.union(infected_subpopulation)
    # Progress infection to set proper flags/timers
    self.progress_infection()
def fetch_subpopulation(self, flag_dict, from_subpopulation=False):
    """Return the set of individuals whose attributes all match flag_dict.

    By default the whole membership (self.members) is searched; pass any
    non-bool collection of individuals via from_subpopulation to restrict
    the search (the default False acts as a sentinel).
    """
    if isinstance(from_subpopulation, bool):
        source = self.members
    else:
        source = from_subpopulation
    # Keep only individuals matching every requested attribute value
    return {member for member in source
            if all(getattr(member, name) == wanted
                   for name, wanted in flag_dict.items())}
def add_to_population(self, individuals_to_be_added):
    """Merge discharged individuals back into the general population."""
    for individual in individuals_to_be_added:
        self.members.add(individual)
def log_state(self):
    """Append today's per-state population counts to the history logs."""
    # Same fetch/append order as one entry per list, per day
    for log, flag in ((self.total_susceptible, 'susceptible'),
                      (self.total_infected, 'infected'),
                      (self.total_infectious, 'infectious'),
                      (self.total_recovered, 'recovered')):
        log.append(len(self.fetch_subpopulation({flag: True})))
def progress_infection(self):
    """Advance every infected individual one day along their load curve.

    Updates viral_load from the predetermined curve and derives the
    infected/recovered, infectious, and detectable flags from it.
    """
    # Fetch infected subpopulation
    infected_subpopulation = self.fetch_subpopulation({'infected': True})
    # Index through all infected individuals:
    for individual in infected_subpopulation:
        # Yesterday's load, used to detect the downward zero-crossing
        previous_viral_load = individual.viral_load
        # Increment infection timer
        individual.increment_timer({'infection_timer': 1})
        # Look up today's load on the precomputed 28-day curve.
        # NOTE(review): no bounds check — assumes the load always returns
        # to 0 (triggering recovery) before the curve is exhausted; confirm.
        individual.viral_load = individual.viral_load_curve[
            individual.infection_timer]
        viral_load = individual.viral_load
        # Recovery: load dropped back to zero after having been positive
        if (viral_load == 0) and (previous_viral_load > viral_load):
            individual.set_flags({'infected': False, 'recovered': True})
        # Infectious while at/above the infectiousness threshold
        if viral_load >= self.infectious_threshold:
            individual.set_flags({'infectious': True})
        else:
            individual.set_flags({'infectious': False})
        # Detectable (by testing) while at/above the detection threshold
        if viral_load >= self.detectable_threshold:
            individual.set_flags({'detectable': True})
        else:
            individual.set_flags({'detectable': False})
def infect_susceptible(self):
    """Infect susceptible individuals based on current population state.

    Two routes are applied each day: random contacts with currently
    infectious members, then an independent per-person probability of
    infection from outside the population.
    """
    # Fetch necessary counts
    num_susceptible = len(self.fetch_subpopulation({'susceptible': True}))
    num_infectious = len(self.fetch_subpopulation({'infectious': True}))
    # Nothing to do if nobody is susceptible
    if (num_susceptible == 0):
        return
    # Everyone still in the general population can be contacted
    # (including infectious and recovered individuals)
    interactable_individuals = self.members
    # Total number of contacts made by all infectious individuals today
    num_infectious_contacts = sum(np.random.choice(
        a=list(range(len(self.daily_contacts_distribution))),
        size=num_infectious,
        replace=True,
        p=self.daily_contacts_distribution))
    # Select individuals to have contact with an infectious individual
    # (sampled with replacement, so one person can be contacted twice)
    contact_with_infectious = np.random.choice(
        a=list(interactable_individuals),
        size=num_infectious_contacts,
        replace=True
    )
    # Of those contacted, keep only the susceptible ones; the set()
    # collapses duplicates, so repeated contacts do not compound risk
    susceptible_contact_with_infectious = self.fetch_subpopulation(
        {'susceptible': True},
        from_subpopulation=set(contact_with_infectious)
    )
    # Number of susceptible who had contact with infectious
    num_susceptible_contact_with_infectious = len(
        susceptible_contact_with_infectious)
    # One Bernoulli trial per contacted susceptible
    infection_probability = min(1,
                                self.probability_infection_given_contact)
    infection_statuses = np.random.choice(
        a=[True, False],
        size=num_susceptible_contact_with_infectious,
        replace=True,
        p=[infection_probability, 1-infection_probability]
    )
    # Flip susceptible -> infected for the successful trials
    for infection_status, individual in zip(
            infection_statuses, susceptible_contact_with_infectious):
        individual.set_flags(
            {'infected': infection_status,
             'susceptible': not infection_status}
        )
    # Remaining susceptible individuals face outside-infection risk
    remaining_susceptible = list(self.fetch_subpopulation(
        {'susceptible': True}))
    num_remaining_susceptible = len(remaining_susceptible)
    # Infection statuses from outside infection
    outside_infection_statuses = np.random.choice(
        a=[True, False],
        size=num_remaining_susceptible,
        replace=True,
        p=[self.probability_outside_infection,
           1-self.probability_outside_infection]
    )
    for infection_status, individual in zip(outside_infection_statuses,
                                            remaining_susceptible):
        individual.set_flags(
            {'infected': infection_status,
             'susceptible': not infection_status}
        )
    # Record first-ever infection for newly infected individuals
    newly_infected = self.fetch_subpopulation((
        {'infected': True, 'ever_infected': False}))
    for individual in newly_infected:
        individual.set_flags({'ever_infected': True})
def test_population(self):
    """Administer daily tests to a random sample of eligible individuals.

    The number of tests is proportion_tested_daily of the population,
    capped by how many individuals are eligible (testable and not already
    awaiting results). Currently-detectable testees are flagged for
    isolation once their result arrives.
    """
    # Fetch individuals eligible to test and not awaiting results
    subpopulation_to_be_tested = self.fetch_subpopulation(
        {'testable': True, 'awaiting_results': False})
    # Number of individuals being tested on this day
    num_in_population = len(self.members)
    num_to_be_tested = min(
        [int(num_in_population * self.proportion_tested_daily),
         len(subpopulation_to_be_tested)])
    # Random result-turnaround time for each test administered
    days_till_results_list = np.random.choice(
        a=list(range(len(self.days_till_results_distribution))),
        size=num_to_be_tested,
        replace=True,
        p=self.days_till_results_distribution)
    # Randomly select individuals to test (without replacement)
    tested_subpopulation = np.random.choice(a=list(
        subpopulation_to_be_tested),
        size=num_to_be_tested,
        replace=False)
    # Index through all tested individuals
    for individual, days_till_results in zip(tested_subpopulation,
                                             days_till_results_list):
        # Prevent further testing until results are returned
        individual.set_flags({'awaiting_results': True,
                              'days_till_results': days_till_results})
        # Tests are treated as perfectly accurate on detectable loads
        if individual.detectable:
            individual.set_flags({'to_be_isolated': True})
def progress_test_results(self):
    """Advance result timers and pull out individuals due for isolation.

    Returns:
        Set of individuals whose positive result arrived today; they are
        removed from the general population and should be admitted to
        isolation by the caller.
    """
    # Individuals whose positive result arrives today
    individuals_to_be_isolated = self.fetch_subpopulation(
        {'to_be_isolated': True, 'days_till_results': 0})
    # Remove individuals to be isolated from general population
    self.members.difference_update(individuals_to_be_isolated)
    # Members with results due today who were not flagged positive
    # (fetched after the removal above, so the isolated are excluded;
    # also matches members who never tested — a harmless no-op for them)
    individuals_retestable = self.fetch_subpopulation(
        {'days_till_results': 0})
    # Allow individuals to be retested
    for individual in individuals_retestable:
        individual.set_flags({'awaiting_results': False})
    # Count down the timers of everyone still awaiting results
    subpopulation_awaiting_results = self.fetch_subpopulation(
        {'awaiting_results': True})
    for individual in subpopulation_awaiting_results:
        individual.increment_timer({'days_till_results': -1})
    # Return individuals to be isolated
    return individuals_to_be_isolated
def trace_contacts(self, num_individuals_to_be_isolated):
    """Trace contacts of newly isolated cases and schedule quarantines.

    Args:
        num_individuals_to_be_isolated: number of cases isolated today;
            each generates a random number of traceable contacts.

    Returns:
        Set of individuals whose quarantine delay expired today; they are
        removed from the general population and should be admitted to
        quarantine by the caller.
    """
    # Individuals who can still be pulled into quarantine
    eligible_to_be_quarantined = self.fetch_subpopulation(
        {'to_be_quarantined': False, 'testable': True})
    # Trace new contacts if there are individuals eligible
    if len(eligible_to_be_quarantined) != 0:
        # Total contacts reported by today's isolated cases
        num_contacts = sum(
            np.random.choice(a=range(
                len(self.daily_contacts_distribution)),
                size=num_individuals_to_be_isolated,
                replace=True,
                p=self.daily_contacts_distribution))
        # Each contact is reached independently with fixed probability
        num_successful_contacts = 0
        for contact_num in range(num_contacts):
            if np.random.choice(a=[True, False],
                                p=[self.probability_successful_contact,
                                   (1-self.probability_successful_contact)]
                                ):
                num_successful_contacts += 1
        # Randomly select individuals for quarantine (with replacement,
        # so the same person may be selected more than once)
        new_individuals_to_be_quarantined = np.random.choice(
            a=list(eligible_to_be_quarantined),
            size=num_successful_contacts,
            replace=True)
        # Random contact-tracing delay for each individual
        days_till_quarantine = np.random.choice(
            a=range(len(self.days_till_quarantine_distribution)),
            size=len(new_individuals_to_be_quarantined),
            replace=True,
            p=self.days_till_quarantine_distribution)
        # Index through all individuals to be quarantined and set flags
        for individual, num_days in zip(new_individuals_to_be_quarantined,
                                        days_till_quarantine):
            # Set flags
            individual.set_flags({'to_be_quarantined': True,
                                  'days_till_quarantine': num_days})
    # Count down the delay for everyone awaiting quarantine
    waiting_for_quarantine = self.fetch_subpopulation(
        {'to_be_quarantined': True})
    # Set of individuals whose delay expires today
    individuals_to_be_quarantined = set()
    for individual in waiting_for_quarantine:
        individual.increment_timer({'days_till_quarantine': -1})
        if individual.days_till_quarantine == 0:
            individuals_to_be_quarantined.add(individual)
    # Remove individuals to be quarantined
    self.members.difference_update(individuals_to_be_quarantined)
    # Return individuals to be quarantined
    return individuals_to_be_quarantined
class Isolation():
"""Define class for isolation dynamics."""
def __init__(self, parameters):
    """Initialize the isolation ward from a parameter dict.

    Args:
        parameters: dict of isolation parameters (e.g. days_in_isolation,
            eligible_for_retesting,
            probability_using_isolation_resources); every key becomes an
            attribute.
    """
    # Set of individuals currently in isolation
    self.members = set()
    # Daily history log of the total number isolated
    self.total_isolated = []
    # Daily history log of isolated individuals consuming resources
    self.isolated_using_resources = []
    # Unpack dictionaries of parameters and assign attributes
    for key in parameters:
        setattr(self, key, parameters[key])
    # Log initial state
    self.log_state()
def fetch_subpopulation(self, flag_dict):
    """Return the isolated members whose attributes all match flag_dict."""
    return {member for member in self.members
            if all(getattr(member, name) == wanted
                   for name, wanted in flag_dict.items())}
def log_state(self):
    """Append today's isolation counts to the history logs."""
    self.total_isolated.append(len(self.members))
    # How many of the isolated are consuming isolation resources
    in_use = len(self.fetch_subpopulation({'using_isolation_resources': True}))
    self.isolated_using_resources.append(in_use)
def admit_to_isolation(self, individuals_to_be_isolated):
    """Admit individuals and set their flags/timers for the isolation stay.

    NOTE(review): admission marks individuals recovered and clears
    infected/infectious immediately — i.e. the model assumes anyone
    isolated plays no further part in transmission; confirm intent.
    """
    # Dictionary of flags and timers for isolated individuals
    isolated_dict = {
        'infected': False,
        'infectious': False,
        'recovered': True,
        'testable': self.eligible_for_retesting,
        'awaiting_results': False,
        'to_be_isolated': False,
        'isolation_timer': self.days_in_isolation,
        'ever_isolated': True
    }
    # Each admit independently uses isolation resources with fixed probability
    using_resources_list = np.random.choice(
        a=[True, False],
        size=len(individuals_to_be_isolated),
        p=[self.probability_using_isolation_resources,
           (1-self.probability_using_isolation_resources)])
    # Iterate through individuals and set flags/timers
    for individual, using_resources in zip(individuals_to_be_isolated,
                                           using_resources_list):
        # Set appropriate flags and timers
        individual.set_flags(isolated_dict)
        individual.set_flags(
            {'using_isolation_resources': using_resources})
    # Include admitted individuals in set of all members
    self.members.update(individuals_to_be_isolated)
def progress_isolation(self):
    """Tick every isolation timer down one day; remove and return the expired.

    Returns:
        Set of individuals whose isolation stay ended today (timer hit 0).
    """
    discharged = set()
    for person in self.members:
        # One day served
        person.increment_timer({'isolation_timer': -1})
        if person.isolation_timer == 0:
            discharged.add(person)
    # Drop the discharged from the ward
    self.members.difference_update(discharged)
    return discharged
class Quarantine():
"""Define class for quarantine dynamics."""
def __init__(self, parameters):
    """Initialize the quarantine ward from a parameter dict.

    Args:
        parameters: dict of quarantine parameters (e.g. days_in_quarantine,
            detectable_threshold, days_till_results_distribution,
            probability_using_quarantine_resources); every key becomes an
            attribute.
    """
    # Set of individuals currently in quarantine
    self.members = set()
    # Daily history log of the total number quarantined
    self.total_quarantined = []
    # Daily history log of quarantined individuals consuming resources
    self.quarantined_using_resources = []
    # Unpack dictionaries of parameters and assign attributes
    for key in parameters:
        setattr(self, key, parameters[key])
    # Log initial state
    self.log_state()
def fetch_subpopulation(self, flag_dict, from_subpopulation=False):
    """Return individuals matching flag_dict from the chosen source.

    from_subpopulation defaults to the sentinel False, meaning search
    self.members; pass any non-bool collection of individuals to restrict
    the search to it.
    """
    if isinstance(from_subpopulation, bool):
        source = self.members
    else:
        source = from_subpopulation
    return {member for member in source
            if all(getattr(member, name) == wanted
                   for name, wanted in flag_dict.items())}
def log_state(self):
    """Append today's quarantine counts to the history logs."""
    self.total_quarantined.append(len(self.members))
    # How many of the quarantined are consuming quarantine resources
    busy = len(self.fetch_subpopulation({'using_quarantine_resources': True}))
    self.quarantined_using_resources.append(busy)
def admit_to_quarantine(self, individuals_to_be_quarantined):
    """Admit individuals to quarantine and set appropriate flags/timers.

    Admits split into three groups with different flag sets: those already
    flagged for isolation (scheduled for transfer), infected individuals
    who will become detectable while quarantined (also transferred), and
    everyone else, who simply serves out the quarantine period.

    NOTE(review): this method mutates the caller's set (difference_update
    near the end), and infected admits are marked recovered immediately —
    confirm both are intended.
    """
    # Flags for admits already flagged for isolation
    isolated_dict = {'infected': False,
                     'recovered': True,
                     'testable': True,
                     'awaiting_results': False,
                     'to_be_quarantined': False,
                     'to_be_transferred': True,
                     'quarantine_timer': self.days_in_quarantine,
                     'ever_quarantined': True
                     }
    # Flags for infected admits who will never become detectable
    infected_not_detected_dict = {'infected': False,
                                  'recovered': True,
                                  'testable': True,
                                  'awaiting_results': False,
                                  'days_till_results': 0,
                                  'to_be_quarantined': False,
                                  'quarantine_timer':
                                      self.days_in_quarantine,
                                  'ever_quarantined': True
                                  }
    # Flags for all remaining admits
    remaining_individuals_dict = {'awaiting_results': False,
                                  'days_till_results': 0,
                                  'to_be_quarantined': False,
                                  'quarantine_timer':
                                      self.days_in_quarantine,
                                  'ever_quarantined': True
                                  }
    # Admit all individuals
    self.members.update(individuals_to_be_quarantined)
    # Each admit independently consumes quarantine resources
    using_resources_list = np.random.choice(
        a=[True, False],
        size=len(individuals_to_be_quarantined),
        p=[self.probability_using_quarantine_resources,
           (1-self.probability_using_quarantine_resources)])
    # Iterate through individuals and set flag
    for individual, using_resources in zip(individuals_to_be_quarantined,
                                           using_resources_list):
        individual.set_flags(
            {'using_quarantine_resources': using_resources})
    # Group 1: admits already scheduled for isolation
    individuals_eventually_isolated = self.fetch_subpopulation(
        {'to_be_isolated': True},
        from_subpopulation=individuals_to_be_quarantined)
    # Set appropriate flags/timers for individuals being transferred
    for individual in individuals_eventually_isolated:
        individual.set_flags(isolated_dict)
        # Transfer once the pending test result arrives (at least 1 day)
        days_till_transfer = max(1, individual.days_till_results)
        # Set transfer date
        individual.set_flags({
            'days_till_transfer': days_till_transfer,
            'days_till_results': 0})
    # Group 2: infected admits not yet flagged for isolation
    individuals_to_check_for_transfer = self.fetch_subpopulation(
        {'to_be_isolated': False, 'infected': True},
        from_subpopulation=individuals_to_be_quarantined
    )
    # Index through all individuals and check for potential transfer
    for individual in individuals_to_check_for_transfer:
        # Will their viral load still rise (i.e. become detectable)?
        if self.ever_detectable(individual):
            # Schedule the transfer for the first detectable day
            individual.days_till_transfer = self.find_days_till_transfer(
                individual)
            # Set appropriate flags/timers
            individual.set_flags(isolated_dict)
        # Infected who will never be detected just serve quarantine
        else:
            individual.set_flags(infected_not_detected_dict)
    # Group 3: everyone else (this mutates the caller's set)
    individuals_to_be_quarantined.difference_update(
        individuals_eventually_isolated,
        individuals_to_check_for_transfer)
    # Set appropriate flags/timers for remaining admissions
    for individual in individuals_to_be_quarantined:
        individual.set_flags(remaining_individuals_dict)
def ever_detectable(self, individual):
    """Return True if the individual's viral load is still rising.

    A rising load is used as a proxy for "will become detectable while
    quarantined": today's load is compared with yesterday's value on the
    predetermined curve.
    """
    day = individual.infection_timer
    yesterday = individual.viral_load_curve[day - 1]
    # bool() keeps the original's plain-Python return type
    return bool(individual.viral_load > yesterday)
def find_days_till_transfer(self, individual):
    """Return days until a quarantined, infected individual is transferred.

    The transfer day is the first day the viral-load curve reaches the
    detectable threshold, expressed relative to today, plus a random
    testing turnaround drawn from days_till_results_distribution.

    Raises:
        StopIteration: if the curve never reaches the threshold; callers
            are expected to gate on ever_detectable() first.
    """
    # Days already spent infected
    days_infected = individual.infection_timer
    # First day at/above the detection threshold.
    # FIX: use >= so this agrees with the 'detectable' flag logic in
    # GeneralPopulation.progress_infection (the original used a strict >),
    # and index directly instead of filter() + list.index().
    first_day_detectable = next(
        day for day, load in enumerate(individual.viral_load_curve)
        if load >= self.detectable_threshold)
    # Random lag between the detecting test and its result
    testing_lag = np.random.choice(
        a=list(range(len(self.days_till_results_distribution))),
        p=self.days_till_results_distribution)
    return first_day_detectable - days_infected + testing_lag
def progress_quarantine(self):
    """Advance quarantine/transfer timers and pull out today's leavers.

    Returns:
        Tuple (individuals_to_be_transferred, individuals_to_be_discharged):
        those moving to isolation today and those whose quarantine ended.
    """
    # Individuals whose transfer-to-isolation is due today
    individuals_to_be_transferred = self.fetch_subpopulation(
        {'to_be_transferred': True, 'days_till_transfer': 0})
    # Remove those to be isolated from members
    self.members.difference_update(individuals_to_be_transferred)
    # Individuals whose quarantine stay has ended
    individuals_to_be_discharged = self.fetch_subpopulation(
        {'quarantine_timer': 0})
    # Remove those being discharged from members
    self.members.difference_update(individuals_to_be_discharged)
    # Count down the stay timer for everyone still quarantined
    # (leavers were removed first, so their timers are not decremented)
    for individual in self.members:
        individual.increment_timer({'quarantine_timer': -1})
    # Count down the transfer delay for future isolation admissions
    individuals_eventually_isolated = self.fetch_subpopulation(
        {'to_be_transferred': True})
    for individual in individuals_eventually_isolated:
        individual.increment_timer({'days_till_transfer': -1})
    # Return sets of individuals being transferred/discharged
    return individuals_to_be_transferred, individuals_to_be_discharged
# Define function to simulate the progression of a single time step
def simulate_time_step(general_population, quarantine, isolation):
    """Simulate one discrete day across population, quarantine, isolation.

    Order matters: infection dynamics run first, then testing and contact
    tracing, then movements between the three compartments, and finally
    the daily state logging.
    """
    # Advance disease progression for the currently infected
    general_population.progress_infection()
    # Infect susceptible
    general_population.infect_susceptible()
    # Test individuals
    general_population.test_population()
    # Progress test results and fetch individuals to be isolated
    individuals_to_be_isolated = general_population.progress_test_results()
    # Trace contacts and find individuals to be quarantined
    individuals_to_be_quarantined = general_population.trace_contacts(
        len(individuals_to_be_isolated))
    # Admit individuals to quarantine
    quarantine.admit_to_quarantine(individuals_to_be_quarantined)
    # Progress quarantine
    (individuals_to_be_transferred,
     individuals_discharged_from_quarantine) = quarantine.progress_quarantine()
    # Progress isolation
    individuals_discharged_from_isolation = isolation.progress_isolation()
    # Positive tests plus quarantine transfers all enter isolation
    all_individuals_being_isolated = individuals_to_be_isolated.union(
        individuals_to_be_transferred)
    # Admit individuals to isolation
    isolation.admit_to_isolation(all_individuals_being_isolated)
    # All individuals being readmitted back in to general population
    all_individuals_discharged = individuals_discharged_from_isolation.\
        union(individuals_discharged_from_quarantine)
    # Readmit individuals
    general_population.add_to_population(all_individuals_discharged)
    # Update logs of number of individuals in different states
    general_population.log_state()
    quarantine.log_state()
    isolation.log_state()
| 2.96875 | 3 |
rul_prediction/poly_reg.py | inovex/RCIS2021-degradation-bearing-vessels | 2 | 12766195 | from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
def fit_poly_reg(X, y, degree=1, memory_path=None) -> Pipeline:
    """Fit a polynomial regression pipeline (PolynomialFeatures -> LinearRegression).

    Args:
        X: training feature matrix (2-D, samples x features, as expected
            by sklearn estimators).
        y: training target values.
        degree: polynomial expansion degree (1 gives plain linear regression).
        memory_path: optional joblib cache directory passed to the pipeline's
            `memory` argument to cache fitted transformers.

    Returns:
        The fitted sklearn Pipeline.
    """
    polyreg = make_pipeline(PolynomialFeatures(degree), LinearRegression(), memory=memory_path)
    polyreg.fit(X, y)
    return polyreg
| 2.703125 | 3 |
examples/eg1.py | petercorke/bdsim | 64 | 12766196 | #!/usr/bin/env python3
import bdsim

sim = bdsim.BDSim(animation=True)  # create simulator
print(sim)

bd = sim.blockdiagram()  # create an empty block diagram

# define the blocks
demand = bd.STEP(T=1, pos=(0, 0), name='demand')
# FIX: renamed from `sum` to avoid shadowing the built-in sum()
summer = bd.SUM('+-', pos=(1, 0))
gain = bd.GAIN(10, pos=(1.5, 0))
plant = bd.LTI_SISO(0.5, [2, 1], name='plant', pos=(3, 0))
scope = bd.SCOPE(styles=['k', 'r--'], pos=(4, 0))

# connect the blocks: demand -> summer -> gain -> plant, with the plant
# output fed back to the summer and both signals displayed on the scope
bd.connect(demand, summer[0], scope[1])
bd.connect(plant, summer[1])
bd.connect(summer, gain)
bd.connect(gain, plant)
bd.connect(plant, scope[0])

bd.compile()  # check the diagram
bd.report()  # list all blocks and wires

sim.set_options(animation=True, graphics=True)
out = sim.run(bd, 5, watch=[plant, demand])  # simulate for 5s
sim.savefig(scope, 'scope0')
sim.done(block=False)

print(out)
| 2.59375 | 3 |
resource/pypi/cryptography-1.7.1/src/_cffi_src/commoncrypto/sectrust.py | hipnusleo/Laserjet | 0 | 12766197 | <gh_stars>0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <Security/SecTrust.h>
"""
TYPES = """
typedef ... *SecTrustRef;
typedef uint32_t SecTrustResultType;
enum {
kSecTrustResultInvalid,
kSecTrustResultProceed,
kSecTrustResultDeny,
kSecTrustResultUnspecified,
kSecTrustResultRecoverableTrustFailure,
kSecTrustResultFatalTrustFailure,
kSecTrustResultOtherError
};
"""
FUNCTIONS = """
OSStatus SecTrustEvaluate(SecTrustRef, SecTrustResultType *);
OSStatus SecTrustCopyAnchorCertificates(CFArrayRef *);
"""
MACROS = """
/* The first argument changed from CFArrayRef to CFTypeRef in 10.8, so this
* has to go here for compatibility.
*/
OSStatus SecTrustCreateWithCertificates(CFTypeRef, CFTypeRef, SecTrustRef *);
"""
CUSTOMIZATIONS = """
"""
| 1.304688 | 1 |
tensorflow_io/core/python/experimental/mongodb_writer_ops.py | michaelbanfield/io | 1 | 12766198 | <reponame>michaelbanfield/io<filename>tensorflow_io/core/python/experimental/mongodb_writer_ops.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MongoDBWriter"""
import json
from urllib.parse import urlparse
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
from tensorflow_io.core.python.experimental import serialization_ops
class MongoDBWriter:
    """Write documents to mongoDB.

    The writer can be used to store documents in mongoDB while dealing with tensorflow
    based models and inference outputs. Without loss of generality, consider an ML
    model that is being used for inference. The outputs of inference can be modelled into
    a structured record by enriching the schema with additional information( for ex: metadata
    about input data and the semantics of the inference etc.) and can be stored in mongo
    collections for persistence or future analysis.

    To make a connection and write the documents to the mongo collections,
    the `tfio.experimental.mongodb.MongoDBWriter` API can be used.

    Example:

    >>> URI = "mongodb://mongoadmin:default_password@localhost:27017"
    >>> DATABASE = "tfiodb"
    >>> COLLECTION = "test"
    >>> writer = tfio.experimental.mongodb.MongoDBWriter(
            uri=URI, database=DATABASE, collection=COLLECTION
        )
    >>> for i in range(1000):
    ...     data = {"key{}".format(i): "value{}".format(i)}
    ...     writer.write(data)
    """

    def __init__(self, uri, database, collection):
        """Initialize the writer with the following parameters

        Args:
            uri: The uri of the mongo server or replicaset to connect to.
                - To connect to a MongoDB server with username and password
                based authentication, the following uri pattern can be used.
                Example: `"mongodb://mongoadmin:default_password@localhost:27017"`.

                - Connecting to a replica set is much like connecting to a
                standalone MongoDB server. Simply specify the replica set name
                using the `?replicaSet=myreplset` URI option.
                Example: "mongodb://host01:27017,host02:27017,host03:27017/?replicaSet=myreplset"

                Additional information on writing uri's can be found here:
                - [libmongoc uri docs](http://mongoc.org/libmongoc/current/mongoc_uri_t.html)
                - [mongodb uri docs](https://docs.mongodb.com/manual/reference/connection-string/)
            database: The database in the standalone standalone MongoDB server or a replica set
                to connect to.
            collection: The collection into which the documents are written.
        """
        self.uri = uri
        self.database = database
        self.collection = collection
        # Opaque native resource handle created by the C++ kernel; it holds
        # the open client/collection state used by the write/delete ops
        self.resource = core_ops.io_mongo_db_writable_init(
            uri=self.uri, database=self.database, collection=self.collection,
        )

    def write(self, doc):
        """Insert a single document.

        Args:
            doc: a JSON-serializable dict inserted as one mongoDB document.
        """
        core_ops.io_mongo_db_writable_write(
            resource=self.resource, record=json.dumps(doc)
        )

    def _delete_many(self, doc):
        """Delete all documents matching the given filter.

        Args:
            doc: a JSON-serializable filter document; every matching
                document in the collection is removed.
        """
        core_ops.io_mongo_db_writable_delete_many(
            resource=self.resource, record=json.dumps(doc)
        )
| 2.375 | 2 |
youkube/compoents/youkube_compoent.py | lyrl/youkube | 1 | 12766199 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2016年8月9日13:16:54
import datetime
import json
import os
import youkube.compoents.model as model
import youkube.compoents.youtube_compoent as youtube
import youkube.util as util
import time
import youkube.constants as constants
import youkube.compoents.youku_compoent as youkucom
logger = util.get_logger('Youkube')
"""
配置文件示例
user youtube要订阅的用户
video_dir 视频文件保存路径
thumbnail_dir 视频缩略图/封面图保存路径
sqlite3_file sqlite3数据库文件路
youku_client_id 优酷client id
youku_access_token 优酷access_token
{
"users": [
{"user":"greatscottlab", "channel_name": "GreateScoot", "youku_prefix": "GreateScoot - ", "desc": "模拟电路数字电路", "category": "科技"},
{"user":"DarduinMyMenlon", "channel_name": "Dota2 WTF", "youku_prefix" : "", "desc" : "Dota2 Wtf", "category": "游戏"}
{"user":"Larva2011ani", "channel_name": "Larva ", "youku_prefix" : "Larva - ", "desc" : "红虫黄虫", "category": "搞笑"}
],
"video_dir": "/root/video",
"thumbnail_dir": "/root/thumbnail",
"sqlite3_file": "/root/sqlite3.db",
"youku_client_id": "97c24e4be2c1383a",
"youku_access_token": "<KEY>"
}
"""
class Youkube(object):
def __init__(self, config_file_path):
    """Load the JSON config and wire up repo, YouTube and Youku components.

    Args:
        config_file_path: path to the JSON configuration file (see the
            module-level example for the expected keys).
    """
    with open(config_file_path) as file:
        self.config = json.loads(file.read())

    if not self.config:
        raise YoukubeException("配置文件读取失败!")

    # NOTE(review): YoukubeException and YoukubeRepo are not imported in
    # the visible part of this module — presumably defined elsewhere in
    # the file/package; confirm.
    self.repo = YoukubeRepo(self.config['sqlite3_file'])
    self.youtube = youtube.YoutubeCompoentImpl()
    self.youku = youkucom.Youku(self.config['youku_client_id'], self.config['youku_access_token'])
def run(self):
    """Main loop: clean up uploaded files, retry pending uploads, fetch new videos.

    Runs forever, sleeping 60 seconds between passes.
    """
    while True:
        # Remove local files whose upload already completed
        logger.info("[Youkube] - 检查并准备删除已上传成功的视频文件...")
        self.del_uploaded_video_file()
        # Retry any videos stuck before/at the upload stage
        logger.info("[Youkube] - 检查未完成上传的视频...")
        self.retry_upload_task()
        # Fetch and process newly published videos
        logger.info("[Youkube] - 抓取最新视频...")
        self.fetch_new_videos()
        logger.info(u"[Youkube] - 所有视频处理完成,等待1分钟重新获取新视频!")
        # Poll again in one minute
        time.sleep(60)
def fetch_new_videos(self):
    """Collect video links for every configured item and process new ones.

    Each config item is either a YouTube user ('type' == 'user') or a
    channel; duplicate and already-stored links are filtered out first.

    NOTE(review): reads config key 'items', but the module-level example
    config uses 'users' — confirm the expected schema.
    """
    for i in self.config['items']:
        if i['type'] == 'user':
            links = self.youtube.fetch_user_page_video_links(i['user'])
        else:
            links = self.youtube.fetch_channel_page_video_links(i['channel'])

        self.fetch_new_video(self.rm_dup_link(links), i)
def rm_dup_link(self, links):
    """Return links de-duplicated (order preserved) and not already in the repo.

    Args:
        links: iterable of video URL strings, possibly with duplicates.

    Returns:
        List of unique links for which `self.repo.find_by_url` is falsy.
    """
    # FIX: track seen links in a set instead of testing membership on the
    # result list (O(n) per link), and query the repo at most once per
    # distinct link.
    seen = set()
    unique = []
    for link in links:
        if link in seen:
            continue
        seen.add(link)
        if not self.repo.find_by_url(link):
            unique.append(link)
    return unique
def fetch_new_video(self, uniquelist, use_info):
    """Download each new video link and hand it to the upload pipeline.

    Args:
        uniquelist: de-duplicated list of new video URLs.
        use_info: the per-channel config entry, e.g.::

            {
                "user": "greatscottlab",
                "channel_name": "GreateScoot",
                "youku_prefix": "GreateScoot - ",
                "desc": "...",
                "category": "..."
            }
    """
    for link in uniquelist:
        # Dict of basic video metadata, provided by youtube-dl
        info_dict = self.youtube.fetch_video_base_info(link)

        # Persist the video record to the database
        try:
            video_entity = self.__save_new_video_info_to_db__(info_dict, use_info)
        except Exception as e:
            logger.error(u"保存失败! reason :" + e.__str__())
            continue

        logger.debug(u"发现新视频 %s 时长 %s " % (video_entity.title, video_entity.duration))
        logger.info(u"视频 %s 下载任务创建成功,正在下载!" % video_entity.title)
        self.repo.chg_status(video_entity, constants.VIDEO_STATUS_DOWNLOADING)
        self.youtube.download(link, self.config['video_dir'], video_entity.ext, info_dict['url'])
        logger.info(u"视频 %s 下载成功,准备上传!" % video_entity.title)
        # NOTE(review): this path has no separator between video_dir and
        # the file name, while del_uploaded_video_file inserts one — the
        # configured video_dir presumably must end with a slash; confirm.
        video_entity.filesize = os.path.getsize(
            "%s%s.%s" % (self.config['video_dir'], util.md5encode(video_entity.url), video_entity.ext))
        self.repo.save(video_entity)
        self.repo.chg_status(video_entity, constants.VIDEO_STATUS_DOWNLOADED)

        self.retry_upload_task()
        self.del_uploaded_video_file()
    def retry_upload_task(self):
        """Upload every video whose status marks it as pending upload.

        For each pending video: refresh its recorded file size, mark it
        UPLOADING, push it to Youku, then mark it UPLOADED.  A failed upload
        is logged and skipped so the remaining videos are still attempted.
        Finishes by deleting local files of successfully uploaded videos.
        """
        need_upload_video = self.repo.find_need_upload_video()
        for n in need_upload_video:
            # The downloader stores files as <video_dir><md5(url)>.<ext>.
            n.filesize = os.path.getsize(
                "%s%s.%s" % (self.config['video_dir'], util.md5encode(n.url), n.ext))
            self.repo.save(n)
            logger.info(u"[Youkube] - 视频 %s 开始上传!" % n.title)
            self.repo.chg_status(n, constants.VIDEO_STATUS_UPLOADING)
            try:
                self.youku.upload(
                    "%s%s.%s" % (self.config['video_dir'], util.md5encode(n.url), n.ext),
                    n.youku_prefix + n.title, "", n.desc, n.category)
            except Exception as e:
                logger.warn(u"[Youkube] - 视频上传失败! : " + e.__str__())
                continue
            logger.info(u"[Youkube] - 视频 %s 上传完成!" % n.title)
            self.repo.chg_status(n, constants.VIDEO_STATUS_UPLOADED)
        self.del_uploaded_video_file()
def del_uploaded_video_file(self):
uploaded_videps = self.repo.find_uploaded_video()
for v in uploaded_videps:
file_paht = self.config['video_dir'] + '/' + v.url_hash + '.' + v.ext
is_exist = os.path.exists(file_paht)
if is_exist:
logger.info(u"[Youkube] - 视频 %s 已上传成功 ! 视频文件 %s 准备删除!" % (v.title, file_paht))
os.remove(file_paht)
logger.info(u"[Youkube] - 视频 %s 视频文件 %s 删除成功!" % (v.title, file_paht))
def __save_new_video_info_to_db__(self, info_dict, user_info):
"""
{
"user":"greatscottlab",
"channel_name": "GreateScoot",
"youku_prefix": "GreateScoot - ",
"desc": "模拟电路数字电路"},
"""
date_time_format = '%Y%m%d'
video = model.Video()
video.url = info_dict['webpage_url']
video.url_hash = util.md5encode(video.url)
video.uploader = info_dict['uploader']
video.title = info_dict['title']
video.like_count = info_dict['like_count']
video.dislike_count = info_dict['dislike_count']
video.duration = info_dict['duration']
video.format_note = info_dict['format_note']
video.height = info_dict['height']
video.width = info_dict['width']
video.resolution = info_dict['resolution']
video.view_count = info_dict['view_count']
video.video_id = info_dict['id']
video.format = info_dict['format']
video.filesize = 0 # info_dict['filesize']
video.ext = info_dict['ext']
video.thumbnail = info_dict['thumbnail']
try:
video.upload_date = datetime.datetime.strptime(info_dict['upload_date'], date_time_format)
except Exception:
video.upload_date = datetime.datetime.now()
video.create_time = datetime.datetime.now()
video.update_time = datetime.datetime.now()
try:
video.user = user_info['user']
except Exception:
video.user = user_info['channel']
video.channel_name = user_info['channel_name']
video.youku_prefix = user_info['youku_prefix']
video.desc = user_info['desc']
video.category = user_info['category']
self.repo.save(video)
return video
class YoukubeRepo(object):
    """Database access layer for video and task records.

    Attributes:
        sqlite3_file (str): location of the SQLite database file.
    """

    def __init__(self, sqlite3_file):
        if not sqlite3_file:
            raise YoukubeRepoException("参数 sqlite3_file 不能为空!")
        model.deferred_db.init(sqlite3_file)
        try:
            model.deferred_db.connect()
        except Exception as e:
            # BUG FIX: Exception objects have no `.message` attribute in
            # Python 3 — use str(e).
            raise YoukubeRepoException("数据库连接失败: " + str(e))
        if not model.Video.table_exists():
            model.Video.create_table()

    def save(self, video):
        """Persist a video entity (insert or update).

        Args:
            video (model.Video): video entity.
        """
        video.save()

    def update(self, video):
        """Update an already-persisted video entity.

        Args:
            video (model.Video): video entity.

        NOTE(review): peewee's instance-level ``update()`` builds an update
        query rather than saving this instance; callers in this file use
        ``save()``/``chg_status()`` instead — confirm this method is needed.
        """
        video.update()

    def find_by_url_hash(self, url_hash):
        """Return the video with the given URL hash, or None if absent."""
        try:
            # BUG FIX: the query result was previously discarded, so this
            # method always returned None.
            return model.Video.get(model.Video.url_hash == url_hash)
        except Exception:
            return None

    def find_by_url(self, url):
        """Return the video with the given URL, or None if absent."""
        try:
            return model.Video.get(model.Video.url == url)
        except Exception:
            return None

    def chg_status(self, video_entity, status):
        """Set a video's status, refresh its update timestamp and save it."""
        video_entity.status = status
        video_entity.update_time = datetime.datetime.now()
        video_entity.save()

    def find_need_upload_video(self):
        """Return videos whose status lies between 3 and 5 (inclusive)."""
        # BUG FIX: Python's `and` cannot be overloaded, so the previous
        # expression `a >= 3 and a <= 5` evaluated to just `a <= 5` —
        # peewee requires the bitwise `&` operator for compound conditions.
        return model.Video.select().where(
            (model.Video.status >= 3) & (model.Video.status <= 5))

    def find_uploaded_video(self):
        """Return videos that were uploaded successfully (status == 6)."""
        return model.Video.select().where(model.Video.status == 6)
class YoukubeRepoException(Exception):
    """Raised for database / repository level failures."""

    def __init__(self, msg):
        # Call the base initializer so `args`, pickling and the default
        # traceback rendering behave correctly (the original skipped it).
        super(YoukubeRepoException, self).__init__(msg)
        self.message = msg

    def __str__(self):
        return self.message
class YoukubeException(Exception):
    """Generic application-level error for the Youkube service."""

    def __init__(self, msg):
        # Stored so __str__ can render the original message.
        self.message = msg

    def __str__(self):
        # NOTE(review): the trailing "| 1.96875 | 2" tokens below look like a
        # data-extraction artifact fused onto this line — confirm against the
        # original source; the method almost certainly should just
        # `return self.message`.
        return self.message | 1.96875 | 2
mfr/extensions/pdf/exceptions.py | yacchin1205/RDM-modular-file-renderer | 36 | 12766200 | from mfr.core.exceptions import ExporterError
class PillowImageError(ExporterError):
    """Image-related errors raised from :mod:`mfr.extensions.pdf` that
    relate to the Pillow library should inherit from ``PillowImageError``.
    """
    # Discriminator recorded on the attribute stack for this error family.
    __TYPE = 'pdf_pillow'
    def __init__(self, message, *args, export_format: str='', detected_format: str='',
                 original_exception: Exception=None, **kwargs):
        """Create the error.

        :param message: human-readable error description.
        :param export_format: format the export was targeting.
        :param detected_format: format actually detected for the input.
        :param original_exception: underlying exception, if any.
        """
        super().__init__(message, *args, exporter_class='image', **kwargs)
        self.export_format = export_format
        self.detected_format = detected_format
        # _format_original_exception is provided by ExporterError — presumably
        # it serializes the exception for reporting; confirm in mfr.core.
        self.original_exception = self._format_original_exception(original_exception)
        self.attr_stack.append([self.__TYPE, {
            'export_format': self.export_format,
            'detected_format': self.detected_format,
            'original_exception': self.original_exception,
        }])
| 2.484375 | 2 |
object_tracker/applications/data_update/roi_update_regions.py | aidoop/CoboMarkerTracking | 2 | 12766201 | import json
class ROIUpdateRegions:
    """Collects ROI (region-of-interest) update records and serializes them
    to JSON.

    Each record has the shape::

        {"id": ..., "region": {"lt": {"x": ..., "y": ...},
                               "rb": {"x": ..., "y": ...}}}
    """

    def __init__(self):
        # BUG FIX: the region list used to be a *class* attribute, so every
        # instance shared (and clobbered) the same list.  Make it a proper
        # per-instance attribute instead.
        self.roi_region_list = []

    def add_roi_region(self, id, ltx, lty, rbx, rby):
        """Append a region identified by *id* with left-top corner
        ``(ltx, lty)`` and right-bottom corner ``(rbx, rby)``."""
        region = {
            "id": id,
            "region": {
                "lt": {
                    "x": ltx,
                    "y": lty
                },
                "rb": {
                    "x": rbx,
                    "y": rby
                }
            }
        }
        self.roi_region_list.append(region)

    def print_roi_regions(self):
        """Dump all collected regions to stdout as a JSON array."""
        print(json.dumps(self.roi_region_list))
###############################################################################
# sample codes
###############################################################################
if __name__ == '__main__':
    # Small demonstration: collect two regions and dump them as JSON.
    demo = ROIUpdateRegions()
    demo.add_roi_region('abc', 100, 200, 500, 600)
    demo.add_roi_region('uuu', 300, 500, 600, 900)
    demo.print_roi_regions()
| 2.671875 | 3 |
MLCustomDownload.py | VaseSimion/Finance | 1 | 12766202 | <reponame>VaseSimion/Finance
import ExtractData as Ed
from datetime import datetime
from datetime import timedelta
import csv
import DatabaseStocks as Ds
import yfinance as yf
import math
from tqdm import tqdm
# This writes the dataset and the verification file, i.e. the dates and
# stocks that are downloaded for training.  The files are opened with `with`
# so they are flushed and closed even if the run aborts (the originals
# leaked the handles).
with open('dataset_custom.csv', 'w') as dataset_file, \
        open('dataset_verification_custom.csv', 'w') as verification_file:
    csvwriter = csv.writer(dataset_file, delimiter=',', lineterminator='\n',
                           quotechar='|', quoting=csv.QUOTE_MINIMAL)
    verification_csvwriter = csv.writer(verification_file, delimiter=',', lineterminator='\n',
                                        quotechar='|', quoting=csv.QUOTE_MINIMAL)
    listOfStocksToAnalyze = Ds.get_investing_lists()  # stocks traded on Trading212
    with tqdm(total=len(listOfStocksToAnalyze)) as pbar:
        for stock in listOfStocksToAnalyze:
            try:
                initial_date = "2019-10-11"  # date from when downloading starts
                last_date = "2020-12-14"     # last date considered for training
                last_date = datetime.strptime(last_date, "%Y-%m-%d")
                date = datetime.strptime(initial_date, "%Y-%m-%d")
                # Download weekly price data.
                weekly = yf.download(tickers=stock, interval="1wk", start=initial_date, threads=False)
                # Remove all rows with NaN close prices or volumes.
                for index, row in weekly.iterrows():
                    if math.isnan(row["Close"]) or math.isnan(row["Volume"]):
                        weekly = weekly.drop([index])
                # Move the start date to be at least 1 year after the first
                # downloaded value.
                if (list(weekly.index)[0] + timedelta(days=365)) > date:
                    date = list(weekly.index)[0] + timedelta(days=365)
                # Go through all dates weekly and emit the scaled data rows.
                while date < last_date:
                    [price, validation, volume] = Ed.get_latest_1_year_price_weekly(weekly, date)
                    list_to_be_saved = validation + price + volume
                    if len(list_to_be_saved) == 103:
                        csvwriter.writerow(list_to_be_saved)
                        verification_csvwriter.writerow([stock, date + timedelta(days=-1)])
                    date = date + timedelta(days=7)
                pbar.update(1)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; per-stock failures remain best-effort.
                print("something went bad with " + stock)
                pbar.update(1)
| 3.234375 | 3 |
spacy-annotator/displacy/server.py | aniruddha-adhikary/spacy-dev-resources | 132 | 12766203 | #!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
import falcon
import spacy
import json
import sys
from spacy.pipeline import EntityRecognizer
import spacy.util
from spacy.tagger import Tagger
from .parse import Entities, TrainEntities
from falcon_cors import CORS
try:
unicode
except NameError:
unicode = str
_models = {}
def get_model(model_name):
    """Load and cache a spaCy model by name.

    On first use the model is loaded, missing pipeline components (tagger,
    entity recognizer) are created, and the result is memoized in the
    module-level ``_models`` cache; later calls return the cached model.

    NOTE(review): the cache is not guarded by a lock — if the WSGI server
    runs multiple threads, two concurrent first requests may load the model
    twice.  Confirm the deployment is single-threaded or add a lock.
    """
    if model_name not in _models:
        model = spacy.load(model_name)
        if model.tagger is None:
            model.tagger = Tagger(model.vocab, features=Tagger.feature_templates)
        if model.entity is None:
            model.entity = EntityRecognizer(model.vocab, entity_types=['PERSON', 'NORP', 'FACILITY', 'ORG', 'GPE',
                                                                       'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART',
                                                                       'LANGUAGE', 'DATE', 'TIME', 'PERCENT',
                                                                       'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'])
        model.pipeline = [model.tagger, model.entity, model.parser]
        _models[model_name] = model
    return _models[model_name]
def update_vocabulary(model, texts):
    """Touch every token of every text so its lexeme is added to the
    model's vocabulary (spaCy creates a lexeme on first lookup)."""
    for text in texts:
        for token in model.make_doc(text):
            # The lookup itself is the side effect; the value is unused.
            model.vocab[token.orth]
class EntResource(object):
    """Parse text and return displaCy ent's expected output."""

    def on_post(self, req, resp):
        """Handle POST: parse each paragraph's text and return the entity
        annotations as JSON; respond 500 on any failure."""
        req_body = req.stream.read()
        json_data = json.loads(req_body.decode('utf8'))
        paragraphs = json_data.get('paragraphs')
        model_name = json_data.get('model', 'en')
        try:
            model = get_model(model_name)
            entities = []
            for p in paragraphs:
                e = Entities(model, p.get('text'))
                entities.append(e.to_json())
            resp.body = json.dumps(entities, sort_keys=True, indent=2)
            resp.content_type = 'application/json'
            resp.status = falcon.HTTP_200
        except Exception:
            # CONSISTENCY FIX: surface the failure like TrainEntResource does
            # instead of silently swallowing it before returning a 500.
            print("Unexpected error:", sys.exc_info()[0])
            resp.status = falcon.HTTP_500
class TrainEntResource(object):
    """Parse text and use it to train the entity recognizer."""

    def on_post(self, req, resp):
        """Handle POST: extend the model vocabulary with the submitted
        texts, train on the supplied tags and return the annotations."""
        payload = json.loads(req.stream.read().decode('utf8'))
        paragraphs = payload.get('paragraphs')
        model_name = payload.get('model', 'en')
        try:
            model = get_model(model_name)
            update_vocabulary(model, [p.get('text') for p in paragraphs])
            entities = [
                TrainEntities(model, p.get('text'), p.get('tags')).to_json()
                for p in paragraphs
            ]
            resp.body = json.dumps(entities, sort_keys=True, indent=2)
            resp.content_type = 'application/json'
            resp.status = falcon.HTTP_200
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            resp.status = falcon.HTTP_500
# Allow cross-origin requests so the displaCy front-end can call this API
# from another host; then wire up the two endpoints.
cors = CORS(allow_all_origins=True)
APP = falcon.API(middleware=[cors.middleware])
APP.add_route('/ent', EntResource())
APP.add_route('/train', TrainEntResource())
| 2.3125 | 2 |
mmpose/core/optimizer/registry.py | chaowentao/mmpose | 367 | 12766204 | from mmcv.utils import Registry
OPTIMIZERS = Registry('optimizers')
| 1.25 | 1 |
lemon/default/main.py | InsaneMiner/Salt | 6 | 12766205 | <gh_stars>1-10
import lemon.StartDevServer as DevServer
if __name__ == "__main__":
    # Launch the lemon development server when run as a script.
    # NOTE(review): the trailing "| 0.90625 | 1" tokens look like a
    # data-extraction artifact fused onto this line — confirm against the
    # original file.
    DevServer.Start() | 0.90625 | 1
object_detection/vantara/lumada/client/gateway_client.py | cardosov/Hackathon2018ObjectDetector | 0 | 12766206 | from lumada.client.api.gateway_client_base import GatewayClientBase
from lumada.utils.validator import Validator
from lumada.client.lumada_client import LumadaClient
from lumada.client.asset_registration_client import AssetRegistrationClient
from lumada.client.asset_client import AssetClient
class GatewayClient(GatewayClientBase):
    """Client for a Lumada gateway.

    Registers assets behind the gateway and creates asset clients that
    communicate with Lumada through it.
    """
    def __init__(self, gateway_client_config):
        """Create the gateway client.

        :param gateway_client_config: configuration providing credentials
            (entity id/value) and the asset registration endpoint; validated
            by :class:`Validator` before use.
        """
        self._lumada_client = LumadaClient(Validator.validate_config_provided(gateway_client_config, 'AssetClientConfig'))
        self._gateway_id = gateway_client_config.get_credentials().get_entity_id()
        self._gateway_value = gateway_client_config.get_credentials().get_entity_value()
        self._registration_client = AssetRegistrationClient(gateway_id=self._gateway_id,
                                                            gateway_value=self._gateway_value,
                                                            asset_registration_endpoint=gateway_client_config.get_registration_endpoint())
    def register_asset_behind_gateway(self, asset_name, gateway_id, tags):
        """
        Registers an asset behind a gateway
        :param asset_name: Name of asset to register
        :param gateway_id: ID of the gateway to register the client
        :param tags: tags/params to be encoded on the url
        :return: asset client
        """
        Validator.validate_param(asset_name, 'AssetName')
        Validator.validate_param(gateway_id, 'GatewayId')
        asset_id = self._registration_client.register_asset(asset_name=asset_name, gateway_id=gateway_id, properties=tags)
        asset_client = AssetClient.from_gateway(asset_id=asset_id, gateway_id=self._gateway_id, client=self._lumada_client)
        return asset_client
    def create_asset_client(self, asset_id):
        """
        Create new asset client that communicates with lumada via the gateway
        :param asset_id: ID of the asset to create
        :return: Asset Client
        """
        Validator.validate_param(asset_id, 'AssetId')
        # verify_asset raises if the asset id is unknown to the gateway.
        self._registration_client.verify_asset(asset_id=asset_id)
        asset_client = AssetClient.from_gateway(asset_id=asset_id, gateway_id=self._gateway_id, client=self._lumada_client)
        return asset_client
    def close(self):
        """
        Disconnects from given communication channel
        """
        self._lumada_client.disconnect()
| 2.234375 | 2 |
jinahub/segmenters/Sentencizer/__init__.py | vivek2301/executors | 0 | 12766207 | from .sentencizer import Sentencizer
| 1.085938 | 1 |
the_josephus_problem02.py | manofern/the-josephus-problem | 0 | 12766208 | num = 42
list_of_numbers = []
for i in range(1, num+1):
list_of_numbers.append(i)
print(list_of_numbers)
death = 3
for i in list_of_numbers:
if i == num:
if i == death:
print("XX")
else:
print(i)
elif i < 10:
if i == death:
print(f"XX", end=" - ")
else:
print(f"0{i}", end=" - ")
elif i % 10 == 0:
if i == death:
print("XX")
else:
print(i)
else:
if i == death:
print("XX")
else:
print(f"{i}", end=" - ")
| 3.703125 | 4 |
cddm/test/test_core.py | inwwin/cddm | 0 | 12766209 | import unittest
import numpy as np
import cddm.core as core
from cddm.conf import FDTYPE, CDTYPE
from cddm.video import fromarrays
#test arrays
a = [1.,2,3,4]
b = [5,6,7,8]
t1 = [1,3,7,8]
t2 = [2,4,6,8]
#results of calculations (expected values, precomputed for the arrays above)
cross_a_b = np.array([ 70., 100., 62., 28.],FDTYPE)
cross_a_b_t1_t2 = np.array([32., 72., 28., 38., 24., 38., 20., 8.],FDTYPE)
auto_a = np.array([30., 20., 11., 4.], FDTYPE)
auto_a_t1 = np.array([30., 12., 2., 0., 6., 8., 3., 4.],FDTYPE)
auto_sum_a = np.array([10. , 7.5, 5. , 2.5], FDTYPE)
auto_sum_a_t1 = np.array([10. , 3.5, 1.5, 0. , 2.5, 3. , 2. , 2.5],FDTYPE)
cross_sum_a = np.array([10., 15., 10., 5.], FDTYPE)
cross_sum_a_t1_t2 = np.array([ 4., 11., 4., 6., 4., 6., 4., 1.],FDTYPE)
cross_count_10 = np.array([10, 18, 16, 14, 12, 10, 8, 6, 4, 2],FDTYPE)
cross_count_t1_t2 = np.array([1, 5, 1, 3, 1, 3, 1, 1],FDTYPE)
auto_count_10 = np.array([10, 9, 8, 7, 6, 5, 4, 3, 2, 1],FDTYPE)
auto_count_t1 = np.array([4, 1, 1, 0, 1, 1, 1, 1],FDTYPE)
# fixed seed so the random test data below is reproducible
np.random.seed(0)
a2 = [a,a]
b2 = [b,b]
test_data1 = np.random.randn(32,19,8) + np.random.randn(32,19,8)*1j
test_data2 = np.random.randn(32,19,8) + np.random.randn(32,19,8)*1j
test_data1 = np.array(test_data1, CDTYPE)
test_data2 = np.array(test_data2, CDTYPE)
# mask with the first row and every third column disabled
test_mask = np.ones((19,8),bool)
test_mask[0] = False
test_mask[:,0::3] = False
class TestCorrelateDifference(unittest.TestCase):
    """Cross-checks auto/cross correlation implementations (direct vs FFT)
    against precomputed expected values, with and without explicit time
    vectors, truncation (``n``) and accumulation into an existing output
    array (``aout``).  ``*2`` variants exercise 2D input along an axis."""
    def setUp(self):
        pass
    def test_auto_correlate_fft(self):
        out = core.auto_correlate_fft(a)
        self.assertTrue(np.allclose(out,auto_a))
        out = core.auto_correlate_fft(a,t1)
        self.assertTrue(np.allclose(out,auto_a_t1, atol = 1e-6))
        # passing `aout` accumulates into the previous result
        out = core.auto_correlate_fft(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_a_t1*2,atol = 1e-6))
    def test_auto_correlate_fft2(self):
        out = core.auto_correlate_fft(a2,axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a))
        out = core.auto_correlate_fft(a2,t1,axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a_t1, atol = 1e-6))
        out = core.auto_correlate_fft(a2,t1, axis = -1, aout = out)
        self.assertTrue(np.allclose(out[0],auto_a_t1*2, atol = 1e-6))
    def test_auto_correlate_fft_n(self):
        out = core.auto_correlate_fft(a, n = 3)
        self.assertTrue(np.allclose(out,auto_a[0:3]))
        out = core.auto_correlate_fft(a,t1,n = 3)
        self.assertTrue(np.allclose(out,auto_a_t1[0:3]))
        out = core.auto_correlate_fft(a,t1,n = 3, aout = out)
        self.assertTrue(np.allclose(out,auto_a_t1[0:3]*2))
    def test_auto_correlate_fft_n2(self):
        out = core.auto_correlate_fft(a2, axis = -1, n = 3)
        self.assertTrue(np.allclose(out[0],auto_a[0:3]))
        out = core.auto_correlate_fft(a2,t1,n = 3, axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]))
        out = core.auto_correlate_fft(a2,t1,n = 3, axis = -1, aout = out)
        self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]*2))
    def test_auto_correlate(self):
        out = core.auto_correlate(a)
        self.assertTrue(np.allclose(out,auto_a))
        out = core.auto_correlate(a,t1)
        self.assertTrue(np.allclose(out,auto_a_t1))
        out = core.auto_correlate(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_a_t1*2))
    def test_auto_correlate2(self):
        out = core.auto_correlate(a2, axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a))
        out = core.auto_correlate(a2,t1, axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a_t1))
        out = core.auto_correlate(a2,t1, axis = -1, aout = out)
        self.assertTrue(np.allclose(out[0],auto_a_t1*2))
    def test_auto_correlate_n(self):
        out = core.auto_correlate(a, n = 3)
        self.assertTrue(np.allclose(out,auto_a[0:3]))
        out = core.auto_correlate(a,t1,n = 3)
        self.assertTrue(np.allclose(out,auto_a_t1[0:3]))
        out = core.auto_correlate(a,t1,n = 3, aout = out)
        self.assertTrue(np.allclose(out,auto_a_t1[0:3]*2))
    def test_auto_correlate_n2(self):
        out = core.auto_correlate(a2, n = 3,axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a[0:3]))
        out = core.auto_correlate(a2,t1,n = 3, axis = -1)
        self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]))
        # axis = 1 is equivalent to axis = -1 for this 2D input
        out = core.auto_correlate(a2,t1,n = 3, aout = out, axis = 1)
        self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]*2))
    def test_cross_correlate_fft(self):
        out = core.cross_correlate_fft(a,b)
        self.assertTrue(np.allclose(out,cross_a_b))
        out = core.cross_correlate_fft(a,b,t1,t2)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2))
        out = core.cross_correlate_fft(a,b,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2*2))
    def test_cross_correlate_fft2(self):
        out = core.cross_correlate_fft(a2,b2,axis = 1)
        self.assertTrue(np.allclose(out[0],cross_a_b))
        out = core.cross_correlate_fft(a2,b2,t1,t2,axis = 1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2))
        out = core.cross_correlate_fft(a2,b2,t1,t2, aout = out,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2*2))
    def test_cross_correlate_fft_n(self):
        out = core.cross_correlate_fft(a,b, n = 3)
        self.assertTrue(np.allclose(out,cross_a_b[:3]))
        out = core.cross_correlate_fft(a,b,t1,t2, n = 3)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]))
        out = core.cross_correlate_fft(a,b,t1,t2, n = 3, aout = out)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
    def test_cross_correlate_fft_n2(self):
        out = core.cross_correlate_fft(a2,b2, n = 3 ,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b[:3]))
        out = core.cross_correlate_fft(a2,b2,t1,t2, n = 3, axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]))
        out = core.cross_correlate_fft(a2,b2,t1,t2, n = 3, aout = out, axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]*2))
    def test_cross_correlate(self):
        out = core.cross_correlate(a,b)
        self.assertTrue(np.allclose(out,cross_a_b))
        out = core.cross_correlate(a,b,t1,t2)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2))
        out = core.cross_correlate(a,b,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2*2))
    def test_cross_correlate2(self):
        out = core.cross_correlate(a2,b2,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b))
        out = core.cross_correlate(a2,b2,t1,t2,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2))
        out = core.cross_correlate(a2,b2,t1,t2, aout = out,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2*2))
    def test_cross_correlate_n(self):
        out = core.cross_correlate(a,b, n = 3)
        self.assertTrue(np.allclose(out,cross_a_b[:3]))
        out = core.cross_correlate(a,b,t1,t2, n = 3)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]))
        out = core.cross_correlate(a,b,t1,t2, n = 3, aout = out)
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
    def test_cross_correlate_n2(self):
        out = core.cross_correlate(a2,b2, n = 3,axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b[:3]))
        out = core.cross_correlate(a2,b2,t1,t2, n = 3, axis = -1)
        self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]))
        out = core.cross_correlate(a2,b2,t1,t2, n = 3, aout = out, axis = -1)
        # NOTE(review): sibling checks compare out[0]; this one compares the
        # whole 2D `out` and relies on broadcasting against the 1D expected
        # array — both rows are identical, so the result is the same.
        self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
class TestSum(unittest.TestCase):
    """Checks auto/cross sum helpers (direct and FFT variants) against
    precomputed values, including truncation (``n``) and accumulation via
    ``aout``, plus ND equivalence between the direct and FFT versions."""
    def test_auto_sum(self):
        out = core.auto_sum(a)
        self.assertTrue(np.allclose(out,auto_sum_a))
        out = core.auto_sum(a,t1)
        self.assertTrue(np.allclose(out,auto_sum_a_t1))
        out = core.auto_sum(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1*2))
    def test_auto_sum_n(self):
        out = core.auto_sum(a, n = 3)
        self.assertTrue(np.allclose(out,auto_sum_a[0:3]))
        out = core.auto_sum(a,t1, n = 3)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]))
        out = core.auto_sum(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*2))
        out = core.auto_sum(a,t1, n = 3, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*3))
    def test_auto_sum_fft(self):
        out = core.auto_sum_fft(a,t1)
        self.assertTrue(np.allclose(out,auto_sum_a_t1))
        out = core.auto_sum_fft(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1*2))
    def test_auto_sum_fft_n(self):
        out = core.auto_sum_fft(a,t1, n = 3)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]))
        out = core.auto_sum_fft(a,t1, n =3, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*2))
        out = core.auto_sum_fft(a,t1, aout = out)
        self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*3))
    def test_cross_sum(self):
        out = core.cross_sum(a)
        self.assertTrue(np.allclose(out,cross_sum_a))
        out = core.cross_sum(a,t1,t2)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2))
        out = core.cross_sum(a,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2*2))
    def test_cross_sum_n(self):
        out = core.cross_sum(a, n=3)
        self.assertTrue(np.allclose(out,cross_sum_a[0:3]))
        out = core.cross_sum(a,t1,t2, n = 3)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]))
        out = core.cross_sum(a,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*2))
    def test_cross_sum_fft(self):
        out = core.cross_sum_fft(a,t1,t2)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2))
        out = core.cross_sum_fft(a,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2*2))
    def test_cross_sum_fft_n(self):
        out = core.cross_sum_fft(a,t1,t2, n = 3)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]))
        out = core.cross_sum_fft(a,t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*2))
        out = core.cross_sum_fft(a,t1,t2, n =3, aout = out)
        self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*3))
    def test_cross_sum_equivalence_ND(self):
        # direct and FFT implementations must agree along every axis
        for axis in (0,1,2):
            t1 = np.arange(test_data1.shape[axis])
            t2 = np.arange(test_data1.shape[axis]) + 3
            out1 = core.cross_sum(test_data1,t1,t2, axis = axis)
            out2 = core.cross_sum_fft(test_data1,t1,t2, axis = axis)
            self.assertTrue(np.allclose(out1,out2))
class TestCount(unittest.TestCase):
    """Checks pair-count helpers (number of contributing pairs per delay)
    for both integer-length and explicit time-vector inputs."""
    def test_cross_count(self):
        out = core.cross_count(10)
        self.assertTrue(np.allclose(out,cross_count_10))
        out = core.cross_count(t1,t2)
        self.assertTrue(np.allclose(out,cross_count_t1_t2))
        out = core.cross_count(t1,t2, aout = out)
        self.assertTrue(np.allclose(out,cross_count_t1_t2*2))
    def test_cross_count_n(self):
        out = core.cross_count(10, n = 5)
        self.assertTrue(np.allclose(out,cross_count_10[0:5]))
        out = core.cross_count(t1,t2,n=5)
        self.assertTrue(np.allclose(out,cross_count_t1_t2[0:5]))
        out = core.cross_count(t1,t2, aout = out)
        self.assertTrue(np.allclose(out,2*cross_count_t1_t2[0:5]))
    def test_auto_count(self):
        out = core.auto_count(10)
        self.assertTrue(np.allclose(out,auto_count_10))
        out = core.auto_count(t1)
        self.assertTrue(np.allclose(out,auto_count_t1))
        out = core.auto_count(t1, aout = out)
        self.assertTrue(np.allclose(out,auto_count_t1*2))
    def test_auto_count_n(self):
        out = core.auto_count(10, n = 5)
        self.assertTrue(np.allclose(out,auto_count_10[0:5]))
        out = core.auto_count(t1, n = 5)
        self.assertTrue(np.allclose(out,auto_count_t1[:5]))
        out = core.auto_count(t1, aout = out)
        self.assertTrue(np.allclose(out,2*auto_count_t1[:5]))
class TestIcorr(unittest.TestCase):
    """Equivalence of the out-of-memory / iterative implementations
    (``iccorr``/``iacorr``) with the in-memory ones, after normalization."""
    def test_cross_equivalence(self):
        for method in ("corr","diff","fft"):
            bg,var = core.stats(test_data1, test_data2, axis = 0)
            data = core.ccorr(test_data1, test_data2,n = 8, norm = 1, method = method)
            out1 = core.normalize(data, bg, var)
            # iterative version consumes the data as a video iterable
            vid = fromarrays((test_data1, test_data2))
            data,bg,var = core.iccorr(vid, count = len(test_data1),chunk_size = 16,n = 8, norm = 1, method = method)
            out2 = core.normalize(data, bg, var)
            self.assertTrue(np.allclose(out1, out2))
    def test_auto_equivalence_2(self):
        for method in ("corr",):
            bg,var = core.stats(test_data1, axis = 0)
            # auto-correlation expressed as a cross-correlation of the data
            # with itself must match iacorr with the same norm
            data1 = core.ccorr(test_data1,test_data1, n = 8, norm = 2, method = method)
            out1 = core.normalize(data1, bg, var, norm = 2)
            data2,bg,var = core.iacorr(test_data1, n = 8, norm = 2, method = method)
            out2 = core.normalize(data2, bg, var, norm = 2)
            self.assertTrue(np.allclose(out1, out2))
    def test_auto_equivalence_1(self):
        for method in ("corr","fft","diff"):
            bg,var = core.stats(test_data1, axis = 0)
            data1 = core.acorr(test_data1, n = 8, norm = 1, method = method)
            out1 = core.normalize(data1, bg, var, norm = 1)
            data2,bg,var = core.iacorr(test_data1, n = 8, norm = 1, method = method)
            out2 = core.normalize(data2, bg, var, norm = 1)
            self.assertTrue(np.allclose(out1, out2))
class TestCorr(unittest.TestCase):
    """Cross-method consistency of ``ccorr``/``acorr`` ("fft" vs "corr" vs
    "diff") for the different normalization flags (0..3), normalization
    modes ("corr"/"diff"), scaling, axes and masking."""
    def setUp(self):
        pass
    def test_corr_regular_3(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, test_data2, axis = axis)
                    data = core.ccorr(test_data1, test_data2, norm = 3, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
                    data = core.ccorr(test_data1, test_data2, norm = 3, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
                    data = core.ccorr(test_data1, test_data2, norm = 3, method = "diff", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_ccorr_regular_3_mask(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                axis = 0
                bg,var = core.stats(test_data1, test_data2, axis = axis)
                data = core.ccorr(test_data1, test_data2, norm = 3, method = "fft", axis = axis)
                self.out = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
                data = core.ccorr(test_data1, test_data2, norm = 3, method = "corr", axis = axis)
                out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
                data = core.ccorr(test_data1, test_data2, norm = 3, method = "diff", axis = axis)
                out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
    def test_acorr_regular_3(self):
        # NOTE(review): the reference here is computed with norm = 3 but
        # normalized with norm = 1, and the "diff" branch computes acorr with
        # norm = 1 — unlike the norm = 3 of the other branches.  Confirm this
        # mix against cddm's norm-flag semantics before relying on it.
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, axis = axis)
                    data = core.ccorr(test_data1, test_data1, norm = 3, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    data = core.acorr(test_data1,norm = 3, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
                    data = core.acorr(test_data1,norm = 1, method = "diff", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_ccorr_regular_1(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, test_data2, axis = axis)
                    data = core.ccorr(test_data1, test_data2, norm = 1, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    data = core.ccorr(test_data1, test_data2, norm = 1, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
                    data = core.ccorr(test_data1, test_data2, norm = 1, method = "diff", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_acorr_regular_1(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, axis = axis)
                    data = core.acorr(test_data1, norm = 1, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    data = core.acorr(test_data1,norm = 1, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
                    data = core.acorr(test_data1,norm = 1, method = "diff", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_corr_regular_1_mask(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                axis = 0
                bg,var = core.stats(test_data1, test_data2, axis = axis)
                data = core.ccorr(test_data1, test_data2, norm = 1, method = "fft", axis = axis)
                self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
                data = core.ccorr(test_data1, test_data2, norm = 1, method = "corr", axis = axis)
                out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
                data = core.ccorr(test_data1, test_data2, norm = 1, method = "diff", axis = axis)
                out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
    def test_ccorr_regular_0(self):
        # norm = 0 has no "diff" method variant, so only fft vs corr
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, test_data2, axis = axis)
                    data = core.ccorr(test_data1, test_data2, norm = 0, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
                    data = core.ccorr(test_data1, test_data2, norm = 0, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_acorr_regular_0(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, axis = axis)
                    data = core.acorr(test_data1, norm = 0, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
                    data = core.acorr(test_data1,norm = 0, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_corr_regular_0_mask(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                axis = 0
                bg,var = core.stats(test_data1, test_data2, axis = axis)
                data = core.ccorr(test_data1, test_data2, norm = 0, method = "fft", axis = axis)
                self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale, mask = test_mask)
                data = core.ccorr(test_data1, test_data2, norm = 0, method = "corr", axis = axis)
                out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale, mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
    def test_corr_regular_2(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0,1,2):
                    bg,var = core.stats(test_data1, test_data2, axis = axis)
                    data = core.ccorr(test_data1, test_data2, norm = 2, method = "fft", axis = axis)
                    self.out = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale)
                    data = core.ccorr(test_data1, test_data2, norm = 2, method = "corr", axis = axis)
                    out_other = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale)
                    self.assertTrue(np.allclose(self.out, out_other))
    def test_corr_regular_2_mask(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                bg,var = core.stats(test_data1, test_data2)
                data = core.ccorr(test_data1, test_data2, norm = 2, method = "fft")
                self.out = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale, mask = test_mask)
                data = core.ccorr(test_data1, test_data2, norm = 2, method = "corr")
                out_other = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale,mask = test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
class TestRest(unittest.TestCase):
    """Miscellaneous helpers that do not fit the correlation test groups."""

    def test_abs2(self):
        """abs2 must equal the squared complex magnitude."""
        expected = np.abs(test_data1) ** 2
        self.assertTrue(np.allclose(core.abs2(test_data1), expected))
if __name__ == "__main__":
unittest.main() | 2.21875 | 2 |
Tms-GCN-PyTorch/utils/data/spatiotemporal_csv_data.py | Joker-L0912/Tms-GCN-Py | 0 | 12766210 | import argparse
import numpy as np
import pytorch_lightning as pl
from torch.utils.data.dataloader import DataLoader
import utils.data.functions
class SpatioTemporalCSVDataModule(pl.LightningDataModule):
    """Lightning data module serving spatio-temporal series loaded from CSV files.

    Eagerly loads a feature matrix, an adjacency matrix, and distance/direction
    matrices, then produces sliding-window train/validation datasets in
    :meth:`setup`.
    """

    def __init__(
        self,
        feat_path: str,
        adj_path: str,
        batch_size: int = 32,
        seq_len: int = 12,
        pre_len: int = 3,
        split_ratio: float = 0.8,
        normalize: bool = True,
        dis_path: str = r'data/sz_distance.csv',
        direct_path: str = r'data/sz_direct.csv',
        **kwargs
    ):
        """Store configuration and load all matrices from disk.

        ``dis_path`` and ``direct_path`` generalize the previously hard-coded
        Shenzhen CSV locations; their defaults preserve the old behavior.
        """
        super(SpatioTemporalCSVDataModule, self).__init__()
        self._feat_path = feat_path
        self._adj_path = adj_path
        self.batch_size = batch_size
        self.seq_len = seq_len          # length of the input window
        self.pre_len = pre_len          # length of the prediction horizon
        self.split_ratio = split_ratio  # train fraction of the time axis
        self.normalize = normalize
        self._feat = utils.data.functions.load_features(self._feat_path)
        # Maximum feature value, exposed for de-normalizing model outputs.
        self._feat_max_val = np.max(self._feat)
        self._adj = utils.data.functions.load_adjacency_matrix(self._adj_path)
        self._dis = utils.data.functions.load_distance_matrix(dis_path)
        self.direct = utils.data.functions.load_distance_matrix(direct_path)

    @staticmethod
    def add_data_specific_arguments(parent_parser):
        """Attach this module's CLI arguments to *parent_parser* and return the new parser."""
        parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--batch_size", type=int, default=32)
        # NOTE(review): CLI default (32) differs from the __init__ default (12)
        # — confirm which one is intended.
        parser.add_argument("--seq_len", type=int, default=32)
        parser.add_argument("--pre_len", type=int, default=1)
        parser.add_argument("--split_ratio", type=float, default=0.8)
        parser.add_argument("--normalize", type=bool, default=True)
        return parser

    def setup(self, stage: str = None):
        """Build the train/validation datasets (Lightning hook)."""
        (
            self.train_dataset,
            self.val_dataset,
        ) = utils.data.functions.generate_torch_datasets(
            self._feat,
            self.seq_len,
            self.pre_len,
            split_ratio=self.split_ratio,
            normalize=self.normalize,
        )

    def train_dataloader(self):
        """Return a mini-batched loader over the training split."""
        return DataLoader(self.train_dataset, batch_size=self.batch_size)

    def val_dataloader(self):
        """Return a single full-batch loader over the validation split."""
        return DataLoader(self.val_dataset, batch_size=len(self.val_dataset))

    @property
    def feat_max_val(self):
        # Scalar used to undo min-max normalization on predictions.
        return self._feat_max_val

    @property
    def adj(self):
        return self._adj

    @property
    def dis(self):
        return self._dis
| 2.328125 | 2 |
manilaclient/v2/share_group_type_access.py | Murray-LIANG/python-manilaclient | 0 | 12766211 | # Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Share group type access interface."""
from manilaclient import api_versions
from manilaclient import base
from manilaclient.common.apiclient import base as common_base
RESOURCES_PATH = '/share-group-types'
RESOURCE_PATH = '/share-group-types/%s/access'
RESOURCE_PATH_ACTION = '/share-group-types/%s/action'
RESOURCE_NAME = 'share_group_type_access'
class ShareGroupTypeAccess(common_base.Resource):
    """A single project-access grant on a share group type."""

    def __repr__(self):
        return "<Share Group Type Access: {}>".format(self.id)
class ShareGroupTypeAccessManager(base.ManagerWithFind):
    """Manage :class:`ShareGroupTypeAccess` resources."""

    resource_class = ShareGroupTypeAccess

    @api_versions.wraps("2.31")
    @api_versions.experimental_api
    def list(self, share_group_type, search_opts=None):
        """List project access grants; public types have no access list."""
        if share_group_type.is_public:
            return None
        url = RESOURCE_PATH % common_base.getid(share_group_type)
        return self._list(url, RESOURCE_NAME)

    @api_versions.wraps("2.31")
    @api_versions.experimental_api
    def add_project_access(self, share_group_type, project):
        """Add a project to the given share group type access list."""
        self._action('addProjectAccess', share_group_type, {'project': project})

    @api_versions.wraps("2.31")
    @api_versions.experimental_api
    def remove_project_access(self, share_group_type, project):
        """Remove a project from the given share group type access list."""
        self._action('removeProjectAccess', share_group_type, {'project': project})

    def _action(self, action, share_group_type, info, **kwargs):
        """POST a share group type action with *info* as the action payload."""
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = RESOURCE_PATH_ACTION % common_base.getid(share_group_type)
        return self.api.client.post(url, body=body)
| 2.03125 | 2 |
bin/dustmasker_interval_to_bed.py | mpieva/quicksand-build | 0 | 12766212 | from csv import reader, writer
import sys
def get_id(s):
    """Return the FASTA record ID: the text after the leading '>' up to the
    first whitespace character.

    Adapted from the Biopython SeqIO fasta parser.
    """
    header = s[1:]
    return header.split(None, 1)[0]
tsv_in = reader(sys.stdin, delimiter="\t")
tsv_out = writer(sys.stdout, delimiter="\t")
for row in tsv_in:
    # Keep only the accession: drop everything after the first space in the header.
    row[0] = get_id(row[0])
    # dustmasker intervals are inclusive; BED end coordinates are exclusive,
    # so the end position is shifted by one.
    row[2] = int(row[2]) + 1
    tsv_out.writerow(row)
| 2.671875 | 3 |
def FizzBuzz(n):
    """FizzBuzz exercise: tag multiples of 3 and/or 5; return n unchanged otherwise."""
    by_three = n % 3 == 0
    by_five = n % 5 == 0
    if by_three and by_five:
        return f'{n} FizzBuzz'
    if by_three:
        return f'{n} é fizz'
    if by_five:
        return f'{n} buzz'
    return n
# Demo: 25 is a multiple of 5 only, so this prints "25 buzz".
print(FizzBuzz(25))
| 3.515625 | 4 |
python/showpreprocess.py | imistyrain/ssd-models | 17 | 12766214 | import os
import cv2
import numpy as np
import sys
caffe_root = os.path.expanduser('~') + "/CNN/ssd"
sys.path.insert(0, caffe_root+'/python')
import caffe
from tqdm import tqdm
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat','bottle', 'bus', 'car', 'cat',
'chair','cow', 'diningtable', 'dog', 'horse','motorbike', 'person',
'pottedplant','sheep', 'sofa', 'train', 'tvmonitor')
# color index please refer to https://zhuanlan.zhihu.com/p/102303256
colors = [[0,0,0], [128,0,0],[0,128,0],[128,128,0],[0,0,128],[128,0,128],
[0,0,128],[128,128,128], [64,0,0],[192,0,0],[64,128,0],
[192,128,0], [64,0,128], [192,0,128], [64,128,128], [192,128,128],
[0,64,0], [128,64,0], [0,192,0], [128,192,0],[0,64,128]]
outputdir="output/preproess"
def showpreprocess(blobs, i, show=False):
    """Draw the ground-truth boxes and class labels held in the label blob onto
    the preprocessed image blob, optionally display it, and save it as
    ``<outputdir>/<i>.jpg``.
    """
    image_blob = np.array(blobs['data'].data)
    label_blob = np.array(blobs['label'].data)
    # First image of the batch, converted from CHW to HWC for OpenCV.
    canvas = image_blob[0].transpose(1, 2, 0).copy()
    annotations = label_blob[0][0]
    height, width, _ = canvas.shape
    for ann in annotations:
        # Box coordinates are normalized; scale them back to pixels.
        x1 = int(ann[3] * width)
        y1 = int(ann[4] * height)
        x2 = int(ann[5] * width)
        y2 = int(ann[6] * height)
        cls_id = int(ann[1])
        cv2.rectangle(canvas, (x1, y1), (x2, y2), colors[cls_id])
        cv2.putText(canvas, CLASSES[cls_id], (x1, y1), 1, 1, colors[cls_id])
    if show:
        cv2.imshow("img", canvas)
        cv2.waitKey()
    cv2.imwrite(outputdir + "/" + str(i) + ".jpg", canvas)
def main(model="voc/MobileNetSSD_preprocess.prototxt", show=False):
    """Run 20 training-phase forward passes and dump annotated preprocessed images.

    :param model: prototxt whose data layer performs the SSD preprocessing
    :param show: when True, also display each annotated image in a window.
                 Bug fix: this flag was previously accepted but never forwarded
                 to ``showpreprocess``, so it had no effect.
    """
    net = caffe.Net(model, caffe.TRAIN)
    for i in tqdm(range(20)):
        blobs = net.forward()
        showpreprocess(blobs, i, show)
if __name__=="__main__":
if not os.path.exists(outputdir):
os.makedirs(outputdir)
main() | 2.265625 | 2 |
BrainQuake/gui_forms/ictal_form.py | HongLabTHU/Brainquake | 16 | 12766215 | <filename>BrainQuake/gui_forms/ictal_form.py<gh_stars>10-100
#! /usr/bin/python3.7
# -- coding: utf-8 -- **
from PyQt5.QtWidgets import QApplication, QSizePolicy, QMessageBox, QWidget, \
QPushButton, QLineEdit, QDesktopWidget, QGridLayout, QFileDialog, QListWidget, QLabel,QFrame,QGroupBox
from PyQt5.QtCore import Qt, QThread
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import cm
from matplotlib.widgets import Slider
# classes
class PlotCanvas(FigureCanvas):
    """Matplotlib figure embedded as a Qt widget for plotting EEG/SEEG signals."""

    def __init__(self, parent=None, width=7, height=5, dpi=100):
        # Build the figure first; the single axes covers most of the figure area.
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_axes([0.05, 0.1, 0.9, 0.8])
        # Initialise the Qt canvas with the figure, then attach to the parent widget.
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Let the canvas grow with the surrounding layout.
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.plot()  # start from an empty, cleared plot

    def plot(self):
        """Clear the axes and redraw the canvas."""
        self.axes.cla()
        self.draw()
class Ictal_gui(object):
def setupUi(self, IctalModule):
# main window
IctalModule.setObjectName("IctalModule")
self.setWindowTitle('Ictal Computation')
self.resize(1200, 600)
self.center()
self.setStyleSheet("background-color:lightgrey;")
self.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.gridlayout = QGridLayout()
# canvas
self.canvas = PlotCanvas(self, width=10, height=5)
self.leftPannelLayout=QGridLayout()
self.leftPannelLayout.addWidget(self.canvas,1,1,25,24)
self.gridlayout.addLayout(self.leftPannelLayout, 1, 1, 1,1)
self.canvas.fig.canvas.mpl_connect('button_press_event', self.canvas_press_button)
self.canvas.fig.canvas.mpl_connect('scroll_event', self.disp_scroll_mouse)
# self.gridlayout.setColumnStretch(25,1)
self.rightPannelLayout=QGridLayout()
self.gridlayout.addLayout(self.rightPannelLayout,1,2,1,1)
self.gridlayout.setColumnStretch(1,25)
self.gridlayout.setColumnStretch(2,4)
self.patient_label=QLabel(self)
self.patient_label.setText('patient')
self.rightPannelLayout.addWidget(self.patient_label,2,1,1,1)
# input data
self.lineedit_patient_name = QLineEdit(self)
self.lineedit_patient_name.setText('name')
self.lineedit_patient_name.setToolTip('please input the patient name')
self.lineedit_patient_name.setStyleSheet(
"QLineEdit{border-style:none;border-radius:5px;padding:5px;background-color:#ffffff}QLineEdit:focus{border:2px solid gray; }")
self.rightPannelLayout.addWidget(self.lineedit_patient_name, 2, 2, 1, 1)
self.button_inputedf = QPushButton('import .edf data', self)
self.button_inputedf.setToolTip('click to input data')
self.button_inputedf.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.rightPannelLayout.addWidget(self.button_inputedf, 1, 1, 1, 2)
self.button_inputedf.clicked.connect(self.dialog_inputedfdata)
self.adjust_frame = QGroupBox(self)
self.adjust_frame.setStyleSheet("QGroupBox{border: 2px solid gray; border-radius: 5px;background-color:lightgrey;}QGroupBox:title{subcontrol-origin: margin;subcontrol-position: top left;padding: 0 3px 0 3px;}")
self.adjust_frame.setTitle('Adjust signal')
self.rightPannelLayout.addWidget(self.adjust_frame, 5, 1, 5, 2)
self.adjust_frame_layout = QGridLayout()
self.delchn_frame = QGroupBox(self)
self.delchn_frame.setStyleSheet("QGroupBox{border: 2px solid gray; border-radius: 5px;background-color:lightgrey;}QGroupBox:title{subcontrol-origin: margin;subcontrol-position: top left;padding: 0 3px 0 3px;}")
self.delchn_frame.setTitle('Delete channels')
self.rightPannelLayout.addWidget(self.delchn_frame, 11, 1, 4, 2)
self.delchn_frame_layout = QGridLayout()
self.filter_frame = QGroupBox(self)
self.filter_frame.setStyleSheet(
"QGroupBox{border: 2px solid gray; border-radius: 5px;background-color:lightgrey;}QGroupBox:title{subcontrol-origin: margin;subcontrol-position: top left;padding: 0 3px 0 3px;}")
self.filter_frame.setTitle('Filter')
self.rightPannelLayout.addWidget(self.filter_frame, 16, 1, 2, 2)
self.filter_frame_layout = QGridLayout()
self.compu_frame = QGroupBox(self)
self.compu_frame.setStyleSheet("QGroupBox{border: 2px solid gray; border-radius: 5px;background-color:lightgrey;}QGroupBox:title{subcontrol-origin: margin;subcontrol-position: top left;padding: 0 3px 0 3px;}")
self.compu_frame.setTitle('Computation')
self.rightPannelLayout.addWidget(self.compu_frame, 20, 1, 4, 2)
self.compu_frame_layout = QGridLayout()
# win up down
self.dis_down = QPushButton('win down', self)
self.dis_down.setToolTip('roll window down')
self.dis_down.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_down, 1, 1)
self.dis_down.clicked.connect(self.disp_win_down_func) # change value & one common display func
self.dis_down.setEnabled(False)
self.dis_up = QPushButton('win up', self)
self.dis_up.setToolTip('roll window up')
self.dis_up.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_up, 1,2)
self.dis_up.clicked.connect(self.disp_win_up_func) # change value & one common display func
self.dis_up.setEnabled(False)
# channels num
self.dis_more_chans = QPushButton('chans+', self)
self.dis_more_chans.setToolTip('more channels')
self.dis_more_chans.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_more_chans, 2, 1)
self.dis_more_chans.clicked.connect(self.disp_more_chans_func) # change value & one common display func
self.dis_more_chans.setEnabled(False)
self.dis_less_chans = QPushButton('chans-', self)
self.dis_less_chans.setToolTip('less channels')
self.dis_less_chans.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_less_chans, 2, 2)
self.dis_less_chans.clicked.connect(self.disp_less_chans_func) # change value & one common display func
self.dis_less_chans.setEnabled(False)
# wave mag
self.dis_add_mag = QPushButton('wave+', self)
self.dis_add_mag.setToolTip('wave magnitude up')
self.dis_add_mag.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_add_mag, 3, 1)
self.dis_add_mag.clicked.connect(self.disp_add_mag_func) # change value & one common display func
self.dis_add_mag.setEnabled(False)
self.dis_drop_mag = QPushButton('wave-', self)
self.dis_drop_mag.setToolTip('wave magnitude down')
self.dis_drop_mag.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_drop_mag, 3, 2)
self.dis_drop_mag.clicked.connect(self.disp_drop_mag_func) # change value & one common display func
self.dis_drop_mag.setEnabled(False)
# win left right
self.dis_left = QPushButton('left', self)
self.dis_left.setToolTip('roll window left')
self.dis_left.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_left, 4, 1)
self.dis_left.clicked.connect(self.disp_win_left_func) # change value & one common display func
self.dis_left.setEnabled(False)
self.dis_right = QPushButton('right', self)
self.dis_right.setToolTip('roll window right')
self.dis_right.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_right, 4, 2)
self.dis_right.clicked.connect(self.disp_win_right_func) # change value & one common display func
self.dis_right.setEnabled(False)
# time scale
self.dis_shrink_time = QPushButton('shrink', self)
self.dis_shrink_time.setToolTip('shrink time scale')
self.dis_shrink_time.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_shrink_time, 5, 1)
self.dis_shrink_time.clicked.connect(self.disp_shrink_time_func) # change value & one common display func
self.dis_shrink_time.setEnabled(False)
self.dis_expand_time = QPushButton('expand', self)
self.dis_expand_time.setToolTip('expand time scale')
self.dis_expand_time.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.adjust_frame_layout.addWidget(self.dis_expand_time, 5, 2)
self.dis_expand_time.clicked.connect(self.disp_expand_time_func) # change value & one common display func
self.dis_expand_time.setEnabled(False)
# filter data
self.disp_filter_low = QLineEdit(self)
self.disp_filter_low.setText('60')
self.disp_filter_low.setToolTip('filter low boundary')
self.disp_filter_low.setStyleSheet(
"QLineEdit{border-radius:5px;padding:5px;background-color:#ffffff}QLineEdit:focus{border:2px solid gray;}")
self.filter_frame_layout.addWidget(self.disp_filter_low, 1, 1)
self.disp_filter_high = QLineEdit(self)
self.disp_filter_high.setText('140')
self.disp_filter_high.setToolTip('filter high boudary')
self.disp_filter_high.setStyleSheet(
"QLineEdit{border-radius:5px;padding:5px;background-color:#ffffff}QLineEdit:focus{border:2px solid gray;}")
self.filter_frame_layout.addWidget(self.disp_filter_high, 1, 2)
self.filter_button = QPushButton('bandpass filter', self)
self.filter_button.setToolTip('filter the data')
self.filter_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.filter_frame_layout.addWidget(self.filter_button, 2, 1, 1, 2)
self.filter_button.clicked.connect(self.filter_data)
self.filter_button.setEnabled(False)
# del channels
self.chans_list = QListWidget(self)
self.chans_list.setToolTip('choose chans to delete')
self.chans_list.setStyleSheet("border-radius:5px;padding:5px;background-color:#ffffff;")
self.chans_list.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff);
self.chans_list.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.delchn_frame_layout.addWidget(self.chans_list, 1, 1, 3, 2)
self.chans_del_button = QPushButton(self)
self.chans_del_button.setText('delete chans')
self.chans_del_button.setToolTip('delete channels')
self.chans_del_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.delchn_frame_layout.addWidget(self.chans_del_button, 4, 1, 1, 2)
self.chans_del_button.clicked.connect(self.delete_chans)
self.chans_del_button.setEnabled(False)
# reset data
self.reset_data_display = QPushButton('reset data', self)
self.reset_data_display.setToolTip('reset data')
self.reset_data_display.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:#333536;}QPushButton:hover{background-color:k;}")
self.rightPannelLayout.addWidget(self.reset_data_display, 18, 1,1,2)
self.reset_data_display.clicked.connect(self.reset_data_display_func)
self.reset_data_display.setEnabled(False)
# baseline time and target time selection
self.baseline_button = QPushButton(self)
self.baseline_button.setText('baseline')
self.baseline_button.setToolTip('choose baseline time')
self.baseline_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.compu_frame_layout.addWidget(self.baseline_button, 1, 1, 1, 1)
self.baseline_button.clicked.connect(self.choose_baseline)
self.baseline_button.setEnabled(False)
self.target_button = QPushButton(self)
self.target_button.setText('target')
self.target_button.setToolTip('choose target time')
self.target_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.compu_frame_layout.addWidget(self.target_button, 1, 2, 1, 1)
self.target_button.clicked.connect(self.choose_target)
self.target_button.setEnabled(False)
# ei
self.ei_button = QPushButton(self)
self.ei_button.setText('ei')
self.ei_button.setToolTip('compute epilepsy index')
self.ei_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.compu_frame_layout.addWidget(self.ei_button, 2, 1, 1, 2)
self.ei_button.clicked.connect(self.ei_computation_func)
self.ei_button.setEnabled(False)
# hfer
self.hfer_button = QPushButton(self)
self.hfer_button.setText('hfer')
self.hfer_button.setToolTip('compute high frequency energy ratio')
self.hfer_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.compu_frame_layout.addWidget(self.hfer_button, 3, 1, 1, 2)
self.hfer_button.clicked.connect(self.hfer_computation_func)
self.hfer_button.setEnabled(False)
# fullband
self.fullband_button = QPushButton(self)
self.fullband_button.setText('full band')
self.fullband_button.setToolTip('compute full band characteristic')
self.fullband_button.setStyleSheet(
"QPushButton{border-radius:5px;padding:5px;color:#ffffff;background-color:dimgrey;}QPushButton:hover{background-color:k;}")
self.compu_frame_layout.addWidget(self.fullband_button, 4, 1, 1, 2)
self.fullband_button.clicked.connect(self.fullband_computation_func)
self.fullband_button.setEnabled(False)
# show main window
self.adjust_frame.setLayout(self.adjust_frame_layout)
self.filter_frame.setLayout(self.filter_frame_layout)
self.delchn_frame.setLayout(self.delchn_frame_layout)
self.compu_frame.setLayout(self.compu_frame_layout)
self.setLayout(self.gridlayout)
self.show() | 2.15625 | 2 |
aliyun-python-sdk-csb/aliyunsdkcsb/request/v20171118/FindApproveServiceListRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 0 | 12766216 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class FindApproveServiceListRequest(RpcRequest):
    """RPC request for the CSB ``FindApproveServiceList`` API (version 2017-11-18).

    Each ``get_*``/``set_*`` pair below reads or writes one query parameter of
    the request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'CSB', '2017-11-18', 'FindApproveServiceList','CSB')
        self.set_protocol_type('https');

    def get_projectName(self):
        """Return the ``projectName`` query parameter."""
        return self.get_query_params().get('projectName')

    def set_projectName(self,projectName):
        """Set the ``projectName`` query parameter."""
        self.add_query_param('projectName',projectName)

    def get_approveLevel(self):
        """Return the ``approveLevel`` query parameter."""
        return self.get_query_params().get('approveLevel')

    def set_approveLevel(self,approveLevel):
        """Set the ``approveLevel`` query parameter."""
        self.add_query_param('approveLevel',approveLevel)

    def get_showDelService(self):
        """Return the ``showDelService`` query parameter."""
        return self.get_query_params().get('showDelService')

    def set_showDelService(self,showDelService):
        """Set the ``showDelService`` query parameter."""
        self.add_query_param('showDelService',showDelService)

    def get_csbId(self):
        """Return the ``csbId`` query parameter."""
        return self.get_query_params().get('csbId')

    def set_csbId(self,csbId):
        """Set the ``csbId`` query parameter."""
        self.add_query_param('csbId',csbId)

    def get_alias(self):
        """Return the ``alias`` query parameter."""
        return self.get_query_params().get('alias')

    def set_alias(self,alias):
        """Set the ``alias`` query parameter."""
        self.add_query_param('alias',alias)

    def get_serviceName(self):
        """Return the ``serviceName`` query parameter."""
        return self.get_query_params().get('serviceName')

    def set_serviceName(self,serviceName):
        """Set the ``serviceName`` query parameter."""
        self.add_query_param('serviceName',serviceName)
ips/worker/management/commands/get_ips_from_coordinator.py | marklit/mass-ipv4-whois | 20 | 12766217 | from random import randint
from time import sleep
from urlparse import urlparse
from django.conf import settings
from django.core.management.base import BaseCommand
import netaddr
import redis
import requests
from ips.config import get_config
from worker.models import IPv4Whois
from worker.tasks import (whois_afrinic,
whois_apnic,
whois_arin,
whois_lacnic,
whois_ripencc)
def in_known_cidr_block(ip_address):
    """Return True when *ip_address* falls inside any CIDR cached under the
    Redis key ``cidrs`` (a comma-separated list)."""
    conn = redis.StrictRedis(host=settings.REDIS_HOST,
                             port=settings.REDIS_PORT,
                             db=settings.REDIS_DB)
    raw = conn.get('cidrs')
    # Missing key or empty value means no CIDRs are known yet.
    if not raw:
        return False
    matches = netaddr.all_matching_cidrs(ip_address, raw.split(','))
    return len(matches) > 0
class Command(BaseCommand):
    """Django management command: poll the coordinator for IPv4 addresses,
    resolve each address's regional registry via ARIN's RDAP bootstrap
    redirect, and queue the registry-specific WHOIS Celery task.

    Runs forever. Written for Python 2 (print statements, ``urlparse``).
    """

    help = 'Get IPs to lookup from coordinator'

    def handle(self, *args, **options):
        # Map an RDAP host (taken from the bootstrap redirect's Location
        # header) to the Celery task performing that registry's lookup.
        lookup = {
            'rdap.afrinic.net': whois_afrinic,
            'rdap.apnic.net': whois_apnic,
            'rdap.arin.net': whois_arin,
            'rdap.lacnic.net': whois_lacnic,
            'rdap.db.ripe.net': whois_ripencc,
        }
        bootstrap_url = 'http://rdap.arin.net/bootstrap/ip/%s'
        while True:
            # get 10,000 IPs from coordinator
            coordinator_endpoint = get_config('COORDINATOR_ENDPOINT')
            if not coordinator_endpoint:
                print 'Unable to get coordinator address, will try later'
                sleep(randint(3, 15))
                continue
            # Make sure there is a Kafka endpoint to report results back to
            # before continuing
            kafka_host = get_config('KAFKA_HOST')
            if not kafka_host:
                print 'No Kafka host to report back to, will try later'
                sleep(randint(3, 15))
                continue
            try:
                resp = requests.get(coordinator_endpoint, timeout=120)
            except Exception as exc:
                print exc
                print 'Sleeping for a bit to give the coordinator a break'
                sleep(randint(3, 8))
                continue
            if resp.status_code != 200:
                # Coordinator might not be up, try later.
                print 'Got non-HTTP 200 back from coordinator, will try later'
                sleep(5)
                continue
            if resp.text.strip().upper() == 'END':
                # No more IPs to work with or the list hasn't finished
                # generating yet
                print 'No more IPs from coordinator, will check back later'
                sleep(30)
                continue
            # Find the Registry for each IP.
            # NOTE(review): assumes the coordinator body is a comma-separated
            # list of addresses — confirm against the coordinator service.
            for ip in resp.text.split(','):
                # validate ip here
                _ip = IPv4Whois(address=ip)
                _ip.save()
                # Skip addresses already covered by a cached CIDR block.
                if in_known_cidr_block(ip):
                    _ip.status = IPv4Whois.STATUS_WITHIN_KNOWN_CIDR
                    _ip.save()
                    continue
                try:
                    # HEAD the bootstrap URL; ARIN answers with a redirect
                    # pointing at the authoritative registry's RDAP server.
                    resp = requests.head(bootstrap_url % ip, timeout=10)
                except Exception as exc:
                    print exc
                    _ip.status = IPv4Whois.STATUS_LOOKUP_REGISTRY_FAILED
                    _ip.save()
                    continue
                if 'Location' not in resp.headers:
                    _ip.status = IPv4Whois.STATUS_LOOKUP_REGISTRY_FAILED
                    _ip.save()
                    continue
                url = urlparse(resp.headers['Location'])
                rdap_host = url.netloc.lower().strip()
                if rdap_host in lookup:
                    _ip.status = IPv4Whois.STATUS_LOOKUP_REGISTRY_SUCCESS
                    _ip.save()
                    # Queue Registry-specific WHOIS lookup
                    lookup[rdap_host].delay(_ip.pk)
                else:
                    # Redirect points at a host we do not recognise.
                    _ip.status = IPv4Whois.STATUS_LOOKUP_REGISTRY_FAILED
                    _ip.save()
civet/extraction/starting_models.py | FNNDSC/pycivet | 0 | 12766218 | """
Data from the `$MNI_DATAPATH/surface-extraction` directory.
"""
from civet.extraction.surfaces import RegularSurface
from civet.globals import MNI_DATAPATH
class SurfaceModel(RegularSurface['SurfaceModel']):
    """
    Represents a surface data file from the `$MNI_DATAPATH/surface-extraction` directory.
    """

    @classmethod
    def get_model(cls, name: str) -> 'SurfaceModel':
        """Resolve *name* under ``$MNI_DATAPATH/surface-extraction`` and wrap it."""
        return cls(MNI_DATAPATH / 'surface-extraction' / name)
WHITE_MODEL_320 = SurfaceModel.get_model('white_model_320.obj')
| 2.421875 | 2 |
class Solution:
    def numMovesStonesII(self, stones: List[int]) -> List[int]:
        """Return (minimum, maximum) moves needed to make the stones consecutive."""
        stones.sort()
        n = len(stones)

        # Maximum moves: fill every interior gap, except the smaller of the
        # two endpoint gaps, which is forfeited by the first endpoint move.
        gaps = 0
        for left, right in zip(stones, stones[1:]):
            gaps += max(right - left - 1, 0)
        endpoint_gap = min(stones[1] - stones[0] - 1, stones[-1] - stones[-2] - 1)
        max_moves = gaps - max(endpoint_gap, 0)

        # Minimum moves: corner case — n-1 stones already consecutive with one
        # stone isolated more than two slots away requires exactly two moves.
        if (stones[-2] - stones[0] == n - 2 and stones[-1] - stones[-2] > 2) or \
                (stones[-1] - stones[1] == n - 2 and stones[1] - stones[0] > 2):
            min_moves = 2
        else:
            # Otherwise slide a window of width n over the sorted stones and
            # move the stones that fall outside the densest window.
            best = 0
            lo = 0
            for hi in range(n):
                while stones[hi] - stones[lo] + 1 > n:
                    lo += 1
                best = max(best, hi - lo + 1)
            min_moves = n - best

        return min_moves, max_moves
| 2.796875 | 3 |
953_Verifying_an_Alien_Dictionary.py | joshlyman/Josh-LeetCode | 0 | 12766220 | <reponame>joshlyman/Josh-LeetCode<filename>953_Verifying_an_Alien_Dictionary.py
class Solution:
    def isAlienSorted(self, words: List[str], order: str) -> bool:
        """Return True iff *words* are lexicographically sorted under *order*."""
        rank = {letter: position for position, letter in enumerate(order)}
        # Translate every word into its sequence of alien ranks. Python's
        # built-in list comparison is lexicographic and treats a proper prefix
        # as smaller, which is exactly dictionary ordering.
        keys = [[rank[letter] for letter in word] for word in words]
        return all(earlier <= later for earlier, later in zip(keys, keys[1:]))
# Time: O(C), where C is the total number of characters across all words
# Space:O(1) | 3.34375 | 3 |
pynetlinux/util.py | youviewtv/pynetlinux | 69 | 12766221 | import sys
# Major-version flags used to write 2/3-compatible code.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# On Python 3 raw byte strings are ``bytes``; on Python 2 they are ``str``.
binary_type = bytes if PY3 else str
| 2.34375 | 2 |
controller/employment_controller.py | waynshang/stock_institution | 1 | 12766222 | import urllib3
import os
import sys
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
from request import get_employment_data
from datetime import date
from utils import get_label_data, getLogger
DEBUG = getLogger()
def main(label, commencement_date, end_date):
    """Fetch employment data and total the values for *label* within the window.

    Returns the summed value on success. On extraction failure logs the error
    and returns ``{}`` (the original error-path value is preserved).

    Bug fix: the success path previously computed ``total`` but returned
    ``None``, while the error path returned ``{}``.
    """
    result = get_employment_data()
    try:
        value_within_date = get_label_data(result, label, commencement_date, end_date)
    except Exception as error:
        DEBUG.error("======Error======")
        if 'msg' in result: DEBUG.info(result['msg'])
        DEBUG.error("{}".format(error))
        return {}
    DEBUG.info(value_within_date)
    total = sum(value_within_date.values())
    DEBUG.info(total)
    return total
if __name__ == '__main__':
    try:
        # Sample query: total for label "c:36" between March 2020 and May 2021.
        commencement_date = '2020-03-01'
        end_date = '2021-05-01'
        label = "c:36"
        main(label, commencement_date, end_date)
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        exit()
climateeconomics/sos_wrapping/sos_wrapping_emissions/ghgemissions/ghgemissions_discipline.py | os-climate/witness-core | 1 | 12766223 | <gh_stars>1-10
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from climateeconomics.core.core_witness.climateeco_discipline import ClimateEcoDiscipline
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import numpy as np
from climateeconomics.core.core_emissions.ghg_emissions_model import GHGEmissions
class GHGemissionsDiscipline(ClimateEcoDiscipline):
"GHGemissions discipline for DICE"
# ontology information
_ontology_data = {
'label': 'GHG Emission WITNESS Model',
'type': 'Research',
'source': 'SoSTrades Project',
'validated': '',
'validated_by': 'SoSTrades Project',
'last_modification_date': '',
'category': '',
'definition': '',
'icon': 'fas fa-smog fa-fw',
'version': '',
}
years = np.arange(2020, 2101)
name = 'GHGEmissions'
_maturity = 'Research'
# https://ghgprotocol.org/sites/default/files/ghgp/Global-Warming-Potential-Values%20%28Feb%2016%202016%29_1.pdf
# From IPCC AR5
GWP_100_default = {'CO2': 1.0,
'CH4': 28.,
'N2O': 265.}
GWP_20_default = {'CO2': 1.0,
'CH4': 85.,
'N2O': 265.}
DESC_IN = {
'year_start': ClimateEcoDiscipline.YEAR_START_DESC_IN,
'year_end': ClimateEcoDiscipline.YEAR_END_DESC_IN,
'time_step': ClimateEcoDiscipline.TIMESTEP_DESC_IN,
'GHG_global_warming_potential20': {'type': 'dict', 'unit': 'kgCO2eq/kg', 'default': GWP_20_default, 'visibility': ClimateEcoDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness', 'user_level': 3},
'GHG_global_warming_potential100': {'type': 'dict', 'unit': 'kgCO2eq/kg', 'default': GWP_100_default, 'visibility': ClimateEcoDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness', 'user_level': 3},
'CO2_land_emissions': {'type': 'dataframe', 'unit': 'GtCO2', 'visibility': ClimateEcoDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness'},
'CO2_indus_emissions_df': {'type': 'dataframe', 'unit': 'Gt', 'visibility': ClimateEcoDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness'},
'GHG_total_energy_emissions': {'type': 'dataframe', 'unit': 'Gt', 'visibility': ClimateEcoDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness'},
}
DESC_OUT = {
'co2_emissions_Gt': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_energy_mix', 'unit': 'Gt'},
'CO2_emissions_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness', 'unit': 'Gt'},
'GHG_emissions_detail_df': {'type': 'dataframe', 'unit': 'Gt'},
'GWP_emissions': {'type': 'dataframe', 'unit': 'GtCO2eq'}
}
def init_execution(self):
in_dict = self.get_sosdisc_inputs()
self.emissions_model = GHGEmissions(in_dict)
def run(self):
# Get inputs
inputs_dict = self.get_sosdisc_inputs()
self.emissions_model.configure_parameters_update(inputs_dict)
# Compute de emissions_model
self.emissions_model.compute()
# Store output data
co2_emissions_df = self.emissions_model.compute_co2_emissions_for_carbon_cycle()
dict_values = {'GHG_emissions_detail_df': self.emissions_model.ghg_emissions_df,
'co2_emissions_Gt': self.emissions_model.GHG_total_energy_emissions[['years', 'Total CO2 emissions']],
'CO2_emissions_df': co2_emissions_df,
'GWP_emissions': self.emissions_model.gwp_emissions}
self.store_sos_outputs_values(dict_values)
    def compute_sos_jacobian(self):
        """
        Compute jacobian for each coupling variable
        gradient of coupling variable to compute:
        co2_emissions_Gt
        """
        inputs_dict = self.get_sosdisc_inputs()
        # One jacobian entry per simulated year.
        years = np.arange(
            inputs_dict['year_start'], inputs_dict['year_end'] + 1, inputs_dict['time_step'])
        # land emissions
        CO2_land_emissions = inputs_dict['CO2_land_emissions']
        for column in CO2_land_emissions.columns:
            if column != "years":
                # Yearly totals add linearly in each land column -> identity.
                self.set_partial_derivative_for_other_types(
                    ('CO2_emissions_df', 'total_emissions'), ('CO2_land_emissions', column), np.identity(len(years)))
                # Cumulated totals are a running sum -> lower-triangular ones.
                self.set_partial_derivative_for_other_types(
                    ('CO2_emissions_df', 'cum_total_emissions'), ('CO2_land_emissions', column), np.tril(np.ones((len(years), len(years)))))
        # Industrial emissions contribute the same identity / cumulative patterns.
        self.set_partial_derivative_for_other_types(
            ('CO2_emissions_df', 'total_emissions'), ('CO2_indus_emissions_df', 'indus_emissions'), np.identity(len(years)))
        self.set_partial_derivative_for_other_types(
            ('CO2_emissions_df', 'cum_total_emissions'), ('CO2_indus_emissions_df', 'indus_emissions'), np.tril(np.ones((len(years), len(years)))))
        # Energy CO2 emissions feed both CO2_emissions_df and co2_emissions_Gt.
        self.set_partial_derivative_for_other_types(
            ('CO2_emissions_df', 'total_emissions'), ('GHG_total_energy_emissions', 'Total CO2 emissions'), np.identity(len(years)))
        self.set_partial_derivative_for_other_types(
            ('CO2_emissions_df', 'cum_total_emissions'), ('GHG_total_energy_emissions', 'Total CO2 emissions'), np.tril(np.ones((len(years), len(years)))))
        self.set_partial_derivative_for_other_types(
            ('co2_emissions_Gt', 'Total CO2 emissions'), ('GHG_total_energy_emissions', 'Total CO2 emissions'), np.identity(len(years)))
def get_chart_filter_list(self):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
chart_filters = []
chart_list = ['GHG emissions per sector', 'Global Warming Potential']
#chart_list = ['sectoral energy carbon emissions cumulated']
# First filter to deal with the view : program or actor
chart_filters.append(ChartFilter(
'Charts', chart_list, chart_list, 'charts'))
return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
instanciated_charts = []
charts = []
if chart_filters is not None:
for chart_filter in chart_filters:
charts = chart_filter.selected_values
if 'GHG emissions per sector' in charts:
for ghg in GHGEmissions.GHG_TYPE_LIST:
new_chart = self.get_chart_emission_per_sector(ghg)
if new_chart is not None:
instanciated_charts.append(new_chart)
if 'Global Warming Potential' in charts:
for gwp_year in [20, 100]:
new_chart = self.get_chart_gwp(gwp_year)
if new_chart is not None:
instanciated_charts.append(new_chart)
return instanciated_charts
def get_chart_gwp(self, gwp_year):
GWP_emissions = self.get_sosdisc_outputs(
'GWP_emissions')
chart_name = f'Global warming potential at {gwp_year} years'
new_chart = TwoAxesInstanciatedChart(
'years', 'GWP [GtCO2]', chart_name=chart_name, stacked_bar=True)
for ghg in GHGEmissions.GHG_TYPE_LIST:
new_serie = InstanciatedSeries(list(GWP_emissions['years'].values), list(GWP_emissions[f'{ghg}_{gwp_year}'].values),
ghg, 'bar')
new_chart.series.append(new_serie)
return new_chart
def get_chart_emission_per_sector(self, ghg):
GHG_emissions_detail_df = self.get_sosdisc_outputs(
'GHG_emissions_detail_df')
chart_name = f'{ghg} emissions per sector'
new_chart = TwoAxesInstanciatedChart(
'years', f'{ghg} Emissions [Gt]', chart_name=chart_name, stacked_bar=True)
sector_list = ['energy', 'land', 'industry']
for sector in sector_list:
if f'{ghg} {sector}_emissions' in GHG_emissions_detail_df:
new_serie = InstanciatedSeries(list(GHG_emissions_detail_df['years'].values), list(GHG_emissions_detail_df[f'{ghg} {sector}_emissions'].values),
sector, 'bar')
new_chart.series.append(new_serie)
return new_chart
| 1.625 | 2 |
core/lineFollower.py | magikerwin1993/Line-Thinning-Python | 4 | 12766224 | <reponame>magikerwin1993/Line-Thinning-Python
import logging
import os

import cv2
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

from core.process import searchObject, getWindow, getPerimeterInfos, getEdgePoints, getPointers, getSkeletonPoint, updateData, connectPoints
from core.utils import plotPoint, plotWindow
class LineFollower():
    """Thin (skeletonize) line drawings by tracing each stroke with a small
    sliding window and recording the skeleton points along the way.

    Args:
        d (int): half-size of the tracing window, in pixels.
        black_text (bool): True when strokes are dark on a light background;
            when False, images are inverted before and after processing.
    """

    def __init__(self, d=2, black_text=True):
        self.d = d
        self.black_text = black_text
        # Trace of the last process() call, consumed by display_history().
        self._history = {
            "img_src": None,   # image exactly as fed to _pipeline
            "img_dst": None,   # resulting skeleton image
            "pointers": [],    # per-step window pointer groups
            "skeleton": [],    # per-step skeleton points
        }

    def process(self, img_src):
        """Return the skeleton image computed from *img_src*.

        Inverts on entry/exit when `black_text` is False so the pipeline
        always sees dark strokes on a light background.
        """
        if not self.black_text:
            img_src = 255 - img_src
        img_dst = self._pipeline(img_src)
        if not self.black_text:
            img_dst = 255 - img_dst
        return img_dst

    def _pipeline(self, img_src):
        """Trace every object in *img_src* and draw its skeleton on a white image."""
        input_h, input_w = img_src.shape[:2]
        img_dst = np.ones_like(img_src, np.uint8) * 255
        # Binarize: 1 where a stroke pixel is present, 0 elsewhere.
        data = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
        data = np.clip(255 - data, 0, 1)

        pointers_total = []
        skeleton_total = []
        # LIFO stacks of branch points still to be traced.
        pointers_pending = []
        skeleton_pending = []

        process = False
        while True:
            if not process:
                # Scan for the next untraced object.
                point_encounter = searchObject(data)
                if point_encounter is None:
                    logging.info('No object found! [Done]')
                    break
                else:
                    logging.info('Encounter object!')
                    process = True
                    pointers = [np.array(point_encounter), np.array(point_encounter)]
                    pointers_total.append(pointers)
                    skeleton_total.append(point_encounter)
            else:
                # Advance the window along the current stroke.
                W = getWindow(pointers_total[-1][:2], input_w, input_h, self.d)
                points_edge = getEdgePoints(data, W)
                pointers = getPointers(points_edge)
                updateData(data, W)  # clear the traced window from the bitmap
                skeleton_total.append(getSkeletonPoint(W))

                ## check
                if pointers is None:
                    logging.info('End-of-Line!')
                    pointers_total.append([])
                    if len(pointers_pending) == 0:
                        logging.info('This object done!')
                        process = False
                    else:
                        logging.info("Back to pending branch")
                        pointers_total.append(pointers_pending.pop())
                        skeleton_total.append(skeleton_pending.pop())
                    continue
                elif len(pointers) == 2:
                    logging.info('Single-Branch')
                    pointers_total.append(pointers)
                else:
                    logging.info('Multi-Branch')
                    pointers_total.append(pointers)
                    # Park the extra branches for later tracing.
                    pointers_pending.append(pointers[2:])
                    skeleton_pending.append(skeleton_total[-1])

                # export skeleton
                connectPoints(img_dst, skeleton_total[-2], skeleton_total[-1])

        # update history
        self._history["img_src"] = img_src
        self._history["img_dst"] = img_dst
        self._history["pointers"] = pointers_total
        self._history["skeleton"] = skeleton_total
        return img_dst

    def display_history(self, limit_steps=5, export_directory_path=None):
        """Render the tracing steps recorded by the last process() call.

        Args:
            limit_steps (int): maximum number of steps to display.
            export_directory_path (str|None): when given, each step is also
                saved as a PNG in this (newly created) directory.
        """
        if len(self._history["skeleton"]) == 0:
            print("No history found!")
            return

        # BUGFIX: validate/create the export directory once, up front — the
        # original re-checked inside the loop, so the assert fired on step 2
        # right after step 1 had created the directory.
        if export_directory_path is not None:
            if os.path.exists(export_directory_path):
                assert(0), f"the export directory already exists! \"{export_directory_path}\""
            else:
                os.makedirs(export_directory_path)

        img_process = self._history["img_src"].copy()
        img_skeleton = np.ones_like(img_process, np.uint8) * 255
        input_h, input_w = img_process.shape[:2]

        cols = 2
        rows = len(self._history["pointers"])
        fig = plt.figure(figsize=(cols*3, rows*3))
        for time_id, pointers in enumerate(self._history["pointers"]):
            if time_id == limit_steps:
                break

            # 1. display processing state (pointers, window, cleared pixels)
            ax1 = plt.subplot(rows, cols, cols * time_id + 1)
            ax1.set_xticklabels([])
            ax1.set_yticklabels([])
            if len(pointers) > 0:
                # plot current pointers
                plotPoint(pointers[0], 'red')
                plotPoint(pointers[1], 'red')
                # plot window
                W = getWindow(pointers[:2], input_w, input_h, self.d)
                plotWindow(W)
                # plot next pointers
                if time_id != (len(self._history["pointers"]) - 1):
                    for point_id, point in enumerate(self._history["pointers"][time_id+1]):
                        if point_id < 2:
                            plotPoint(point, 'green')
                        else:
                            plotPoint(point, 'blue')
                # clear window
                img_process[W[1]:W[3]+1, W[0]:W[2]+1] = 255
            # BUGFIX: the original blended with an undefined name `img`; blend
            # the working copy with the pristine source image instead.
            plt.imshow((img_process * 0.7 + self._history["img_src"] * 0.3).astype(np.uint8), cmap='gray')
            plt.grid()
            plt.xticks(np.arange(0.5, input_w, step=1))
            plt.yticks(np.arange(0.5, input_h, step=1))
            plt.title("Processing")
            plt.text(-input_w/2, input_h/2, f"T={time_id}", fontsize=15)

            # 2. display output skeleton accumulated so far
            ax2 = plt.subplot(rows, cols, cols * time_id + 2)
            ax2.set_xticklabels([])
            ax2.set_yticklabels([])
            if len(pointers) > 0:
                # NOTE(review): assumes a skeleton point exists at time_id+1;
                # confirm the history length relation for the final step.
                connectPoints(img_skeleton, self._history["skeleton"][time_id], self._history["skeleton"][time_id+1])
            plt.imshow(img_skeleton, cmap='gray')
            plt.grid()
            plt.xticks(np.arange(0.5, input_w, step=1))
            plt.yticks(np.arange(0.5, input_h, step=1))
            plt.title("Skeleton")

            # Save display image in specified directory
            if export_directory_path is not None:
                extent1 = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                extent2 = ax2.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                extent = mpl.transforms.Bbox(np.array([extent1.get_points()[0],
                                                       extent2.get_points()[1]]))
                plt.savefig(os.path.join(export_directory_path, f'{time_id:05d}.png'),
                            bbox_inches=extent.expanded(1.4, 1.3))
| 2.5625 | 3 |
test_update_status_groups.py | MoveOnOrg/reach-groups | 0 | 12766225 | import pytest
from _pytest.monkeypatch import MonkeyPatch
from update_status_groups import update_status_groups
class Struct:
    """Lightweight object whose attributes come from keyword arguments."""

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
class Test():
    """Tests for update_status_groups with all external calls monkeypatched."""
    monkeypatch = MonkeyPatch()
    # Reassigned per scenario to simulate the groups already present remotely.
    existing_groups = []

    # NOTE: bound methods are installed as module-level replacements below, so
    # the patched function's single positional argument arrives in `args`.
    # Mock API request for token.
    def mock_get_token(self, args):
        return 'mock token'

    # Mock API request for groups.
    def mock_get_groups(self, args):
        return {'user_groups': self.existing_groups}

    # Mock query results for members by state.
    def mock_get_psql_results(self, args):
        return [{'state': 'CO', 'member_ids': 'mock,member,ids'}]

    # Mock API request to update group.
    def mock_update_group(self, args):
        return {'name': 'updated %s' % args.GROUP_ID, 'member_ids': args.MEMBER_IDS}

    # Mock API request to create group.
    def mock_create_group(self, args):
        return {'name': 'created %s' % args.GROUP_NAME, 'member_ids': args.MEMBER_IDS}

    def test_update_status_groups(self):
        """Cover both paths: create when no group exists, update when one does."""
        Test.monkeypatch.setattr("update_status_groups.get_token", self.mock_get_token)
        Test.monkeypatch.setattr("update_status_groups.get_groups", self.mock_get_groups)
        Test.monkeypatch.setattr("update_status_groups.get_psql_results", self.mock_get_psql_results)
        Test.monkeypatch.setattr("update_status_groups.update_group", self.mock_update_group)
        Test.monkeypatch.setattr("update_status_groups.create_group", self.mock_create_group)
        # All args are mocked, but still required.
        args = {
            'DB_HOST': 'mock',
            'DB_PORT': 'mock',
            'DB_USER': 'mock',
            'DB_PASS': '<PASSWORD>',
            'DB_NAME': 'mock',
            'REACH_API_USER': 'mock',
            'REACH_API_PASS': 'mock',
            'STATUS_NAME': 'mock',
            'DB_QUERY': 'mock'
        }
        args = Struct(**args)
        # Test create.
        self.existing_groups = []
        result = update_status_groups(args)
        assert result == {
            'created': [
                {'name': 'created CO: mock', 'member_ids': 'mock,member,ids'}
            ],
            'updated': []
        }
        # Test update.
        self.existing_groups = [{'name': 'CO: mock', 'id': 'existing-group-id'}]
        result = update_status_groups(args)
        assert result == {
            'created': [],
            'updated': [
                {'name': 'updated existing-group-id', 'member_ids': 'mock,member,ids'}
            ]
        }
| 2.390625 | 2 |
luxon/utils/pkg.py | HieronymusCrouse/luxon | 7 | 12766226 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 <NAME> <<EMAIL>>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import importlib.util
from pkg_resources import (resource_stream, resource_listdir,
resource_isdir, resource_exists,
iter_entry_points)
from luxon.utils.singleton import NamedSingleton
from luxon.utils.imports import import_module
from luxon.utils.files import mkdir, is_dir
from luxon.utils.files import exists as f_exists
from luxon.exceptions import NotFoundError
from luxon.core.logger import GetLogger
log = GetLogger(__name__)
class EntryPoints(metaclass=NamedSingleton):
    """Registry of loaded setuptools entry points for one entry-point group.

    Entries are loaded once at construction and exposed both as attributes
    and via item lookup; iterating yields the entry-point names.
    """

    def __init__(self, name):
        self.named_objects = {ep.name: ep.load()
                              for ep in iter_entry_points(group=name)}

    def __getattr__(self, name):
        if name not in self.named_objects:
            raise AttributeError(name)
        return self.named_objects[name]

    def __getitem__(self, name):
        if name not in self.named_objects:
            raise NotFoundError("Entry Point '%s' not found" % name)
        return self.named_objects[name]

    def __iter__(self):
        return iter(self.named_objects)
class Module(object):
    """Imports an installed module and exposes its packaged resources.

    Args:
        module (str): Module import path definition.
    """

    def __init__(self, module):
        try:
            import_module(module)
            self._module = module
        except ImportError:
            raise ImportError(module) from None

    def exists(self, path, error=False):
        """Return whether the resource exists.

        Args:
            path (str): resource location
            error (bool): when True, raise FileNotFoundError for a missing
                resource instead of returning False.
        """
        try:
            val = resource_exists(self._module, path)
            if val is False and error is True:
                raise FileNotFoundError('%s/%s' % (self._module,
                                                   path,)) from None
            return val
        except ImportError:
            raise ImportError(self._module) from None

    def read(self, path):
        """Return the full resource content (bytes, as read from the stream).

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)
            with resource_stream(self._module,
                                 path) as res:
                return res.read()
        except ImportError:
            raise ImportError(self._module) from None

    def file(self, path):
        """Return an open stream for the resource (caller closes it).

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)
            return resource_stream(self._module,
                                   path)
        except ImportError:
            raise ImportError(self._module) from None

    def list(self, path):
        """List entries of a resource directory.

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)
            return resource_listdir(self._module, path)
        except ImportError:
            raise ImportError(self._module) from None

    def is_dir(self, path):
        """Return True if the resource is a directory.

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)
            return resource_isdir(self._module, path)
        except ImportError:
            raise ImportError(self._module) from None

    def is_file(self, path):
        """Return True if the resource is a file, False if it is a directory.

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)
            # BUGFIX: the original returned None for actual files (it only
            # ever returned False for directories); be explicit for both cases.
            return not resource_isdir(self._module, path)
        except ImportError:
            raise ImportError(self._module) from None

    def walk(self, path):
        """Recursively list all resources below *path* (paths relative to it).

        Args:
            path (str): resource location
        """
        try:
            self.exists(path, True)

            def _walk(real_path, walk_path):
                files = []
                directory = self.list(real_path)
                for f in directory:
                    file_path = real_path.rstrip('/') + '/' + f
                    file_walk_path = walk_path.rstrip('/') + '/' + f
                    if self.is_dir(file_path):
                        # Record the directory itself, then recurse into it.
                        files.append(file_walk_path)
                        files += _walk(file_path, file_walk_path)
                    else:
                        files.append(file_walk_path)
                return files

            return _walk(path, '')
        except ImportError:
            raise ImportError(self._module) from None

    def copy(self, src, dst, new_extension=None):
        """Copy a resource (file or whole directory tree) to the filesystem.

        Args:
            src (str): resource location
            dst (str): destination path
            new_extension (str): when given and the destination file already
                exists, the copy is written with this extra extension instead
                of overwriting the existing file.
        """
        try:
            self.exists(src, True)
            if self.is_dir(src):
                mkdir(dst, recursive=True)
                real_src = src
                walk_files = self.walk(real_src)
                for walk_file in walk_files:
                    real_src = src.rstrip('/') + '/' + walk_file.strip('/')
                    real_dst = dst.rstrip('/') + '/' + walk_file.strip('/')
                    if self.is_dir(real_src):
                        mkdir(real_dst, recursive=True)
                    else:
                        content = self.read(real_src)
                        if new_extension is not None and f_exists(real_dst):
                            real_dst += "." + new_extension.strip('.')
                        print("Copy file %s:%s to %s" % (self._module,
                                                         real_src,
                                                         real_dst,))
                        with open(real_dst, 'wb') as new_file:
                            new_file.write(content)
            else:
                content = self.read(src)
                src_file = src.strip('/').split('/')[-1]
                if is_dir(dst):
                    # Destination is a directory: keep the source file name.
                    dst = dst.rstrip('/') + '/' + src_file
                if new_extension is not None and f_exists(dst):
                    dst += "." + new_extension.strip('.')
                print("Copy file %s:/%s to %s" % (self._module, src, dst,))
                with open(dst, 'wb') as new_file:
                    new_file.write(content)
        except ImportError:
            raise ImportError(self._module) from None
def exists(package):
    """Return True when *package* is importable, False otherwise."""
    return importlib.util.find_spec(package) is not None
| 1.320313 | 1 |
bert/modeling.py | alclone94/KorRoBERTa | 0 | 12766227 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>PORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
# Module-level logger.
logger = logging.getLogger(__name__)

# File name used for BERT configuration files shipped alongside checkpoints.
BERT_CONFIG_NAME = 'bert_config.json'
def prune_linear_layer(layer, index, dim=0):
    """Prune a linear layer (a model parameters) to keep only entries in index.

    Return the pruned layer as a new layer with requires_grad=True.
    Used to remove heads.
    """
    index = index.to(layer.weight.device)
    # Snapshot the surviving weight rows/columns along `dim`.
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    kept_bias = None
    if layer.bias is not None:
        # Pruning input features (dim=1) leaves the bias untouched.
        kept_bias = (layer.bias.clone().detach() if dim == 1
                     else layer.bias[index].clone().detach())
    out_size = list(layer.weight.size())
    out_size[dim] = len(index)
    pruned = nn.Linear(out_size[1], out_size[0],
                       bias=layer.bias is not None).to(layer.weight.device)
    # Copy the kept values in-place, temporarily disabling grad tracking.
    pruned.weight.requires_grad = False
    pruned.weight.copy_(kept_weight.contiguous())
    pruned.weight.requires_grad = True
    if kept_bias is not None:
        pruned.bias.requires_grad = False
        pruned.bias.copy_(kept_bias.contiguous())
        pruned.bias.requires_grad = True
    return pruned
def gelu(x):
    """Implementation of the gelu activation function (exact erf form).

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    normal_cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * normal_cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 embedding_size=128,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12):
        """Build a config from a JSON file path or from keyword values.

        Args:
            vocab_size_or_config_json_file: vocabulary size (int), or path to
                a JSON config file (str) whose keys overwrite all attributes.
            embedding_size: token-embedding dimension (may differ from
                `hidden_size`; BertEncoder projects between them).
            hidden_size: transformer hidden dimension.
            num_hidden_layers: number of encoder layers.
            num_attention_heads: attention heads per layer.
            intermediate_size: feed-forward inner dimension.
            hidden_act: activation name (a key of ACT2FN) or a callable.
            hidden_dropout_prob: dropout applied after embeddings/projections.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            initializer_range: presumably the stddev for weight initialization
                (used by model-init code not shown in this section).
            layer_norm_eps: epsilon used inside LayerNorm.
        """
        # The `unicode` branch only evaluates on Python 2 (short-circuited on 3).
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # Path given: every attribute comes from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.embedding_size = embedding_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # -1 is a placeholder vocab size; the dict overwrites all attributes.
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())
# Prefer NVIDIA Apex's fused LayerNorm when installed; otherwise fall back to
# the pure-PyTorch TF-style implementation below.
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")

    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last dimension, then apply learned scale/shift.
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Construct input embeddings from word and learned absolute position
    embeddings (this variant has no token-type embeddings)."""

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.LayerNorm = BertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids):
        """Embed token ids, add position embeddings, then LayerNorm + dropout."""
        position_ids = torch.arange(input_ids.size(1), dtype=torch.long,
                                    device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        combined = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(combined))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = output_attentions
        # When set, the per-head context tensor is retained (with grad) on
        # `multihead_output` for later inspection.
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        # attention_mask is an additive mask applied to the raw scores;
        # head_mask (optional) multiplies the attention probabilities per head.
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        if self.keep_multihead_output:
            self.multihead_output = context_layer
            self.multihead_output.retain_grad()

        # Merge the heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        if self.output_attentions:
            return attention_probs, context_layer
        return context_layer
class BertSelfOutput(nn.Module):
    """Project attention output back to hidden size, then residual add & norm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Apply projection + dropout, then add the residual and normalize."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sub-layer: BertSelfAttention followed by the residual output
    projection (BertSelfOutput). Supports pruning of attention heads."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertAttention, self).__init__()
        self.output_attentions = output_attentions
        self.self = BertSelfAttention(config, output_attentions=output_attentions,
                                      keep_multihead_output=keep_multihead_output)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        """Remove the given head indices from this attention module in place."""
        if len(heads) == 0:
            return
        # Build a boolean mask over the flattened (head, head_dim) axis with
        # zeros at the heads being removed.
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # The output projection loses input features (dim=1), not outputs.
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_output = self.self(input_tensor, attention_mask, head_mask)
        if self.output_attentions:
            attentions, self_output = self_output
        attention_output = self.output(self_output, input_tensor)
        if self.output_attentions:
            return attentions, attention_output
        return attention_output
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be an activation name (looked up in ACT2FN) or a
        # callable; the py2 `unicode` check short-circuits on py3.
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        """Expand to the intermediate size and apply the activation."""
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Contract the feed-forward output back to hidden size, then add & norm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project + dropout, then add the residual and normalize."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One full transformer encoder layer: self-attention + feed-forward."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertLayer, self).__init__()
        self.output_attentions = output_attentions
        self.attention = BertAttention(config, output_attentions=output_attentions,
                                       keep_multihead_output=keep_multihead_output)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        """Run attention, then the position-wise feed-forward sub-block."""
        attn_result = self.attention(hidden_states, attention_mask, head_mask)
        if self.output_attentions:
            attn_probs, attn_output = attn_result
        else:
            attn_output = attn_result
        ffn_hidden = self.intermediate(attn_output)
        layer_output = self.output(ffn_hidden, attn_output)
        if self.output_attentions:
            return attn_probs, layer_output
        return layer_output
# NOTE: dead code — an earlier weight-sharing variant of BertEncoder kept as a
# module-level string for reference; superseded by the BertEncoder class below.
'''
class BertEncoder(nn.Module):
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertEncoder, self).__init__()
        self.config = config
        self.output_attentions = output_attentions
        self.layer = BertLayer(config, output_attentions=output_attentions,
                               keep_multihead_output=keep_multihead_output)
        #self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        if config.embedding_size != config.hidden_size:
            self.embedding_to_hidden = nn.Linear(config.embedding_size, config.hidden_size)

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        if self.config.embedding_size != self.config.hidden_size: # embedding to hidden
            hidden_states = self.embedding_to_hidden(hidden_states)
        all_encoder_layers = []
        all_attentions = []
        for i in range(self.config.num_hidden_layers) :
            hidden_states = self.layer(hidden_states, attention_mask, head_mask[i])
            if self.output_attentions:
                attentions, hidden_states = hidden_states
                all_attentions.append(attentions)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        if self.output_attentions:
            return all_attentions, all_encoder_layers
        return all_encoder_layers
'''
class BertEncoder(nn.Module):
    """Stack of independent BertLayer modules applied sequentially."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertEncoder, self).__init__()
        self.config = config
        self.output_attentions = output_attentions
        prototype = BertLayer(config, output_attentions=output_attentions,
                              keep_multihead_output=keep_multihead_output)
        # One independent copy of the layer per configured depth.
        self.layer = nn.ModuleList(
            [copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)]
        )
        if config.embedding_size != config.hidden_size:
            # Project embeddings up/down to the transformer width.
            self.embedding_to_hidden = nn.Linear(config.embedding_size, config.hidden_size)

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        """Apply every layer in order, optionally collecting intermediate outputs."""
        if self.config.embedding_size != self.config.hidden_size:  # embedding to hidden
            hidden_states = self.embedding_to_hidden(hidden_states)
        all_encoder_layers = []
        all_attentions = []
        for idx, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states, attention_mask, head_mask[idx])
            if self.output_attentions:
                attentions, hidden_states = hidden_states
                all_attentions.append(attentions)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        if self.output_attentions:
            return all_attentions, all_encoder_layers
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pool a sequence into one vector from its first token's hidden state."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here means projecting the hidden state of the first
        # token and squashing it through tanh.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense projection + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)
        # hidden_act may be a key into ACT2FN or a callable itself
        # (the py2 branch additionally accepts unicode keys).
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Language-model head whose decoder weights are tied to the input embeddings."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # Weight tying: reuse the embedding matrix as the output projection,
        # with a separate trainable per-token bias.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertPreTrainingHeads(nn.Module):
    """Container for the pre-training output head (masked-LM scores only)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        # Fail fast on a wrong config type; everything downstream assumes
        # BertConfig attributes (hidden_size, num_hidden_layers, ...).
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights.

        Intended to be passed to ``nn.Module.apply``: Linear/Embedding weights
        get a truncated-normal-like init, LayerNorm gets (1, 0), and Linear
        biases are zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, model_path, *inputs, **kwargs):
        """Instantiate the model from ``model_path`` and load the given state dict.

        ``kwargs`` may carry ``state_dict``; it is popped before the remaining
        kwargs are forwarded to the constructor.

        NOTE(review): if no ``state_dict`` kwarg is passed, ``state_dict`` is
        None and the ``state_dict.keys()`` call below raises AttributeError —
        confirm whether callers are always expected to supply one.
        """
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        config = BertConfig.from_json_file(os.path.join(model_path, BERT_CONFIG_NAME))
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        # Load from a PyTorch state_dict
        # Rename legacy TF-style LayerNorm parameters (gamma/beta) to the
        # PyTorch names (weight/bias).
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load parameters for this module and its children,
            # accumulating missing/unexpected keys and error messages.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # If the checkpoint keys are prefixed 'bert.' but this model has no
        # `.bert` attribute (i.e. it IS the bare BertModel), strip the prefix.
        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
class BertModel(BertPreTrainedModel):
    """Bare BERT encoder: embeddings followed by the transformer stack.

    ``forward`` returns the encoded layer(s) — and the attention maps as well
    when ``output_attentions`` is enabled.
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertModel, self).__init__(config)
        self.output_attentions = output_attentions
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, output_attentions=output_attentions,
                                   keep_multihead_output=keep_multihead_output)
        self.apply(self.init_bert_weights)

    def prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """ Gather all multi-head outputs.
            Return: list (layers) of multihead module outputs with gradients
        """
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, input_ids, attention_mask=None, output_all_encoded_layers=True, head_mask=None):
        """Encode ``input_ids``; see the class docstring for the return contract."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        # extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: expand_as() takes a tensor argument, not sizes, so the
                # original `expand_as(num_layers, -1, ...)` raised a TypeError.
                # Use expand() to broadcast the same mask to every layer.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            # BUGFIX: the dtype cast previously ran only in the dim()==2 branch;
            # apply it uniformly (switch to float if need + fp16 compatibility).
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        embedding_output = self.embeddings(input_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      head_mask=head_mask)
        if self.output_attentions:
            all_attentions, encoded_layers = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return all_attentions, encoded_layers
        return encoded_layers
class BertForPreTraining(BertPreTrainedModel):
    """BERT with a tied-weight masked-LM head for pre-training."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(BertForPreTraining, self).__init__(config)
        self.output_attentions = output_attentions
        self.bert = BertModel(config, output_attentions=output_attentions,
                              keep_multihead_output=keep_multihead_output)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, attention_mask=None, masked_lm_labels=None, head_mask=None):
        """Return the MLM loss when labels are given, otherwise prediction scores."""
        outputs = self.bert(input_ids, attention_mask,
                            output_all_encoded_layers=False, head_mask=head_mask)
        all_attentions = None
        if self.output_attentions:
            all_attentions, sequence_output = outputs
        else:
            sequence_output = outputs
        prediction_scores = self.cls(sequence_output)
        if masked_lm_labels is not None:
            # Positions labelled -1 (unmasked tokens) are excluded from the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            return loss_fct(prediction_scores.view(-1, self.config.vocab_size),
                            masked_lm_labels.view(-1))
        if self.output_attentions:
            return all_attentions, prediction_scores
        return prediction_scores
src/cms/menus/migrations/0007_alter_navigationbar_name.py | UniversitaDellaCalabria/uniCMS | 6 | 12766228 | # Generated by Django 3.2.3 on 2021-05-21 12:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.3): drop constraints on NavigationBar.name,
    leaving a plain CharField(max_length=255).

    NOTE(review): generated migration file — do not hand-edit the operations.
    """

    dependencies = [
        ('cmsmenus', '0006_auto_20210507_1618'),
    ]
    operations = [
        migrations.AlterField(
            model_name='navigationbar',
            name='name',
            field=models.CharField(max_length=255),
        ),
    ]
| 1.523438 | 2 |
source/framework/python/test/to_numpy_test.py | pcraster/lue | 1 | 12766229 | <gh_stars>1-10
import lue.framework as lfr
import lue_test
import numpy as np
def setUpModule():
    # Boot the HPX runtime once for every test in this module.
    lue_test.start_hpx_runtime()
def tearDownModule():
    # NOTE(review): the early return skips stopping the HPX runtime — the call
    # below is unreachable. Confirm whether this is a deliberate workaround
    # (e.g. shutdown hangs) or whether the dead call can be removed.
    return
    lue_test.stop_hpx_runtime()
class ToNumPyTest(lue_test.TestCase):
    """Round-trip checks for lfr.to_numpy on partitioned arrays."""

    @lue_test.framework_test_case
    def test_array(self):
        shape, partitions = (60, 40), (10, 10)
        dtype = np.dtype(np.int32)
        value = 5
        lue_array = lfr.create_array(shape, partitions, dtype, value)
        result = lfr.to_numpy(lue_array)
        self.assertEqual(result.dtype, dtype)
        np.testing.assert_array_equal(result, np.full(shape, value, dtype=dtype))

    @lue_test.framework_test_case
    def test_mark_no_data(self):
        shape, partitions = (60, 40), (10, 10)
        dtype = np.dtype(np.int32)
        # Every element is no-data: the condition 5 != 5 never holds, so
        # where() leaves all cells unset; to_numpy maps them to 9.
        lue_array = lfr.where(lfr.create_array(shape, partitions, dtype, 5) != 5, 7)
        result = lfr.to_numpy(lue_array, 9)
        np.testing.assert_array_equal(result, np.full(shape, 9, dtype=dtype))

    @lue_test.framework_test_case
    def test_small_array(self):
        shape, partitions = (1, 1), (1, 1)
        dtype = np.dtype(np.int32)
        value = 5
        lue_array = lfr.create_array(shape, partitions, dtype, value)
        result = lfr.to_numpy(lue_array)
        self.assertEqual(result.dtype, dtype)
        np.testing.assert_array_equal(result, np.full(shape, value, dtype=dtype))
| 2.078125 | 2 |
tasks/docker.py | andrewl/datadog-agent | 0 | 12766230 | <filename>tasks/docker.py
"""
Docker related tasks
"""
import os
import shutil
import sys
import tempfile
import time
from invoke import task
from invoke.exceptions import Exit
from .dogstatsd import DOGSTATSD_TAG
def retry_run(ctx, *args, **kwargs):
    """Run an invoke command, retrying up to five times on failure.

    All but the last attempt use ``warn=True`` so a non-zero exit returns a
    result instead of raising; the final attempt uses ``warn=False`` so invoke
    raises on failure. Sleeps 5 seconds between attempts.

    The original implementation placed an unreachable ``return`` inside a
    ``while True`` loop; this version expresses the attempt budget directly.
    """
    for remaining_retries in range(5, -1, -1):
        # Last attempt (remaining_retries == 0): let invoke raise on failure.
        r = ctx.run(*args, warn=remaining_retries > 0, **kwargs)
        if r.ok:
            return r
        # Pause between retries. Hope it helps.
        time.sleep(5)
    return r
@task
def test(ctx):
    """
    Run docker tests
    """
    # Exercises the secrets-helper read path used by the agent images.
    readsecret_tests = "python3 ./Dockerfiles/agent/secrets-helper/test_readsecret.py"
    ctx.run(readsecret_tests)
@task
def integration_tests(ctx, skip_image_build=False, skip_build=False, python_command="python3"):
    """
    Run docker integration tests
    """
    if not skip_image_build:
        # Deferred import: importing at module scope would also register
        # `image_build` under the docker task namespace.
        from .dogstatsd import image_build

        image_build(ctx, skip_build=skip_build)

    print("Starting docker integration tests")
    ctx.run(
        "{} ./test/integration/docker/dsd_listening.py".format(python_command),
        env={"DOCKER_IMAGE": DOGSTATSD_TAG},
    )
@task
def dockerize_test(ctx, binary, skip_cleanup=False):
    """
    Run a go test in a remote docker environment and pipe its output to stdout.
    Host and target systems must be identical (test is built on the host).
    """
    import docker

    client = docker.from_env()
    temp_folder = tempfile.mkdtemp(prefix="ddtest-")
    # Copy the pre-built test binary into the build context.
    ctx.run("cp %s %s/test.bin" % (binary, temp_folder))
    # Write a minimal Dockerfile that just runs the test binary.
    with open("%s/Dockerfile" % temp_folder, 'w') as stream:
        stream.write(
            """FROM docker/compose:debian-1.28.3
ENV DOCKER_DD_AGENT=yes
WORKDIR /
CMD /test.bin
COPY test.bin /test.bin
"""
        )
        # Handle optional testdata folder
        if os.path.isdir("./testdata"):
            ctx.run("cp -R testdata %s" % temp_folder)
            stream.write("COPY testdata /testdata")
    test_image, _ = client.images.build(path=temp_folder, rm=True)
    # Scratch volume shared with the test container for temp output.
    scratch_volume = client.volumes.create()
    test_container = client.containers.run(
        test_image.id,
        detach=True,
        pid_mode="host",  # For origin detection
        environment=["SCRATCH_VOLUME_NAME=" + scratch_volume.name, "SCRATCH_VOLUME_PATH=/tmp/scratch"],
        volumes={
            '/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'ro'},
            '/proc': {'bind': '/host/proc', 'mode': 'ro'},
            '/sys/fs/cgroup': {'bind': '/host/sys/fs/cgroup', 'mode': 'ro'},
            scratch_volume.name: {'bind': '/tmp/scratch', 'mode': 'rw'},
        },
    )
    # Block until the test finishes, then forward its stdout/stderr.
    exit_code = test_container.wait()['StatusCode']
    print(test_container.logs(stdout=True, stderr=False, stream=False))
    sys.stderr.write(test_container.logs(stdout=False, stderr=True, stream=False).decode(sys.stderr.encoding))
    if not skip_cleanup:
        shutil.rmtree(temp_folder)
        test_container.remove(v=True, force=True)
        scratch_volume.remove(force=True)
        client.images.remove(test_image.id)
    # Propagate the test's exit status to the invoke caller.
    if exit_code != 0:
        raise Exit(code=exit_code)
@task
def delete(ctx, org, image, tag, token):
    # Remove a single tag from a Docker Hub repository via the v2 API.
    print("Deleting {org}/{image}:{tag}".format(org=org, image=image, tag=tag))
    request = (
        "curl 'https://hub.docker.com/v2/repositories/{org}/{image}/tags/{tag}/'"
        " -X DELETE -H 'Authorization: JWT {token}' &>/dev/null"
    ).format(org=org, image=image, tag=tag, token=token)
    ctx.run(request)
@task
def pull_base_images(ctx, dockerfile, signed_pull=True):
    """
    Pulls the base images for a given Dockerfile, with
    content trust enabled by default, to ensure the base
    images are signed
    """
    images = set()
    stages = set()
    with open(dockerfile, "r") as f:
        for line in f:
            tokens = line.split()
            # A source-image line looks like: FROM <image> [AS <stage>]
            if len(tokens) >= 2 and tokens[0].lower() == "from":
                images.add(tokens[1])
                if len(tokens) >= 4 and tokens[2].lower() == "as":
                    stages.add(tokens[3])
    if stages:
        # Stage aliases are not real images and must not be pulled.
        print("Ignoring intermediate stage names: {}".format(", ".join(stages)))
        images -= stages
    print("Pulling following base images: {}".format(", ".join(images)))
    pull_env = {"DOCKER_CONTENT_TRUST": "1"} if signed_pull else {}
    for image in images:
        ctx.run("docker pull {}".format(image), env=pull_env)
Update_database.py | jmeliodas96/Updated-Data-Bases-Over-The-Air | 0 | 12766231 | <gh_stars>0
# Script for get information from www.mcc-mnc.com
# This Script compare two list, the first list for store the external content and second list store the local content from database(table lista)
# After this is load in DataFrames for process and compare every row.
#By <NAME>
# dependences to install
import re
import urllib2
import json
import pandas as pd
import sys
import os
import subprocess
import sched
import time
import datetime
import numpy as np
import psycopg2
from pprint import pprint
import csv
# Scheduler used to launch parser_data() once at startup (see __main__ guard).
scheduler = sched.scheduler(time.time, time.sleep)
# PostgreSQL connection settings.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables or a config file kept out of version control.
DBNAME = "deviceinfo"
USER = "Jimmy"
HOST = "localhost"
PASSWORD = "<PASSWORD>"
FILE = "data.csv"
# Web Scraping for get information
def getCarrierInfo():
    """Scrape mcc-mnc.com and dump the carrier table to ext.json.

    Each table row becomes a dict with mcc/mnc/iso/country/countryc/network
    plus a synthetic key_carrier (mcc + mnc) and a 1-based id.
    """
    # Matches the six <td>...</td> cells making up one table row.
    td_re = re.compile('<td>([^<]*)</td>'*6)
    html = urllib2.urlopen('http://mcc-mnc.com/').read()
    # Only rows between <tbody> and </tbody> are data rows.
    tbody_start = False
    mcc_mnc_list = []
    # Running 1-based row id.
    i = 0
    for line in html.split('\n'):
        if '<tbody>' in line:
            tbody_start = True
        elif '</tbody>' in line:
            break
        elif tbody_start:
            i = i + 1
            current_item = {}
            # split() yields the six captured cell values at indexes 1..6.
            # (The original also ran td_re.search(line) here and immediately
            # discarded the result; that dead call has been removed.)
            td_search = td_re.split(line)
            one = current_item['mcc'] = td_search[1]
            two = current_item['mnc'] = td_search[2]
            current_item['iso'] = td_search[3]
            current_item['country'] = td_search[4]
            current_item['countryc'] = td_search[5]
            # Drop the trailing character (newline remnant) from the last cell.
            current_item['network'] = td_search[6][0:-1]
            current_item['key_carrier'] = one + two
            current_item['id'] = i
            mcc_mnc_list.append(current_item)
    # Persist for later comparison against the database contents.
    with open('ext.json', 'w') as file:
        json.dump(mcc_mnc_list, file, indent=2)
# get carrier_list from table lista into database deviceinfo
def getCarrierList():
    """Dump table ``lista`` to lista.csv via psql and return it as a DataFrame.

    The id/mccint/mncint columns are dropped before returning.
    """
    os.putenv('PGPASSWORD', '{0}'.format(PASSWORD))
    command = 'psql -U {2} -d {1} -h {0} -p 5432 -t -A -c "COPY (select * from lista) TO STDOUT CSV HEADER" > lista.csv'.format(HOST,DBNAME,USER)
    subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.readlines()
    carrier_list = pd.read_csv('lista.csv')
    for column in ('id', 'mccint', 'mncint'):
        del carrier_list[column]
    return carrier_list
# get external list from local directory, file external.json
def getExtList():
    """Load ext.json (written by getCarrierInfo) without id/key_carrier."""
    ext_list = pd.read_json('ext.json', orient='records')
    for column in ('id', 'key_carrier'):
        del ext_list[column]
    return ext_list
# get external list from local directory, file external.json
def getNewList():
    """Load the pending-insert records written by parser_data()."""
    return pd.read_json('new_data_update.json', orient='records')
def parser_data():
    """Compare the scraped carrier table (DF1) against the database table
    ``lista`` (DF2), write the rows missing from the DB to
    new_data_update.json, and bulk-insert them via psql \\COPY.

    NOTE(review): this module uses Python-2 print statements throughout; it
    only runs under Python 2.
    """
    # start script date
    print('Updating data start : ', str(datetime.datetime.now()))
    # calling functions for get data
    getCarrierInfo()
    external_list = getExtList()
    local_list = getCarrierList()
    #sorting list before load in dataframe
    external_list = external_list[['mcc','mnc','iso','country','countryc','network']]
    local_list = local_list[['mcc','mnc','iso','country','countryc','network']]
    # choose the column into dataframes
    df1 = pd.DataFrame(external_list,columns=['mcc','mnc','iso','country','countryc','network'])
    df2 = pd.DataFrame(local_list,columns=['mcc','mnc','iso','country','countryc','network'])
    # replace elements for values 0 into holl empty
    df1 = df1.fillna(0)
    df2 = df2.fillna(0)
    # columns to convert at integer
    colsdf1 = ['mcc','mnc']
    colsdf2 = ['mcc','mnc']
    # convert column mcc and mnc to base int64
    df2[colsdf2] = df2[colsdf2].dropna().apply(np.int64)
    df1[colsdf1] = df1[colsdf1].dropna().apply(np.int64)
    # this is the total number of index in the table lista from database
    total_index_df1 = (len(df1))
    total_index_df2 = (len(df2))
    # printing two dataframes
    print df1
    print '-----------------------------------------------------------'
    print df2
    print '-----------------------------------------------------------'
    # dictionarys for store the new data updated
    data_update = []
    # accounter for get result
    rigth = 0
    wrong = 0
    empty_count = 0
    i = 0
    # O(n*m) pairwise comparison of every external row against every DB row.
    #over iterate on data frame 1
    for index_a, row_a in df1.iterrows():
        current_not_exist = {}
        print '--------------'
        print 'DataFrame One : ',index_a, row_a['mcc']
        print '--------------'
        #over iterate on data frame 2
        for index_b, row_b in df2.iterrows():
            print '--------------'
            print 'DataFrame Two : ', index_b, row_b['mcc']
            print (row_a['mcc'],row_a['mnc'])
            print '--------------'
            print (row_b['mcc'],row_b['mnc'])
            # if key_carrier(mcc,mnc) in df1 exist into df2, so increment the account rigth
            if(df1['mcc'].values[index_a] == df2['mcc'].values[index_b]) and (df1['mnc'].values[index_a] == df2['mnc'].values[index_b]):
                # counter for get the rigth conditions is true
                print 'ok, this exist '
                rigth = rigth + 1
            #elif key_carrier(mcc,mnc) in df2 is empty, so increment the account empty_count
            elif((df2['mcc'].values[index_b] == {} and df2['mnc'].values[index_b] == {}).any()):
                print 'empty'
                empty_count = empty_count + 1
            # else, the key_carrier(mcc,mnc) in df1 doesn't exist in df2, get the values of index and insert the new values in df
            elif(index_b >= total_index_df2):
                print 'this index in df2 does not exist'
                break
            else:
                print 'no ok'
                wrong = wrong + 1
    # end of function parser_data()
    #-----------------------------------------------------------------------------------------
    print '::::::::::::::::::::::::::::::::::::::::::::'
    print '------ Total amount extracted from mcc-mnc.com ---------> : ', total_index_df1
    print '::::::::::::::::::::::::::::::::::::::::::::'
    print '::::::::::::::::::::::::::::::::::::::::::::'
    print '------ Total amount extracted from Data Base ---------> : ', total_index_df2
    print '::::::::::::::::::::::::::::::::::::::::::::'
    if(rigth == total_index_df1):
        print 'All logs are rigth, its dont need update, total rigth : ', rigth
    # if the amount found is < that total logs, so print the amount
    elif(rigth < total_index_df1):
        print 'values rigth : ', rigth
        total = total_index_df1 - rigth
        print 'The total logs that needs insert in table lista are : ', total
        # Collect the external rows assumed missing from the DB.
        # NOTE(review): `rigth < index_a` compares a match COUNT against a row
        # INDEX, which only works if matches are a prefix — confirm intent.
        for index_a,row_a in df1.iterrows():
            current_item = {}
            # if rigth is menor to index_a, rigth content the total rigth, index_a start from index_a = 0, here begin get values and insert the new data
            if(rigth < index_a) :
                # get values
                mcc = df1['mcc'].values[index_a]
                mnc = df1['mnc'].values[index_a]
                network = df1['network'].values[index_a]
                iso = df1['iso'].values[index_a]
                country = df1['country'].values[index_a]
                countryc = df1['countryc'].values[index_a]
                # insert
                current_item['mcc'] = mcc
                current_item['mnc'] = mnc
                current_item['network'] = network
                current_item['iso'] = iso
                current_item['country'] = country
                current_item['countryc'] = countryc
                data_update.append(current_item)
            else:
                i = i + 1
    else:
        print '-------l--------'
    # generate a new_data_update.json with content of data_update[]
    with open('new_data_update.json', 'w') as file:
        json.dump(data_update, file, indent=2)
    # begin the Conecction at database
    status_connection = True
    try:
        conn = psycopg2.connect("dbname='"+ DBNAME +"' user='"+ USER +"' host='"+ HOST +"' password='"+ PASSWORD +"'")
    except psycopg2.DataError as e:
        status_connection = False
        # NOTE(review): `logger` is never defined in this module — this branch
        # would raise NameError if reached; confirm and add a logger setup.
        logger.error("Check the parameter to connect to postgresql!! -> %s",e.pgerror)
    if status_connection :
        print 'Conecction successful'
        cursor = conn.cursor()
        # getting new values for table
        new_list = getNewList()
        new_list = new_list[['mcc','mnc','iso','country','countryc','network']]
        df3 = pd.DataFrame(new_list, columns=['mcc', 'mnc', 'iso', 'country', 'countryc', 'network'])
        # generate a csv file
        df3.to_csv('data.csv', header=True, encoding='utf-8', index=False)
        # inserting
        os.putenv('PGPASSWORD', '{0}'.format(PASSWORD))
        command0 = 'psql -U {0} -d {1} -h {2} -p 5432 -t -A -c "\COPY lista(mcc,mnc,iso,country,countryc,network) FROM \'/home/kousei/Task/Last_update/other/{3}\' DELIMITER \',\' CSV HEADER"'.format(USER,DBNAME,HOST,FILE)
        subprocess.Popen(command0, shell=True, stdout=subprocess.PIPE).stdout.readlines()
        print 'The update is finish!!! Good day :)!'
    else:
        print('No connect')
    print '::::::::::::::::::::::::::::::::::::::::::::'
    print 'The total amount key_carrier founds : ', rigth, ' in DF1 --->> wrong amount :: ', wrong
    print '::::::::::::::::::::::::::::::::::::::::::::'
    print 'The total key_carriers ran in DF2 are :: ', total_index_df2, ' row , of which -> ', ((wrong) / (total_index_df1)),' were incorrect in DF1(external content) and ', rigth ,' were correct in DF1(external content)--> of a total of -->> ', total_index_df1, ' row in DF1'
    # in the case that : field mcc and mnc is empty, count show a how many are empty
    print 'The total key_carries empty are : ', empty_count
    print ' : ', data_update
if(__name__ == '__main__'):
    # Schedule parser_data() to run immediately (delay 0, priority 1) and block.
    scheduler.enter(0, 1, parser_data, ())
    scheduler.run()
| 3 | 3 |
kryptobot/markets/market_simulator.py | eristoddle/Kryptobot | 24 | 12766232 | from .market import Market
from . import position
from ..db.models import TradingOrder
import logging
logger = logging.getLogger(__name__)
class MarketSimulator(Market):
    """Wrapper for market that allows simulating simple buys and sells.

    Keeps a simulated quote/base balance; limit orders are persisted as
    TradingOrder rows, market orders only adjust balances.
    """

    def __init__(self, exchange, base_currency, quote_currency, quote_currency_balance, strategy):
        super().__init__(exchange, base_currency, quote_currency, strategy)
        self.starting_balance = quote_currency_balance
        self.quote_balance = quote_currency_balance
        self.base_balance = 0
        self.simulating = False

    def __del__(self):
        # `session` only exists once add_session() has been called; guard so
        # interpreter teardown does not emit a spurious AttributeError.
        session = getattr(self, 'session', None)
        if session is not None:
            session.close()

    def add_session(self, session):
        # `session` is a session factory (e.g. a sessionmaker); instantiate it.
        self.session = session()

    def _record_order(self, position, quantity, price):
        """Persist one simulated TradingOrder row for this strategy run."""
        order = TradingOrder(
            exchange=self.exchange.id,
            strategy_id=self.strategy.strategy_id,
            run_key=self.strategy.run_key,
            pair=self.analysis_pair,
            position=position,
            amount=quantity,
            price=price,
            simulated="simulated"
        )
        self.session.add(order)
        self.session.commit()

    def limit_buy(self, quantity, price):
        """Simulate a limit buy: move quote balance into base balance."""
        if self.quote_balance >= quantity * price:
            self.quote_balance = self.quote_balance - quantity * price
            self.base_balance = self.base_balance + quantity
            self._record_order('buy', quantity, price)
            logger.info("Executed buy simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
            logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
            logger.info(self.base_currency + " balance: " + str(self.base_balance))
        else:
            logger.info("Insufficient balance for simulation buy")

    def limit_sell(self, quantity, price):
        """Simulate a limit sell: move base balance into quote balance."""
        if self.base_balance >= quantity:
            self.base_balance = self.base_balance - quantity
            self.quote_balance = self.quote_balance + quantity * price
            self._record_order('sell', quantity, price)
            logger.info("Executed sell simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
            logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
            logger.info(self.base_currency + " balance: " + str(self.base_balance))
        else:
            logger.info("Insufficient balance for simulation sell")

    def market_buy(self, quantity):
        """Simulate a market buy at the current ask price (no order row persisted)."""
        # Fetch the price once so the affordability check, the balance update
        # and the log line all use the same quote; the original re-fetched the
        # ticker for each use, which could race against a moving market.
        price = self.get_ask_price()
        if self.quote_balance >= quantity * price:
            self.quote_balance = self.quote_balance - quantity * price
            self.base_balance = self.base_balance + quantity
            logger.info("Executed buy simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
            logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
            logger.info(self.base_currency + " balance: " + str(self.base_balance))
        else:
            logger.info("Insufficient balance for simulation buy")

    def market_sell(self, quantity):
        """Simulate a market sell at the current bid price (no order row persisted)."""
        # Single price fetch for the same reason as market_buy.
        price = self.get_bid_price()
        if self.base_balance >= quantity:
            self.base_balance = self.base_balance - quantity
            self.quote_balance = self.quote_balance + quantity * price
            logger.info("Executed sell simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
            logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
            logger.info(self.base_currency + " balance: " + str(self.base_balance))
        else:
            logger.info("Insufficient balance for simulation sell")

    def get_ask_price(self):
        """Get ask price for simulation"""
        if not self.simulating:
            """if operating on live data, use actual ask"""
            return self.exchange.fetchTicker(self.analysis_pair)['ask']
        else:
            """if operating on historical data, use close"""
            return self.latest_candle['5m'][4]

    def get_bid_price(self):
        """Get bid price for simulation"""
        if not self.simulating:
            """if operating on live data, use actual ask"""
            return self.exchange.fetchTicker(self.analysis_pair)['bid']
        else:
            """if operating on historical data, use close"""
            return self.latest_candle['5m'][4]

    def get_wallet_balance(self):
        """Return the simulated quote-currency balance."""
        return self.quote_balance
def open_long_position_simulation(market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent):
    """Create, open and return a simulated long position."""
    long_position = LongPositionSimulator(market, amount, price, fixed_stoploss,
                                          trailing_stoploss_percent, profit_target_percent)
    long_position.open()
    return long_position
def open_short_position_simulation(market, amount, price):
    """Create, open and return a simulated short position."""
    logger.info("Opening simulated short position")
    short_position = ShortPositionSimulator(market, amount, price)
    short_position.open()
    return short_position
# TODO: %m interval also hardcoded here, search the project for 5m
class LongPositionSimulator(position.LongPosition):
    """Simulated long position: overrides order creation to use the
    MarketSimulator's balances instead of placing real exchange orders."""

    def __init__(self, market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent):
        super().__init__(market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent)

    def liquidate_position(self):
        """Close the long by opening an offsetting simulated short."""
        logger.info("Closing simulated long position")
        # NOTE: the '5m' candle interval is hard-coded throughout the project.
        open_short_position_simulation(self.market, self.amount, self.market.latest_candle['5m'][3])
        self.is_open = False

    def open(self):
        self.market.limit_buy(self.amount, self.price)
        self.is_open = True

    def update(self, sell=False):
        """Re-check stops/target against the latest candle low, then refresh
        the trailing stoploss."""
        candle_low = self.market.latest_candle['5m'][3]
        should_exit = (
            candle_low < self.trailing_stoploss
            or candle_low < self.fixed_stoploss
            or candle_low >= self.profit_target
            or sell is True
        )
        if should_exit:
            self.liquidate_position()
        # re-calculate trailing stoploss
        self.trailing_stoploss = self.calculate_trailing_stoploss()
class ShortPositionSimulator(position.ShortPosition):
    """Simulated short position: overrides order creation to use the
    MarketSimulator's balances instead of placing real exchange orders."""

    def __init__(self, market, amount, price):
        super().__init__(market, amount, price)

    def open(self):
        # Sells against the simulated balance; does not flip is_open here.
        self.market.limit_sell(self.amount, self.price)
tests/test_1_eng.py | KepAlex-404/Raspoznovalka | 0 | 12766233 | <reponame>KepAlex-404/Raspoznovalka
from src.sentiment import sentiment
"""positive rew"""
# Fixture: a hotel review used as sample input; expected to score as
# positive sentiment overall despite mixed remarks (parking cost, noise).
text = """
nice hotel expensive parking got good deal stay hotel anniversary, arrived late evening took advice previous reviews
did valet parking, check quick easy, little disappointed non-existent view room room clean nice size, bed comfortable
woke stiff neck high pillows, not soundproof like heard music room night morning loud bangs doors opening closing hear
people talking hallway, maybe just noisy neighbors,
aveda bath products nice, did not goldfish stay nice touch taken advantage staying longer, location great walking
distance shopping, overall nice experience having pay 40 parking night
"""
def test_positive():
    """Run the sentiment classifier on the positive fixture text.

    Fix: the original called sentiment() twice (once for the print, once
    for the return) — doubling the work and risking divergent results if
    the classifier is not deterministic. Compute once and reuse.
    """
    result = sentiment(text, '../src/algos')
    print("Test 1 - ", result)
    return result[0]
| 2.34375 | 2 |
tests/test_merge_genesets.py | pathwayforte/PathwayForte | 10 | 12766234 | <reponame>pathwayforte/PathwayForte<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Test merge gene set."""
import logging
import os
import unittest
from collections import Counter
import pandas as pd
from pathway_forte.constants import DATA, check_gmt_files
from pathway_forte.mappings import get_mapping_dict, load_compath_mapping_dfs
logger = logging.getLogger(__name__)
# Mappings linked with duplicate WikiPathways/KEGG representation need to be skipped since they one pathway maps to
# multiple mappings
BLACK_LIST = {
'R-HSA-5683057', 'WP623', 'WP61', 'WP3845', 'WP366', 'R-HSA-9006936', 'R-HSA-2028269', 'WP75', 'WP3858',
'WP2586', 'WP2873', 'R-HSA-174403', 'WP100', 'WP4506', 'R-HSA-196819', 'WP4297', 'R-HSA-1430728', 'R-HSA-163210',
'R-HSA-1430728', 'hsa00190', 'hsa04010', 'WP382', 'WP422', 'hsa04350', 'WP560', 'hsa04390', 'hsa04392'
}
class TestMergeGmt(unittest.TestCase):
    """Test merged gene set file."""

    @unittest.skipUnless(os.path.exists(DATA), 'Only run if data folder exists')
    def test_gmt_file(self):
        """Check concordance of pathways in merged gene set file and equivalent pathway mappings dictionary."""
        kegg_reactome_df, kegg_wikipathways_df, wikipathways_reactome_df, special_mappings_df = load_compath_mapping_dfs()

        # Build {(resource, pathway_id): [(resource, pathway_id), ...]} of
        # equivalent pathways across the three pairwise mapping tables.
        equivalent_mappings_dict = get_mapping_dict(
            pd.concat([kegg_reactome_df, kegg_wikipathways_df, wikipathways_reactome_df]),
            'equivalentTo'
        )

        _, _, _, merge_gene_set = check_gmt_files()

        with open(merge_gene_set) as file:
            # Get the two first cells in each row (pathway ids and resources)
            pathway_tuples = [
                line.split('\t')[0:2]
                for line in file
            ]

        # Zip ids and resources to a common (resource, pathway_id) tuple list per row
        pathways_mapped = [
            list(zip(resources.split('|'), pathway_ids.split('|')))
            for pathway_ids, resources in pathway_tuples
        ]

        for pathways in pathways_mapped:
            if 1 == len(pathways):
                # If the pathway doesnt have a mapping in the GMT file but it is in the equivalent mapping dict
                # RAISE ERROR
                if pathways[0] in equivalent_mappings_dict:
                    raise ValueError(f'{pathways} should have a mapping')
                continue

            copy_pathway = pathways
            for index, (resource, pathway) in enumerate(pathways):
                # Skip multiduplicate pathways
                if pathway in BLACK_LIST:
                    continue
                # Everything mapped together in this GMT row, minus the current
                # pathway, must equal that pathway's known equivalents.
                mapping_pathways_in_iteration = copy_pathway.copy()
                mapping_pathways_in_iteration.pop(index)
                real_mappings = equivalent_mappings_dict[(resource, pathway)]
                self.assertEqual(set(mapping_pathways_in_iteration), set(real_mappings))

        counter = Counter(
            pathway
            for pathways in pathways_mapped
            for resource, pathway in pathways
        )

        # All pathways should be only present once. Check for duplicates
        for pathway, count in counter.items():
            self.assertEqual(count, 1)
| 2.21875 | 2 |
components/geniuswrapper.py | JLiekenbrock/lyrics-visualiser | 0 | 12766235 | import lyricsgenius
# wrapper for lyrics which remembers last search
# if there are several providers implemented this class should be inherited
class geniuslyrics:
    """Cache-aware wrapper around the Genius lyrics API.

    Remembers the last artist/title searched so repeated lookups for the
    same artist or song do not hit the API again. If other lyrics
    providers are implemented, this class should be inherited.

    Fixes over the original:
    * artist/title cache checks used ``is not`` (object identity), which
      is almost always True for distinct string objects and so defeated
      the cache — replaced with ``!=`` (equality).
    * re-requesting the already-cached title used to fall into the
      ``else`` branch and wipe the cached lyrics; now a repeated title is
      a no-op and the cache is only reset when a *new* title cannot be
      searched (no artist instance loaded).
    """

    def __init__(self, _token="<KEY>", _timeout=15, _retries=3, _verbose=False):
        self.__session = lyricsgenius.Genius(_token, timeout=_timeout, retries=_retries, verbose=_verbose)
        self.__artistname = None
        self.__artistinstance = None
        self.__titlename = None
        self.__titleinstance = None
        self.__lyrics = None

    def get_session(self):
        return self.__session

    def search_artist(self, artist):
        # Only hit the API when the requested artist differs from the cache.
        if artist != self.__artistname:
            self.__artistname = artist
            self.__artistinstance = self.__session.search_artist(artist, max_songs=1)

    def get_artist(self):
        return self.__artistname

    def get_artistinstance(self):
        return self.__artistinstance

    def search_title(self, title):
        # Only (re)search when the title changed; a repeated title keeps
        # the cached song and lyrics intact.
        if title != self.__titlename:
            self.__titlename = title
            if self.__artistinstance is not None:
                self.__titleinstance = self.__artistinstance.song(title)
            else:
                # No artist loaded yet: nothing to search against.
                self.__titleinstance = None
            self.__set_lyrics()

    def get_title(self):
        return self.__titlename

    def __set_lyrics(self):
        # Derive the lyrics cache from the current song instance (or clear it).
        if self.__titleinstance is not None:
            self.__lyrics = self.__titleinstance.lyrics
        else:
            self.__lyrics = None

    def get_lyrics(self):
        return self.__lyrics

    def search_lyrics(self, title, artist):
        """Convenience: search artist then title, returning the lyrics (or None)."""
        self.search_artist(artist)
        self.search_title(title)
        return self.get_lyrics()
gluon/tests/test_utils.py | lightcoder127/Web2py | 2 | 12766236 | <filename>gluon/tests/test_utils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit tests for utils.py """
import unittest
from fix_path import fix_sys_path
fix_sys_path(__file__)
from utils import md5_hash
class TestUtils(unittest.TestCase):
    """Unit tests for gluon's utils module."""

    def test_md5_hash(self):
        """md5_hash should produce the known hex digest for a fixed string."""
        digest = md5_hash("web2py rocks")
        self.assertEqual(digest, '79509f3246a2824dee64635303e99204')
class TestPack(unittest.TestCase):
    """ Tests the compileapp.py module """

    def test_compile(self):
        """Compile, pack, and unpack each bundled application.

        Exercises the full compile -> remove -> w2p_pack (compiled and
        source) -> w2p_unpack cycle. Side effects: writes <app>.w2p files
        and an unpack/<app> directory under the current working directory.
        """
        from compileapp import compile_application, remove_compiled_application
        from gluon.fileutils import w2p_pack, w2p_unpack
        import os
        #apps = ['welcome', 'admin', 'examples']
        apps = ['welcome']
        for appname in apps:
            appname_path = os.path.join(os.getcwd(), 'applications', appname)
            compile_application(appname_path)
            remove_compiled_application(appname_path)
            test_path = os.path.join(os.getcwd(), "%s.w2p" % appname)
            unpack_path = os.path.join(os.getcwd(), 'unpack', appname)
            w2p_pack(test_path, appname_path, compiled=True, filenames=None)
            w2p_pack(test_path, appname_path, compiled=False, filenames=None)
            w2p_unpack(test_path, unpack_path)
        return
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.515625 | 3 |
input_mapping.py | qbrc-cnap/generic_microsatellite_identification | 0 | 12766237 | <gh_stars>0
#! /usr/bin/python3
from base.models import Resource
import os
def map_inputs(user, unmapped_data, id_list):
    '''
    `user` is a User instance (or subclass). This gives us
    the option of applying user-specific logic to the mapping.
    Since the code that calls this function does NOT know
    the structure of the input data, it cannot impose any logic
    such as filtering Resource objects for a particular user.
    Therefore we have to keep that information here

    `unmapped_data` is some data structure sent by
    the frontend. The structure is known to the
    developer since they specified the input element responsible
    for creating the data. For example, a file chooser will send
    a list/array of primary keys.

    `id_list` is a list of WDL input "names"/ids that we are mapping
    to. Note that the ordering is important. Make sure the logic below
    matches the order in gui.json
    '''
    r1_suffix = '_R1.fastq.gz'
    r2_suffix = '_R2.fastq.gz'

    # Sort the user's resources into R1/R2 buckets by filename suffix.
    r1_paths = []
    r2_paths = []
    for pk in unmapped_data:
        resource = Resource.objects.get(pk=pk)
        if not ((resource.owner == user) or user.is_staff):
            raise Exception('The user %s is not the owner of Resource with primary key %s.' % (user, pk))
        if resource.path.endswith(r1_suffix):
            r1_paths.append(resource.path)
        elif resource.path.endswith(r2_suffix):
            r2_paths.append(resource.path)
        else:
            print('Skipping %s' % resource.path)

    # Map sample name (basename minus suffix) -> full path for each read.
    r1_by_sample = {os.path.basename(p)[:-len(r1_suffix)]: p for p in r1_paths}
    r2_by_sample = {os.path.basename(p)[:-len(r2_suffix)]: p for p in r2_paths}

    # Keep only samples that have BOTH an R1 and an R2 file.
    paired_samples = set(r1_by_sample).intersection(r2_by_sample)

    final_r1_list = [r1_by_sample[s] for s in paired_samples]
    final_r2_list = [r2_by_sample[s] for s in paired_samples]
    return {id_list[0]: final_r1_list, id_list[1]: final_r2_list}
| 2.84375 | 3 |
checkov/cloudformation/checks/resource/aws/IAMRoleAllowsPublicAssume.py | Devocean8-Official/checkov | 1 | 12766238 | <gh_stars>1-10
import json
from typing import List
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckResult, CheckCategories
class IAMRoleAllowsPublicAssume(BaseResourceCheck):
    """Fails any AWS::IAM::Role whose trust policy lets ANY AWS principal ('*') assume it."""

    def __init__(self):
        name = "Ensure IAM role allows only specific services or principals to assume it"
        id = "CKV_AWS_60"
        supported_resources = ['AWS::IAM::Role']
        categories = [CheckCategories.IAM]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf):
        """Scan one resource's CloudFormation config dict.

        Returns FAILED if any non-Deny statement has Principal.AWS == "*"
        (directly or inside a principal list); PASSED otherwise.
        Updates self.evaluated_keys to point at the offending key.
        """
        self.evaluated_keys = ["Properties/AssumeRolePolicyDocument/Statement"]
        if 'Properties' in conf:
            properties = conf['Properties']
            if 'AssumeRolePolicyDocument' in properties:
                assume_role_policy_doc = properties['AssumeRolePolicyDocument']
                # The policy document may be inlined as a JSON string.
                if isinstance(assume_role_policy_doc, str):
                    assume_role_policy_doc = json.loads(assume_role_policy_doc)
                if 'Statement' in assume_role_policy_doc:
                    statements = assume_role_policy_doc['Statement']
                    if isinstance(statements, list):
                        for statement_index, statement in enumerate(statements):
                            if 'Effect' in statement:
                                # Deny statements cannot grant assume access.
                                if statement['Effect'] == "Deny":
                                    continue
                            if 'Principal' in statement:
                                principal = statement['Principal']
                                if 'AWS' in principal:
                                    aws_principals = principal['AWS']
                                    if aws_principals == "*":
                                        self.evaluated_keys = [f"Properties/AssumeRolePolicyDocument/Statement/[{statement_index}]/Principal/AWS"]
                                        return CheckResult.FAILED
                                    if isinstance(aws_principals, list):
                                        for principal_index, principal in enumerate(aws_principals):
                                            if principal == "*":
                                                self.evaluated_keys = [
                                                    f"Properties/AssumeRolePolicyDocument/Statement/[{statement_index}]/Principal/[{principal_index}]/AWS"]
                                                return CheckResult.FAILED
        return CheckResult.PASSED
return CheckResult.PASSED
check = IAMRoleAllowsPublicAssume()
| 2.203125 | 2 |
wsgi.py | wnormandin/pokeyapi | 0 | 12766239 | <filename>wsgi.py
from api import api
# Start the API's development server only when executed directly
# (a WSGI server imports `api` from this module instead).
if __name__=="__main__":
    api.run()
| 1.328125 | 1 |
aries_cloudcontroller/model/revoke_request.py | didx-xyz/aries-cloudcontroller-pyton | 5 | 12766240 | <filename>aries_cloudcontroller/model/revoke_request.py
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional, Union, Literal # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator, Field, Extra # noqa: F401
class RevokeRequest(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    RevokeRequest - a model defined in OpenAPI

        cred_ex_id: Credential exchange identifier [Optional].
        cred_rev_id: Credential revocation identifier [Optional].
        publish: (True) publish revocation to ledger immediately, or (default, False) mark it pending [Optional].
        rev_reg_id: Revocation registry identifier [Optional].
    """

    cred_ex_id: Optional[str] = None
    cred_rev_id: Optional[str] = None
    publish: Optional[bool] = None
    rev_reg_id: Optional[str] = None

    # Explicit __init__ appears to exist only to make all fields keyword-only
    # (note the bare `*`) — confirm against the generator template.
    def __init__(
        self,
        *,
        cred_ex_id: Optional[str] = None,
        cred_rev_id: Optional[str] = None,
        publish: Optional[bool] = None,
        rev_reg_id: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(
            cred_ex_id=cred_ex_id,
            cred_rev_id=cred_rev_id,
            publish=publish,
            rev_reg_id=rev_reg_id,
            **kwargs,
        )

    @validator("cred_ex_id")
    def cred_ex_id_pattern(cls, value):
        # Property is optional: None skips validation (bare return yields None).
        if value is None:
            return
        # UUIDv4 format.
        pattern = r"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
        if not re.match(pattern, value):
            raise ValueError(
                f"Value of cred_ex_id does not match regex pattern ('{pattern}')"
            )
        return value

    @validator("cred_rev_id")
    def cred_rev_id_pattern(cls, value):
        # Property is optional
        if value is None:
            return
        # Positive integer string (no leading zeros).
        pattern = r"^[1-9][0-9]*$"
        if not re.match(pattern, value):
            raise ValueError(
                f"Value of cred_rev_id does not match regex pattern ('{pattern}')"
            )
        return value

    @validator("rev_reg_id")
    def rev_reg_id_pattern(cls, value):
        # Property is optional
        if value is None:
            return
        # Indy revocation registry identifier format.
        pattern = r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):4:([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+))(:.+)?:CL_ACCUM:(.+$)"
        if not re.match(pattern, value):
            raise ValueError(
                f"Value of rev_reg_id does not match regex pattern ('{pattern}')"
            )
        return value

    class Config:
        allow_population_by_field_name = True
RevokeRequest.update_forward_refs()
| 2.265625 | 2 |
pelicanconf.py | feltnerm/blog | 0 | 12766241 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican site configuration.
AUTHOR = '<NAME>'
SITENAME = "<NAME>'s Weblog"
SITENAME_SHORT = "<NAME>'s Weblog"
EMAIL = '<EMAIL>'
SITEURL = ''
DESCRIPTION = "The thoughts and writings of <NAME>."

THEME = 'theme/feltnerm-pelican-theme'
DEVELOP = True

PATH = 'content'
TIMEZONE = 'America/Chicago'

# Disable the HTML reader; only markdown-based content is processed.
READERS = {
    'html': None
}

DEFAULT_LANG = 'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

STATIC_PATHS = [
    'static/CNAME',
    'static/robots.txt',
    'static/hackers.txt',
    'static/humans.txt'
]

# Copy the static files to the site root instead of under static/.
EXTRA_PATH_METADATA = {
    'static/CNAME': { 'path': 'CNAME' },
    'static/robots.txt': { 'path': 'robots.txt' },
    'static/hackers.txt': { 'path': 'hackers.txt' },
    'static/humans.txt': { 'path': 'humans.txt' }
}

REPO_HOME = 'https://github.com/feltnerm/blog'
TWITTER_USERNAME = 'feltnermj'
GITHUB_USERNAME = 'feltnerm'
LASTFM_USERNAME = 'plugitin'
FACEBOOK_USERNAME = 'feltnerm'

ANALYTICS = {
    'GOOGLE': 'UA-45806952-1'
}

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

# URL layout. (Fix: ARTICLE_URL and ARTICLE_SAVE_AS were each assigned twice
# with identical values; the redundant duplicate pair was removed.)
ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'

PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'

ARCHIVES_SAVE_AS = 'posts/index.html'
YEAR_ARCHIVE_SAVE_AS='posts/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS='posts/{date:%Y}/{date:%m}/index.html'
DAY_ARCHIVE_SAVE_AS='posts/{date:%Y}/{date:%m}/{date:%d}/index.html'

# Disable author and category pages.
AUTHORS_SAVE_AS=''
CATEGORY_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''

MARKDOWN = {
    'extension_configs': {
        # 'markdown.extensions.codehilite': {'css_class': 'highlight'},
        'markdown.extensions.smarty': {},
        'markdown.extensions.sane_lists': {},
        # "proxy" for `markdown.extensions.extra``
        'pymdownx.extra': {},
        # emphasis that is more like GFM
        'pymdownx.betterem': {},
        # use emoji shortcodes
        'pymdownx.emoji': {},
        # code highlighting
        'pymdownx.highlight': {},
        'pymdownx.inlinehilite': {},
        'pymdownx.superfences': {},
        # turn markdown links into ... links
        'pymdownx.magiclink': {},
        # strict parsing of headers
        'pymdownx.saneheaders': {},
        # fancy symbols
        'pymdownx.smartsymbols': {},
        # @todo: where did this extension go?!
        # 'markdown.extensions.headerid': {},
    },
    'output_format': 'html5',
}
| 1.53125 | 2 |
data/aligned_dataset.py | OnizukaLab/pytorch-CycleGAN-and-pix2pix | 0 | 12766242 | import os.path
import pickle
import random
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_numbering_dataset
import numpy as np
from PIL import Image
class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        # Sort by the numeric index so image order matches caption order.
        self.AB_paths = [
            e[1] for e in sorted(make_numbering_dataset(self.dir_AB, opt.max_dataset_size), key=lambda idx: idx[0])]
        assert(self.opt.load_size >= self.opt.crop_size)   # crop_size should be smaller than the size of loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
        # Pickle layout: [train_captions, test_captions, ixtoword, wordtoix].
        with open(opt.captions, 'rb') as f:
            x = pickle.load(f)
            train_captions, test_captions = x[0], x[1]
            self.captions = train_captions if opt.phase == "train" else test_captions
            self.ixtoword, self.wordtoix = x[2], x[3]
            del x, train_captions, test_captions
            self.n_words = len(self.ixtoword)
            print('Load from: ', opt.captions)
        self.captions_per_image = opt.captions_per_image
        self.text_words_num = opt.text_words_num

    def get_caption(self, sent_ix):
        """Return (padded word-index array, effective length) for caption sent_ix."""
        # a list of indices for a sentence
        sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
        if (sent_caption == 0).sum() > 0:
            print('ERROR: do not need END (0) token', sent_caption)
        num_words = len(sent_caption)
        # pad with 0s (i.e., '<end>')
        x = np.zeros(self.text_words_num, dtype='int64')
        x_len = num_words
        if num_words <= self.text_words_num:
            x[:num_words] = sent_caption
        else:
            # Too long: keep a random (order-preserving) subset of words.
            ix = list(np.arange(num_words))  # 1, 2, 3,..., maxNum
            np.random.shuffle(ix)
            ix = ix[:self.text_words_num]
            ix = np.sort(ix)
            x = sent_caption[ix]
            x_len = self.text_words_num
        return x, x_len

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - an image in the input domain
            B (tensor) - - its corresponding image in the target domain
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)
        """
        # read a image given a random integer index
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        # split AB image into A and B (side-by-side pair); square images are
        # treated as unpaired and used for both A and B.
        w, h = AB.size
        if w > h:
            w2 = int(w / 2)
            A = AB.crop((0, 0, w2, h))
            B = AB.crop((w2, 0, w, h))
        else:
            A = AB
            B = AB

        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

        A = A_transform(A)
        B = B_transform(B)

        # Pick one of the image's captions at random; assumes captions are
        # stored contiguously, captions_per_image per image — TODO confirm.
        caption_idx = self.captions_per_image * index + random.randint(0, self.captions_per_image - 1)
        caption, caption_len = self.get_caption(caption_idx)

        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path,
                "caption": caption, "caption_len": caption_len}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
| 2.59375 | 3 |
test/test_converters.py | kjulian3/NNet | 29 | 12766243 | <gh_stars>10-100
import unittest
import sys
sys.path.append('..')
import numpy as np
import onnx
from NNet.converters.nnet2onnx import nnet2onnx
from NNet.converters.onnx2nnet import onnx2nnet
from NNet.converters.pb2nnet import pb2nnet
from NNet.converters.nnet2pb import nnet2pb
import onnxruntime
from NNet.python.nnet import *
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import graph_util
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
class TestConverters(unittest.TestCase):
    """Round-trip tests for the NNet <-> ONNX and NNet <-> TensorFlow converters."""

    def test_onnx(self):
        """NNet -> ONNX -> NNet round trip must preserve network evaluation."""
        ### Options###
        nnetFile = "nnet/TestNetwork.nnet"
        testInput = np.array([1.0,1.0,1.0,100.0,1.0]).astype(np.float32)
        ##############

        # Convert NNET to ONNX and save ONNX network to given file
        # Adapt network weights and biases so that no input or output normalization is required to evaluate network
        onnxFile = nnetFile[:-4]+"onnx"
        nnet2onnx(nnetFile,onnxFile=onnxFile,normalizeNetwork=True)

        # Convert ONNX back to NNET and save NNET network
        # Note that unless input mins and maxes are specified, the minimum and maximum floating point values will be written
        nnetFile2 = nnetFile[:-4]+"v2.nnet"
        onnx2nnet(onnxFile,nnetFile=nnetFile2)

        ## Test that the networks are equivalent
        # Load models
        nnet = NNet(nnetFile)
        sess = onnxruntime.InferenceSession(onnxFile)
        nnet2 = NNet(nnetFile2)

        # Evaluate ONNX
        onnxInputName = sess.get_inputs()[0].name
        onnxOutputName = sess.get_outputs()[0].name
        onnxEval = sess.run([onnxOutputName],{onnxInputName: testInput})[0]

        # Evaluate Original NNET (input must be inside the network's bounds)
        inBounds = np.all(testInput>=nnet.mins) and np.all(testInput<=nnet.maxes)
        self.assertTrue(inBounds)
        nnetEval = nnet.evaluate_network(testInput)

        # Evaluate New NNET
        inBounds = np.all(testInput>=nnet2.mins) and np.all(testInput<=nnet2.maxes)
        self.assertTrue(inBounds)
        nnetEval2 = nnet2.evaluate_network(testInput)

        # Maximum relative difference (percent) across output components.
        percChangeONNX = max(abs((nnetEval-onnxEval)/nnetEval))*100.0
        percChangeNNet = max(abs((nnetEval-nnetEval2)/nnetEval))*100.0

        # Evaluation should not change
        self.assertTrue(percChangeONNX<1e-3)
        self.assertTrue(percChangeNNet<1e-3)

    def test_pb(self):
        """NNet -> frozen TF protobuf -> NNet round trip must preserve evaluation."""
        ### Options###
        nnetFile = "nnet/TestNetwork.nnet"
        testInput = np.array([1.0,1.0,1.0,100.0,1.0]).astype(np.float32)
        ##############

        # Convert NNET to tensorflow and save tensorflow network to given file as a frozen protocol buffer file
        # Adapt network weights and biases so that no input or output normalization is required to evaluate network
        pbFile = nnetFile[:-4]+"pb"
        nnet2pb(nnetFile,pbFile=pbFile,normalizeNetwork=True)

        # Convert tensorflow pb back to NNET and save NNET network
        # Note that unless input mins and maxes are specified, the minimum and maximum floating point values will be written
        nnetFile2 = nnetFile[:-4]+"v2.nnet"
        pb2nnet(pbFile,nnetFile=nnetFile2)

        ## Test that the networks are equivalent
        # Load models
        nnet = NNet(nnetFile)
        nnet2 = NNet(nnetFile2)

        ### Read protobuf file and begin session ###
        # (TF1-style API: GraphDef + Session; incompatible with TF2 eager mode.)
        with tf.gfile.GFile(pbFile, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name="")
        sess = tf.Session(graph=graph)

        # The graph must expose exactly one placeholder (the network input);
        # the last operation is taken to be the output.
        placeholders = [x for x in sess.graph.get_operations() if x.node_def.op == 'Placeholder']
        self.assertTrue(len(placeholders)==1)
        inputName = placeholders[0].name
        outputName = sess.graph.get_operations()[-1].name

        # Evaluate tensorflow
        pbEval = sess.run(outputName+":0",{inputName+":0": testInput.reshape((1,5))})[0]

        # Evaluate Original NNET
        inBounds = np.all(testInput>=nnet.mins) and np.all(testInput<=nnet.maxes)
        self.assertTrue(inBounds)
        nnetEval = nnet.evaluate_network(testInput)

        # Evaluate New NNET
        inBounds = np.all(testInput>=nnet2.mins) and np.all(testInput<=nnet2.maxes)
        self.assertTrue(inBounds)
        nnetEval2 = nnet2.evaluate_network(testInput)

        percChangePB = max(abs((nnetEval-pbEval)/nnetEval))*100.0
        percChangeNNet = max(abs((nnetEval-nnetEval2)/nnetEval))*100.0

        # Evaluation should not change
        self.assertTrue(percChangePB<1e-3)
        self.assertTrue(percChangeNNet<1e-3)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.25 | 2 |
proxy.py | RusherRG/Scalable-Proxy-Server | 2 | 12766244 | <filename>proxy.py
import random
import requests
import threading
import string
letters = string.ascii_letters + string.digits
def send_request():
    """Send one randomly-shaped GET or POST through the local proxy.

    Targets the proxy at http://127.0.0.1:5000, passing a fixed webhook URL
    plus 5-character random alphanumeric values, and prints the response body.

    Fixes: the original built each random value with a manual
    randint-indexing join, duplicated four times; replaced with a small
    helper using random.choice (same distribution over `letters`).
    """
    url = "https://webhook.site/9d71f794-c6f4-4ee6-a97a-dd66d56007d9"

    def random_token(length=5):
        # Uniform random alphanumeric string from the module-level `letters`.
        return ''.join(random.choice(letters) for _ in range(length))

    method = random.choice(['GET', 'POST'])
    if method == 'GET':
        params = {
            'url': url,
            'param1': random_token(),
            'param2': random_token()
        }
        response = requests.get('http://127.0.0.1:5000', params=params)
    else:
        data = {
            'url': url,
            'name': random_token(),
            'title': random_token()
        }
        response = requests.post('http://127.0.0.1:5000', data=data)
    print(response.text)
# Fire one batch of 20 concurrent requests at the proxy, then wait for
# every worker thread to finish before exiting.
if __name__ == "__main__":
    for _ in range(1):  # single batch; increase to repeat batches
        threads = []
        for i in range(20):
            thread = threading.Thread(target=send_request)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
| 2.796875 | 3 |
examples/examples.py | tsbischof/fda510k | 0 | 12766245 | import networkx
import fda
# Da Vinci robotic system
# Build the predicate-device graph seeded from two 510(k) approvals
# (Da Vinci robotic system, per the comment above).
regulatory_graph = networkx.DiGraph()
regulatory_graph.add_node(fda.empty)
seeds = [fda.FDAApproval("K173585"),
         fda.FDAApproval("K081113")]
for seed in seeds:
    fda.populate_predicates(regulatory_graph, seed)
for seed in seeds:
    # Render each seed's predicate subgraph with graphviz, filtering out
    # edges containing "000000" (presumably placeholder/unknown approvals
    # — confirm against fda module conventions).
    subgraph = fda.networkx_to_graphviz(
        fda.get_subgraph(regulatory_graph, seed))
    subgraph.body = list(filter(lambda edge: "000000" not in edge,
                                subgraph.body))
    subgraph.render(seed.id)
| 2.359375 | 2 |
app/routes.py | ceciCoding/Queerevent | 0 | 12766246 | <filename>app/routes.py<gh_stars>0
import os
from app import app, db
from flask import Flask, render_template, request, session, url_for, redirect, flash, get_flashed_messages, jsonify, make_response
from flask_mail import Mail, Message
from app.models import User, Event, user_favorites
from tempfile import mkdtemp
from flask_login import LoginManager, current_user, login_user, logout_user, login_required, UserMixin
from flask_sqlalchemy import SQLAlchemy
from app.forms import LoginForm, RegistrationForm, EditForm
from werkzeug.urls import url_parse
from datetime import datetime
import geocoder
from app.helpers import decode_image, checkFavorites, set_events
# Allowed values for the new-event form's <select> fields.
EVENT_TYPES = ["Physical", "Online"]
PERIODICITIES = ["One time", "Recurring"]
#routes
@app.route("/", methods=['GET', 'POST'])
def home():
    """Landing page for anonymous visitors; searchable event feed for users."""
    if current_user.is_anonymous:
        return render_template("landing.html")
    else:
        #search bar logic
        q = request.args.get("q")
        if q:
            events = Event.query.filter((Event.name.contains(q)))
        else:
            events = Event.query.all()
        # Annotate each event (in memory only) with favorite flag and a
        # base64-decoded image for the template.
        for event in events:
            if checkFavorites(event):
                event.fan = True
            if event.img:
                event.image = decode_image(event.img)
        return render_template("home.html", title="Find Events", events=events, search=q)
#this one is just to handle not falling into the landing page over and over
@app.route("/index", methods=['GET', 'POST'])
def index():
    """Event feed without the anonymous landing redirect.

    NOTE(review): near-duplicate of home() minus the checkFavorites
    annotation — consider consolidating.
    """
    q = request.args.get("q")
    if q:
        events = Event.query.filter((Event.name.contains(q)))
    else:
        events = Event.query.all()
    for event in events:
        if event.img:
            event.image = decode_image(event.img)
    return render_template("home.html", title="Find Events", events=events, search=q)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate a user by email/password and start their session."""
    form = LoginForm()
    # GET requests and invalid submissions just (re)render the form.
    if not form.validate_on_submit():
        return render_template('login.html', form=form)
    user = User.query.filter_by(email=form.email.data).first()
    if user is None:
        flash("Invalid username")
        return render_template('login.html', form=form)
    if not user.check_password(form.password.data):
        flash("Invalid password")
        return render_template('login.html', form=form)
    login_user(user, remember=form.remember_me.data)
    return redirect(url_for('home'))
@app.route("/create-account", methods=["GET", "POST"])
def create():
    """Register a new user and redirect to the login page on success."""
    form = RegistrationForm()
    if form.validate_on_submit():
        new_user = User(name=form.name.data, email=form.email.data)
        new_user.set_password(form.password.data)
        db.session.add(new_user)
        db.session.commit()
        return redirect(url_for('login'))
    return render_template("create-account.html", form=form)
@app.route("/logout")
@login_required
def logout():
    """End the current user's session and return to the public index."""
    logout_user()  # fix: removed stray trailing semicolon
    return redirect(url_for('index'))
@app.route("/new", methods=["GET", "POST"])
@login_required
def new_event():
    """Render the new-event form (GET) or create an event from it (POST)."""
    if request.method == 'GET':
        return render_template("new.html")
    else:
        #done this way and not with wtforms to preserve cool data binding in JS
        r = request.form
        img = request.files['img']
        name = r.get("name")
        event_type = r.get("type")
        periodicity = r.get("periodicity")
        # Only one-time events carry a concrete date (ISO yyyy-mm-dd).
        if periodicity != 'Recurring':
            date = r.get("date")
            date = datetime.strptime(date, '%Y-%m-%d')
        else:
            date = None
        period = r.get("period")
        location = r.get("location")
        starting_time = r.get("starting")
        link = r.get("link")
        organizer = r.get("organizer")
        organizer_web = r.get("web")
        description = r.get("description")
        #check for errors in the selects
        if event_type not in EVENT_TYPES or periodicity not in PERIODICITIES:
            return render_template("new.html")
        #commit to the database
        event = Event(
            name=name,
            event_type=event_type,
            recurrence=periodicity,
            periodicity=period,
            location=location,
            starting_time=starting_time,
            link=link,
            organizer=organizer,
            organizer_web=organizer_web,
            description=description,
            img=img.read(),
            user_id=current_user.get_id())
        if date:
            event.date = date
        db.session.add(event)
        db.session.commit()
        return redirect(url_for('event', id=event.id))
@app.route("/event/<id>", methods=["GET", "POST"])
def event(id):
    """Show an event's detail page (GET) or delete the event (POST)."""
    event = Event.query.filter_by(id=id).first()
    # Robustness fix: an unknown/stale id used to raise AttributeError
    # below (event.img / db.session.delete(None)); serve the 404 page.
    if event is None:
        return render_template("404.html"), 404
    if request.method == "GET":
        if current_user.is_authenticated:
            user_is_fan = checkFavorites(event)
        else:
            user_is_fan = False
        img = decode_image(event.img)
        # Geocode the venue for the map widget, when a location is set.
        if event.location:
            address = geocoder.google(event.location)
            coordinates = address.latlng
        else:
            coordinates = None
        return render_template("event.html", event=event, img=img, user_is_fan=user_is_fan, coordinates=coordinates)
    else:
        # SECURITY(review): any POST deletes the event — there is no login
        # or ownership check on this branch. Flagged rather than changed;
        # consider requiring event.user_id == current_user.id.
        db.session.delete(event)
        db.session.commit()
        return redirect(url_for("home"))
@app.route("/account")
@login_required
def account():
    """Show the current user's account page, with their avatar if one is stored."""
    if current_user.img:
        img = decode_image(current_user.img)
        return render_template("account.html", img=img)
    else:
        return render_template("account.html")
@app.route("/edit", methods=["GET", "POST"])
@login_required
def edit():
    """Edit the current user's profile: name, password, and/or avatar image."""
    form = EditForm()
    if request.method == "GET":
        if current_user.img:
            img = decode_image(current_user.img)
        else:
            img = None
        return render_template("edit.html", form=form, img=img)
    else:
        img = request.files["change"]
        if form.validate_on_submit():
            if form.name.data:
                current_user.name = form.name.data
            # Password change requires the old password to verify; the image
            # update and commit are duplicated on both the password and
            # no-password paths — NOTE(review): candidate for consolidation.
            if form.new_password.data:
                if current_user.check_password(form.old_password.data):
                    current_user.set_password(form.new_password.data)
                if img:
                    current_user.img = img.read()
                db.session.commit()
                return redirect(url_for("account"))
            if img:
                current_user.img = img.read()
            db.session.commit()
            return redirect(url_for("account"))
        return redirect(url_for('account'))
@app.route("/favorites")
@login_required
def favorites():
    """List the events the current user has marked as favorites.

    Fix: removed a leftover debug print() of the query results.
    """
    events = Event.query.join(user_favorites).join(User).filter(
        (user_favorites.c.user_id == current_user.id)).all()
    for event in events:
        if event.img:
            event.image = decode_image(event.img)
        # Everything on this page is a favorite by construction.
        event.fan = True
    return render_template("favorites.html", title="Favorite Events", events=events)
@app.route("/my-events")
@login_required
def my_events():
    """List events created by the current user, flagging any they favorited."""
    events = Event.query.filter_by(user_id=current_user.id).all()
    for event in events:
        if event.img:
            event.image = decode_image(event.img)
        user_is_fan = checkFavorites(event)
        if user_is_fan:
            event.fan = True
    return render_template("my-events.html", title="My Events", events=events)
@app.route("/calendar")
@login_required
def calendar():
    """Render the (client-side) calendar view."""
    return render_template("calendar.html")
@app.route("/toggle-favorite", methods=["POST"])
@login_required
def toggle_favorite():
    """AJAX endpoint: add/remove the current user from an event's fans.

    Expects JSON {"event": <event id>}; responds with the new fan state (bool).
    """
    req = request.get_json()
    event = Event.query.filter_by(id=req["event"]).first()
    user_is_fan = checkFavorites(event)
    if user_is_fan:
        event.fan = False
        event.fans.remove(current_user)
    else:
        event.fan = True
        event.fans.append(current_user)
    # NOTE(review): `fan` looks like a transient attribute used only for the
    # response/templates, while `fans` is the persisted relationship —
    # confirm `fan` is not a model column.
    db.session.commit()
    res = make_response(jsonify(event.fan), 200)
    return res
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the proper status code."""
    return render_template("404.html"), 404
if __name__ == "__main__":
    # BUG FIX: the original called session.init_app(app), but `session` here
    # is Flask's request-bound session proxy (imported from flask at the top
    # of this file), which has no init_app() — it raised AttributeError on
    # startup. Server-side sessions would require flask_session.Session(app).
    app.run(debug=True)
| 2.375 | 2 |
finite_field_test.py | a-l-r1/cryptography | 0 | 12766247 | <reponame>a-l-r1/cryptography
from finite_field import *
def main() -> None:
a = GF2Power31(42)
print(a)
fp = get_prime_finite_field_element_class(5, 'fp')
fpn = get_prime_finite_field_power_element_class(3, [fp(1), fp(3), fp(4), fp(2)], 'fpn')
a = fpn([fp(1), fp(2), fp(3)])
b = fpn([fp(2), fp(3), fp(2)])
print(a)
print(b)
print(a * b)
if __name__ == '__main__':
main() | 3.375 | 3 |
devil/devil/android/tools/system_app_test.py | Martijnve23/catapult | 1,894 | 12766248 | <reponame>Martijnve23/catapult<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
_PACKAGE_NAME = 'com.android'
_PACKAGE_PATH = '/path/to/com.android.apk'
_PM_LIST_PACKAGES_COMMAND = [
'pm', 'list', 'packages', '-f', '-u', _PACKAGE_NAME
]
_PM_LIST_PACKAGES_OUTPUT_WITH_PATH = [
'package:/path/to/other=' + _PACKAGE_NAME + '.other',
'package:' + _PACKAGE_PATH + '=' + _PACKAGE_NAME
]
_PM_LIST_PACKAGES_OUTPUT_WITHOUT_PATH = [
'package:/path/to/other=' + _PACKAGE_NAME + '.other'
]
class SystemAppTest(unittest.TestCase):
def testDoubleEnableModification(self):
"""Ensures that system app modification logic isn't repeated.
If EnableSystemAppModification uses are nested, inner calls should
not need to perform any of the expensive modification logic.
"""
# pylint: disable=no-self-use,protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
type(mock_device).build_version_sdk = mock.PropertyMock(
return_value=version_codes.LOLLIPOP)
system_props = {}
def dict_setprop(prop_name, value):
system_props[prop_name] = value
def dict_getprop(prop_name):
return system_props.get(prop_name, '')
mock_device.SetProp.side_effect = dict_setprop
mock_device.GetProp.side_effect = dict_getprop
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_called_once_with()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '1')
mock_device.reset_mock()
with system_app.EnableSystemAppModification(mock_device):
self.assertFalse(mock_device.EnableRoot.mock_calls) # assert not called
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
self.assertFalse(mock_device.SetProp.mock_calls) # assert not called
mock_device.reset_mock()
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '0')
def test_GetApplicationPaths_found(self):
"""Path found in output along with another package having similar name."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITH_PATH)
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([_PACKAGE_PATH], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_notFound(self):
"""Path not found in output, only another package with similar name."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITHOUT_PATH)
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_noPaths(self):
"""Nothing containing text of package name found in output."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(return_value=[])
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_emptyName(self):
"""Called with empty name, should not return any packages."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITH_PATH)
paths = system_app._GetApplicationPaths(mock_device, '')
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND[:-1] + [''], check_return=True)
if __name__ == '__main__':
unittest.main()
| 2.15625 | 2 |
projects/flow.qe-minimal/init.py | phmalek/signac-examples | 1 | 12766249 | <gh_stars>1-10
import signac
project = signac.init_project('flow.quantum_espresso-example-project')
# For this example, we only initialize a single job.
project.open_job({
'potential': 'Si.pw-mt_fhi.upf',
'lattice_parameter': 10.2625545471,
'number_of_bands': 20,
}).init()
| 1.804688 | 2 |
sleap/nn/data/training.py | hectorcarrion/sleap | 156 | 12766250 | <reponame>hectorcarrion/sleap
"""Transformers and utilities for training-related operations."""
import numpy as np
import tensorflow as tf
import sleap
from sleap.nn.data.providers import LabelsReader
from sleap.nn.data.utils import expand_to_rank, ensure_list
import attr
from typing import List, Text, Optional, Any, Union, Dict, Tuple, Sequence
from sklearn.model_selection import train_test_split
def split_labels_train_val(
labels: sleap.Labels, validation_fraction: float
) -> Tuple[sleap.Labels, List[int], sleap.Labels, List[int]]:
"""Make a train/validation split from a labels dataset.
Args:
labels: A `sleap.Labels` dataset with labeled frames.
validation_fraction: Fraction of frames to use for validation.
Returns:
A tuple of `(labels_train, idx_train, labels_val, idx_val)`.
`labels_train` and `labels_val` are `sleap.Label` objects containing the
selected frames for each split. Their `videos`, `tracks` and `provenance`
attributes are identical to `labels` even if the split does not contain
instances with a particular video or track.
`idx_train` and `idx_val` are list indices of the labeled frames within the
input labels that were assigned to each split, i.e.:
`labels[idx_train] == labels_train[:]`
If there is only one labeled frame in `labels`, both of the labels will contain
the same frame.
If `validation_fraction` would result in fewer than one label for either split,
it will be rounded to ensure there is at least one label in each.
"""
if len(labels) == 1:
return labels, [0], labels, [0]
# Split indices.
n_val = round(len(labels) * validation_fraction)
n_val = max(min(n_val, len(labels) - 1), 1)
idx_train, idx_val = train_test_split(list(range(len(labels))), test_size=n_val)
# Create labels and keep original metadata.
labels_train = sleap.Labels(labels[idx_train])
labels_train.videos = labels.videos
labels_train.tracks = labels.tracks
labels_train.provenance = labels.provenance
labels_val = sleap.Labels(labels[idx_val])
labels_val.videos = labels.videos
labels_val.tracks = labels.tracks
labels_val.provenance = labels.provenance
return labels_train, idx_train, labels_val, idx_val
def split_labels(
labels: sleap.Labels, split_fractions: Sequence[float]
) -> Tuple[sleap.Labels]:
"""Split a `sleap.Labels` into multiple new ones with random subsets of the data.
Args:
labels: An instance of `sleap.Labels`.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
Fractions of less than 1 element will be rounded up to ensure that is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of new `sleap.Labels` instances of the same length as `split_fractions`.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""
# Get indices for labeled frames.
labels_indices = np.arange(len(labels)).astype("int64")
# Compute split sizes.
n_examples = len(labels_indices)
n_examples_per_split = np.array(split_fractions).astype("float64")
if (n_examples_per_split == -1).sum() > 1:
raise ValueError("Only one split fraction can be specified as -1.")
n_examples_per_split[n_examples_per_split == -1] = np.NaN
n_examples_per_split = np.ceil(n_examples_per_split * n_examples)
n_examples_per_split[np.isnan(n_examples_per_split)] = np.maximum(
n_examples - np.nansum(n_examples_per_split), 1
)
n_examples_per_split = n_examples_per_split.astype("int64")
if n_examples_per_split.sum() > n_examples:
raise ValueError("Splits cannot sum to more than the total input labels.")
# Sample and create new Labels instances.
split_labels = []
for n_samples in n_examples_per_split:
# Sample.
sampled_indices = np.random.default_rng().choice(
labels_indices, size=n_samples, replace=False
)
# Create new instance.
split_labels.append(sleap.Labels([labels[int(ind)] for ind in sampled_indices]))
# Exclude the sampled indices from the available indices.
labels_indices = np.setdiff1d(labels_indices, sampled_indices)
return tuple(split_labels)
def split_labels_reader(
labels_reader: LabelsReader, split_fractions: Sequence[float]
) -> Tuple[LabelsReader]:
"""Split a `LabelsReader` into multiple new ones with random subsets of the data.
Args:
labels_reader: An instance of `sleap.nn.data.providers.LabelsReader`. This is a
provider that generates datasets that contain elements read from a
`sleap.Labels` instance.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
Fractions of less than 1 element will be rounded up to ensure that is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of `LabelsReader` instances of the same length as `split_fractions`. The
indices will be stored in the `example_indices` in each `LabelsReader` instance.
The actual `sleap.Labels` instance will be the same for each instance, only the
`example_indices` that are iterated over will change across splits.
If the input `labels_reader` already has `example_indices`, a subset of these
will be sampled to generate the splits.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""
# Get available indices.
labels_indices = labels_reader.example_indices
if labels_indices is None:
labels_indices = np.arange(len(labels_reader))
labels_indices = np.array(labels_indices).astype("int64")
# Compute split sizes.
n_examples = len(labels_indices)
n_examples_per_split = np.array(split_fractions).astype("float64")
if (n_examples_per_split == -1).sum() > 1:
raise ValueError("Only one split fraction can be specified as -1.")
n_examples_per_split[n_examples_per_split == -1] = np.NaN
n_examples_per_split = np.ceil(n_examples_per_split * n_examples)
n_examples_per_split[np.isnan(n_examples_per_split)] = np.maximum(
n_examples - np.nansum(n_examples_per_split), 1
)
n_examples_per_split = n_examples_per_split.astype("int64")
if n_examples_per_split.sum() > n_examples:
raise ValueError("Splits cannot sum to more than the total input labels.")
# Sample and create new LabelsReader instances.
split_readers = []
for n_samples in n_examples_per_split:
# Sample.
sampled_indices = np.random.default_rng().choice(
labels_indices, size=n_samples, replace=False
)
# Create new instance.
split_readers.append(
LabelsReader(labels_reader.labels, example_indices=sampled_indices)
)
# Exclude the sampled indices from the available indices.
labels_indices = np.setdiff1d(labels_indices, sampled_indices)
return tuple(split_readers)
@attr.s(auto_attribs=True)
class KeyMapper:
"""Maps example keys to specified outputs.
This is useful for transforming examples into tuples that map onto specific layer
names for training.
Attributes:
key_maps: Dictionary or list of dictionaries with string keys and values of
the form: {input_key: output_key}. If a list, the examples will be in tuples
in the same order.
"""
key_maps: List[Dict[Text, Text]] = attr.ib(
converter=attr.converters.optional(ensure_list)
)
@property
def input_keys(self) -> List[Text]:
"""Return the keys that incoming elements are expected to have."""
input_keys = []
for key_map in self.key_maps:
input_keys.extend(list(key_map.keys()))
return input_keys
@property
def output_keys(self) -> List[Text]:
"""Return the keys that outgoing elements will have. These may be nested."""
output_keys = []
for key_map in self.key_maps:
output_keys.extend(list(key_map.values()))
return output_keys
def transform_dataset(self, ds_input: tf.data.Dataset) -> tf.data.Dataset:
"""Create a dataset with input keys mapped to new key names.
Args:
ds_input: Any `tf.data.Dataset` that generates examples as a dictionary of
tensors with the keys in `input_keys`.
Return:
A dataset that generates examples with the tensors in `input_keys` mapped to
keys in `output_keys` according to the structure in `key_maps`.
"""
def map_keys(example):
"""Local processing function for dataset mapping."""
output_keys = []
for key_map in self.key_maps:
output_keys.append(
{key_out: example[key_in] for key_in, key_out in key_map.items()}
)
return tuple(output_keys)
ds_output = ds_input.map(map_keys)
return ds_output
| 2.90625 | 3 |