# Copyright (c) 2020 Vishnu J. Seesahai
# Use of this source code is governed by an MIT
# license that can be found in the LICENSE file.
import subprocess
import os, sys, json, rpcworker
from PyQt5.QtCore import *
from config import MIN_CONF, MAX_CONF
from rpcworker import progress_fn, thread_complete
_translate = QCoreApplication.translate
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath('.'), relative_path)
def get_balance(uname, pwd, progress_callback):
bal_cmd_result, err = (subprocess.Popen([resource_path('bin/pktctl'), '-u', uname, '-P', pwd, '--wallet', 'getbalance'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate())
bal_cmd_result = bal_cmd_result.decode('utf-8')
print('bal_cmd_result', bal_cmd_result)
err = err.decode('utf-8')
print(bal_cmd_result, err)
if not err:
bal_cmd_result = round(float(bal_cmd_result),8)
return str(bal_cmd_result).rstrip()
def get_balance_for_addr(uname, pwd, addr):
    # Popen arguments must be strings, so cast the confirmation bounds from config.
    addr_bal_cmd_result, err = (subprocess.Popen([resource_path('bin/pktctl'), '-u', uname, '-P', pwd, '--wallet', 'listunspent', str(MIN_CONF), str(MAX_CONF), '[\"'+addr+'\"]'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate())
addr_bal_cmd_result = addr_bal_cmd_result.decode('utf-8')
err = err.decode('utf-8')
if not err:
        # listunspent returns a JSON array of unspent outputs; parse it before summing.
        unspent = json.loads(addr_bal_cmd_result)
        bal = 0
        for itm in unspent:
            bal += round(float(itm["amount"]), 4)
        return str(bal)
def get_balance_thd(u, p, win, state, pool):
global window, uname, pwd, worker_state_active
window = win
uname = u
pwd = p
worker_state_active = state
threadpool = pool
# Pass the function to execute
if not worker_state_active['GET_BALANCE']:
window.balance_amount.setText(_translate("MainWindow", "Calculating..."))
worker_state_active['GET_BALANCE'] = True
worker = rpcworker.Worker(get_balance, uname, pwd)
worker.signals.result.connect(print_result)
worker.signals.finished.connect(thread_complete)
#worker.signals.progress.connect(progress_fn)
# Execute
threadpool.start(worker)
def print_result(result):
window.balance_amount.clear()
if result:
window.balance_amount.setText(_translate("MainWindow", result))
else:
window.balance_amount.setText(_translate("MainWindow", "Failed to retrieve balance."))
worker_state_active['GET_BALANCE'] = False
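# Example wiring (a minimal sketch, not part of this module): `main_window`, its
# `balance_amount` widget, and the credentials below are assumptions.
# from PyQt5.QtCore import QThreadPool
# pool = QThreadPool()
# state = {'GET_BALANCE': False}
# get_balance_thd('rpc_user', 'rpc_password', main_window, state, pool)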
|
from __future__ import absolute_import
from .s3 import S3Backend
|
# Copyright (c) 2008 Mikeal Rogers # lint-amnesty, pylint: disable=missing-module-docstring
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOOKUP = {}
from .paths import add_lookup, clear_lookups, lookup_template, save_lookups # lint-amnesty, pylint: disable=wrong-import-position
class Engines:
"""
Aliases for the available template engines.
Note that the preview engine is only configured for cms.
"""
DJANGO = 'django'
MAKO = 'mako'
PREVIEW = 'preview'
|
import werkzeug
from werkzeug.exceptions import HTTPException
_version = werkzeug.__version__.split('.')[0]
if _version in ('0', '1'):
class _HTTPException(HTTPException):
def __init__(self, code, body, headers, response=None):
super(_HTTPException, self).__init__(None, response)
self.code = code
self.body = body
self.headers = headers
def get_body(self, environ=None):
return self.body
def get_headers(self, environ=None):
return self.headers
else:
class _HTTPException(HTTPException):
def __init__(self, code, body, headers, response=None):
super(_HTTPException, self).__init__(None, response)
self.code = code
self.body = body
self.headers = headers
def get_body(self, environ=None, scope=None):
return self.body
def get_headers(self, environ=None, scope=None):
return self.headers
def raise_http_exception(status, body, headers):
raise _HTTPException(status, body, headers)
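# Minimal usage sketch (an addition, not part of the original shim): raise the
# version-appropriate exception and read its fields back.
if __name__ == '__main__':
    try:
        raise_http_exception(404, b'not found', [('Content-Type', 'text/plain')])
    except HTTPException as exc:
        print(exc.code, exc.get_body(), exc.get_headers())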
|
# Copyright © 2016-2021 Medical Image Analysis Laboratory, University Hospital Center and University of Lausanne (UNIL-CHUV), Switzerland
#
# This software is distributed under the open-source license Modified BSD.
"""Module for the super-resolution reconstruction pipeline."""
import os
import sys
import platform
import json
import shutil
import pkg_resources
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
from jinja2 import __version__ as __jinja2_version__
import nibabel as nib
from nipype.info import __version__ as __nipype_version__
from nipype import config
from nipype import logging as nipype_logging
from nipype.interfaces.io import DataGrabber, DataSink
from nipype.pipeline import Node, MapNode, Workflow
from nipype.interfaces.utility import IdentityInterface
# Import the implemented interface from pymialsrtk
import pymialsrtk.interfaces.reconstruction as reconstruction
import pymialsrtk.interfaces.postprocess as postprocess
import pymialsrtk.interfaces.preprocess as preprocess
import pymialsrtk.interfaces.utils as utils
from pymialsrtk.bids.utils import write_bids_derivative_description
# Get pymialsrtk version
from pymialsrtk.info import __version__
class AnatomicalPipeline:
"""Class used to represent the workflow of the Super-Resolution reconstruction pipeline.
Attributes
-----------
bids_dir : string
BIDS root directory (required)
output_dir : string
Output derivatives directory (required)
subject : string
Subject ID (in the form ``sub-XX``)
wf : nipype.pipeline.Workflow
Nipype workflow of the reconstruction pipeline
    deltatTV : float
        Super-resolution optimization time-step (default is 0.01)
    lambdaTV : float
        Regularization weight (default is 0.75)
    primal_dual_loops : int
        Number of primal/dual loops used in the optimization of the total-variation
        super-resolution algorithm (default is 10)
    sr_id : string
        ID of the reconstruction, useful to distinguish between multiple
        reconstructions run on the same subject with different stack orders
    session : string
        Session ID if applicable (in the form ``ses-YY``)
    m_stacks : list(int)
        List of stacks to be used in the reconstruction.
        The specified order is kept if `skip_stacks_ordering` is True.
    m_masks_derivatives_dir : string
        Directory basename in the BIDS derivatives directory where masks are searched for (optional)
    m_skip_svr : bool
        Whether the Slice-to-Volume Registration should be skipped in the image reconstruction.
        (default is False)
    m_do_refine_hr_mask : bool
        Whether a refinement of the HR mask should be performed. (default is False)
    m_skip_nlm_denoising : bool
        Whether the NLM denoising preprocessing should be skipped. (default is False)
    m_skip_stacks_ordering : bool (optional)
        Whether the automatic stacks ordering should be skipped. (default is False)
Examples
--------
>>> from pymialsrtk.pipelines.anatomical.srr import AnatomicalPipeline
>>> # Create a new instance
    >>> pipeline = AnatomicalPipeline(bids_dir='/path/to/bids_dir',
                                      output_dir='/path/to/output_dir',
                                      subject='sub-01',
                                      p_stacks=[1,3,2,0],
                                      sr_id=1,
                                      session=None,
                                      paramTV={'deltatTV': 0.001,
                                               'lambdaTV': 0.75,
                                               'primal_dual_loops': 20},
                                      p_masks_derivatives_dir='/custom/mask_dir',
                                      p_masks_desc=None,
                                      p_dict_custom_interfaces=None)
>>> # Create the super resolution Nipype workflow
>>> pipeline.create_workflow()
>>> # Execute the workflow
    >>> res = pipeline.run()  # doctest: +SKIP
"""
pipeline_name = "srr_pipeline"
run_start_time = None
run_end_time = None
run_elapsed_time = None
bids_dir = None
output_dir = None
subject = None
wf = None
    deltatTV = 0.01
    lambdaTV = 0.75
    primal_dual_loops = 10
sr_id = None
session = None
m_stacks = None
# Custom interfaces options
m_skip_svr = None
m_skip_nlm_denoising = None
m_skip_stacks_ordering = None
m_do_refine_hr_mask = None
m_masks_derivatives_dir = None
use_manual_masks = False
m_masks_desc = None
openmp_number_of_cores = None
nipype_number_of_cores = None
def __init__(
self, bids_dir, output_dir, subject, p_stacks=None, sr_id=1,
session=None, paramTV=None, p_masks_derivatives_dir=None, p_masks_desc=None,
p_dict_custom_interfaces=None,
openmp_number_of_cores=None, nipype_number_of_cores=None
):
"""Constructor of AnatomicalPipeline class instance."""
# BIDS processing parameters
self.bids_dir = bids_dir
self.output_dir = output_dir
self.subject = subject
self.sr_id = sr_id
self.session = session
self.m_stacks = p_stacks
self.openmp_number_of_cores = openmp_number_of_cores
self.nipype_number_of_cores = nipype_number_of_cores
# (default) sr tv parameters
if paramTV is None:
paramTV = dict()
self.deltatTV = paramTV["deltatTV"] if "deltatTV" in paramTV.keys() else 0.01
self.lambdaTV = paramTV["lambdaTV"] if "lambdaTV" in paramTV.keys() else 0.75
self.primal_dual_loops = paramTV["primal_dual_loops"] if "primal_dual_loops" in paramTV.keys() else 10
# Use manual/custom brain masks
# If masks directory is not specified use the automated brain extraction method.
self.m_masks_derivatives_dir = p_masks_derivatives_dir
self.use_manual_masks = True if self.m_masks_derivatives_dir is not None else False
self.m_masks_desc = p_masks_desc if self.use_manual_masks else None
# Custom interfaces and default values.
if p_dict_custom_interfaces is not None:
self.m_skip_svr = p_dict_custom_interfaces['skip_svr'] if 'skip_svr' in p_dict_custom_interfaces.keys() else False
self.m_do_refine_hr_mask = p_dict_custom_interfaces['do_refine_hr_mask'] if 'do_refine_hr_mask' in p_dict_custom_interfaces.keys() else False
self.m_skip_nlm_denoising = p_dict_custom_interfaces['skip_nlm_denoising'] if 'skip_nlm_denoising' in p_dict_custom_interfaces.keys() else False
self.m_skip_stacks_ordering = p_dict_custom_interfaces['skip_stacks_ordering'] if \
((self.m_stacks is not None) and ('skip_stacks_ordering' in p_dict_custom_interfaces.keys())) else False
else:
self.m_skip_svr = False
self.m_do_refine_hr_mask = False
self.m_skip_nlm_denoising = False
self.m_skip_stacks_ordering = False
def create_workflow(self):
"""Create the Niype workflow of the super-resolution pipeline.
It is composed of a succession of Nodes and their corresponding parameters,
where the output of node i goes to the input of node i+1.
"""
sub_ses = self.subject
if self.session is not None:
sub_ses = ''.join([sub_ses, '_', self.session])
if self.session is None:
wf_base_dir = os.path.join(self.output_dir,
'-'.join(["nipype", __nipype_version__]),
self.subject,
"rec-{}".format(self.sr_id))
final_res_dir = os.path.join(self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject)
else:
wf_base_dir = os.path.join(self.output_dir,
'-'.join(["nipype", __nipype_version__]),
self.subject,
self.session,
"rec-{}".format(self.sr_id))
final_res_dir = os.path.join(self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
self.session)
if not os.path.exists(wf_base_dir):
os.makedirs(wf_base_dir)
print("Process directory: {}".format(wf_base_dir))
# Initialization (Not sure we can control the name of nipype log)
if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
os.unlink(os.path.join(wf_base_dir, "pypeline.log"))
self.wf = Workflow(name=self.pipeline_name,base_dir=wf_base_dir)
config.update_config(
{
'logging': {
'log_directory': os.path.join(wf_base_dir),
'log_to_file': True
},
'execution': {
'remove_unnecessary_outputs': False,
'stop_on_first_crash': True,
'stop_on_first_rerun': False,
'crashfile_format': "txt",
'use_relative_paths': True,
'write_provenance': False
}
}
)
        # Update nipype logging with config
nipype_logging.update_logging(config)
# config.enable_provenance()
if self.use_manual_masks:
dg = Node(
interface=DataGrabber(outfields=['T2ws', 'masks']),
name='data_grabber'
)
dg.inputs.base_directory = self.bids_dir
dg.inputs.template = '*'
dg.inputs.raise_on_empty = False
dg.inputs.sort_filelist = True
if self.session is not None:
t2ws_template = os.path.join(
self.subject, self.session, 'anat',
'_'.join([sub_ses, '*run-*', '*T2w.nii.gz'])
)
if self.m_masks_desc is not None:
masks_template = os.path.join(
'derivatives', self.m_masks_derivatives_dir, self.subject, self.session, 'anat',
'_'.join([sub_ses, '*_run-*', '_desc-'+self.m_masks_desc, '*mask.nii.gz'])
)
else:
masks_template = os.path.join(
'derivatives', self.m_masks_derivatives_dir, self.subject, self.session, 'anat',
'_'.join([sub_ses, '*run-*', '*mask.nii.gz'])
)
else:
t2ws_template=os.path.join(self.subject, 'anat', sub_ses + '*_run-*_T2w.nii.gz')
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir, self.subject, 'anat',
                        '_'.join([sub_ses, '*_run-*', '_desc-'+self.m_masks_desc, '*mask.nii.gz'])
                    )
else:
masks_template = os.path.join(
'derivatives', self.m_masks_derivatives_dir, self.subject, 'anat',
sub_ses + '*_run-*_*mask.nii.gz'
)
dg.inputs.field_template = dict(T2ws=t2ws_template,
masks=masks_template)
brainMask = MapNode(interface=IdentityInterface(fields=['out_file']),
name='brain_masks_bypass',
iterfield=['out_file'])
if self.m_stacks is not None:
custom_masks_filter = Node(interface=preprocess.FilteringByRunid(),
name='custom_masks_filter')
custom_masks_filter.inputs.stacks_id = self.m_stacks
else:
dg = Node(interface=DataGrabber(outfields=['T2ws']),
name='data_grabber')
dg.inputs.base_directory = self.bids_dir
dg.inputs.template = '*'
dg.inputs.raise_on_empty = False
dg.inputs.sort_filelist = True
dg.inputs.field_template = dict(T2ws=os.path.join(self.subject,
'anat',
sub_ses+'*_run-*_T2w.nii.gz'))
if self.session is not None:
dg.inputs.field_template = dict(T2ws=os.path.join(self.subject,
self.session, 'anat', '_'.join([sub_ses, '*run-*', '*T2w.nii.gz'])))
if self.m_stacks is not None:
t2ws_filter_prior_masks = Node(interface=preprocess.FilteringByRunid(),
name='t2ws_filter_prior_masks')
t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks
brainMask = MapNode(interface = preprocess.BrainExtraction(),
name='brainExtraction',
iterfield=['in_file'])
brainMask.inputs.bids_dir = self.bids_dir
brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
"pymialsrtk",
os.path.join("data",
"Network_checkpoints",
"Network_checkpoints_localization",
"Unet.ckpt-88000.index")
).split('.index')[0]
brainMask.inputs.threshold_loc = 0.49
brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
"pymialsrtk",
os.path.join("data",
"Network_checkpoints",
"Network_checkpoints_segmentation",
"Unet.ckpt-20000.index")
).split('.index')[0]
brainMask.inputs.threshold_seg = 0.5
t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
name='t2ws_filtered')
masks_filtered = Node(interface=preprocess.FilteringByRunid(),
name='masks_filtered')
if not self.m_skip_stacks_ordering:
stacksOrdering = Node(interface=preprocess.StacksOrdering(),
name='stackOrdering')
else:
stacksOrdering = Node(interface=IdentityInterface(fields=['stacks_order']),
name='stackOrdering')
stacksOrdering.inputs.stacks_order = self.m_stacks
if not self.m_skip_nlm_denoising:
nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
name='nlmDenoise',
iterfield=['in_file', 'in_mask'])
nlmDenoise.inputs.bids_dir = self.bids_dir
            # Without the mask for the first correct slice intensity...
srtkCorrectSliceIntensity01_nlm = MapNode(interface=preprocess.MialsrtkCorrectSliceIntensity(),
name='srtkCorrectSliceIntensity01_nlm',
iterfield=['in_file', 'in_mask'])
srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'
srtkCorrectSliceIntensity01 = MapNode(interface=preprocess.MialsrtkCorrectSliceIntensity(),
name='srtkCorrectSliceIntensity01',
iterfield=['in_file', 'in_mask'])
srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'
srtkSliceBySliceN4BiasFieldCorrection = MapNode(interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
name='srtkSliceBySliceN4BiasFieldCorrection',
iterfield=['in_file', 'in_mask'])
srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir
srtkSliceBySliceCorrectBiasField = MapNode(interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
name='srtkSliceBySliceCorrectBiasField',
iterfield=['in_file', 'in_mask', 'in_field'])
srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir
# 4-modules sequence to be defined as a stage.
if not self.m_skip_nlm_denoising:
srtkCorrectSliceIntensity02_nlm = MapNode(interface=preprocess.MialsrtkCorrectSliceIntensity(),
name='srtkCorrectSliceIntensity02_nlm',
iterfield=['in_file', 'in_mask'])
srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir
srtkIntensityStandardization01_nlm = Node(interface=preprocess.MialsrtkIntensityStandardization(),
name='srtkIntensityStandardization01_nlm')
srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir
srtkHistogramNormalization_nlm = Node(interface=preprocess.MialsrtkHistogramNormalization(),
name='srtkHistogramNormalization_nlm')
srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir
srtkIntensityStandardization02_nlm = Node(interface=preprocess.MialsrtkIntensityStandardization(),
name='srtkIntensityStandardization02_nlm')
srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir
# 4-modules sequence to be defined as a stage.
srtkCorrectSliceIntensity02 = MapNode(interface=preprocess.MialsrtkCorrectSliceIntensity(),
name='srtkCorrectSliceIntensity02',
iterfield=['in_file', 'in_mask'])
srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir
srtkIntensityStandardization01 = Node(interface=preprocess.MialsrtkIntensityStandardization(),
name='srtkIntensityStandardization01')
srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir
srtkHistogramNormalization = Node(interface=preprocess.MialsrtkHistogramNormalization(),
name='srtkHistogramNormalization')
srtkHistogramNormalization.inputs.bids_dir = self.bids_dir
srtkIntensityStandardization02 = Node(interface=preprocess.MialsrtkIntensityStandardization(),
name='srtkIntensityStandardization02')
srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir
srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
name='srtkMaskImage01',
iterfield=['in_file', 'in_mask'])
srtkMaskImage01.inputs.bids_dir = self.bids_dir
srtkImageReconstruction = Node(interface=reconstruction.MialsrtkImageReconstruction(),
name='srtkImageReconstruction')
srtkImageReconstruction.inputs.bids_dir = self.bids_dir
srtkImageReconstruction.inputs.sub_ses = sub_ses
srtkImageReconstruction.inputs.no_reg = self.m_skip_svr
srtkTVSuperResolution = Node(interface=reconstruction.MialsrtkTVSuperResolution(),
name='srtkTVSuperResolution')
srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
srtkTVSuperResolution.inputs.sub_ses = sub_ses
srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks
srtkN4BiasFieldCorrection = Node(interface=postprocess.MialsrtkN4BiasFieldCorrection(),
name='srtkN4BiasFieldCorrection')
srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir
if self.m_do_refine_hr_mask:
srtkHRMask = Node(interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
name='srtkHRMask')
srtkHRMask.inputs.bids_dir = self.bids_dir
else:
srtkHRMask = Node(interface=postprocess.BinarizeImage(), name='srtkHRMask')
srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
name='srtkMaskImage02')
srtkMaskImage02.inputs.bids_dir = self.bids_dir
# Build workflow : connections of the nodes
# Nodes ready : Linking now
if self.use_manual_masks:
if self.m_stacks is not None:
self.wf.connect(dg, "masks", custom_masks_filter, "input_files")
self.wf.connect(custom_masks_filter, "output_files", brainMask, "out_file")
else:
self.wf.connect(dg, "masks", brainMask, "out_file")
else:
if self.m_stacks is not None:
self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks, "input_files")
self.wf.connect(t2ws_filter_prior_masks, "output_files", brainMask, "in_file")
else:
self.wf.connect(dg, "T2ws", brainMask, "in_file")
if not self.m_skip_stacks_ordering:
self.wf.connect(brainMask, "out_file", stacksOrdering, "input_masks")
self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered, "stacks_id")
self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")
self.wf.connect(stacksOrdering, "stacks_order", masks_filtered, "stacks_id")
self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")
if not self.m_skip_nlm_denoising:
self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending), nlmDenoise, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), nlmDenoise, "in_mask") ## Comment to match docker process
self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending), srtkCorrectSliceIntensity01_nlm, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkCorrectSliceIntensity01_nlm, "in_mask")
self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending), srtkCorrectSliceIntensity01, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkCorrectSliceIntensity01, "in_mask")
if not self.m_skip_nlm_denoising:
self.wf.connect(srtkCorrectSliceIntensity01_nlm, ("out_file", utils.sort_ascending), srtkSliceBySliceN4BiasFieldCorrection, "in_file")
else:
self.wf.connect(srtkCorrectSliceIntensity01, ("out_file", utils.sort_ascending),srtkSliceBySliceN4BiasFieldCorrection, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkSliceBySliceN4BiasFieldCorrection, "in_mask")
self.wf.connect(srtkCorrectSliceIntensity01, ("out_file", utils.sort_ascending), srtkSliceBySliceCorrectBiasField, "in_file")
self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection, ("out_fld_file", utils.sort_ascending), srtkSliceBySliceCorrectBiasField, "in_field")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkSliceBySliceCorrectBiasField, "in_mask")
if not self.m_skip_nlm_denoising:
self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection, ("out_im_file", utils.sort_ascending), srtkCorrectSliceIntensity02_nlm, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkCorrectSliceIntensity02_nlm, "in_mask")
self.wf.connect(srtkCorrectSliceIntensity02_nlm, ("out_file", utils.sort_ascending), srtkIntensityStandardization01_nlm, "input_images")
self.wf.connect(srtkIntensityStandardization01_nlm, ("output_images", utils.sort_ascending), srtkHistogramNormalization_nlm, "input_images")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkHistogramNormalization_nlm, "input_masks")
self.wf.connect(srtkHistogramNormalization_nlm, ("output_images", utils.sort_ascending), srtkIntensityStandardization02_nlm, "input_images")
self.wf.connect(srtkSliceBySliceCorrectBiasField, ("out_im_file", utils.sort_ascending), srtkCorrectSliceIntensity02, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkCorrectSliceIntensity02, "in_mask")
self.wf.connect(srtkCorrectSliceIntensity02, ("out_file", utils.sort_ascending), srtkIntensityStandardization01, "input_images")
self.wf.connect(srtkIntensityStandardization01, ("output_images", utils.sort_ascending), srtkHistogramNormalization, "input_images")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkHistogramNormalization, "input_masks")
self.wf.connect(srtkHistogramNormalization, ("output_images", utils.sort_ascending), srtkIntensityStandardization02, "input_images")
if not self.m_skip_nlm_denoising:
self.wf.connect(srtkIntensityStandardization02_nlm, ("output_images", utils.sort_ascending), srtkMaskImage01, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkMaskImage01, "in_mask")
else:
self.wf.connect(srtkIntensityStandardization02, ("output_images", utils.sort_ascending), srtkMaskImage01, "in_file")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkMaskImage01, "in_mask")
self.wf.connect(srtkMaskImage01, "out_im_file", srtkImageReconstruction, "input_images")
self.wf.connect(masks_filtered, "output_files", srtkImageReconstruction, "input_masks")
self.wf.connect(stacksOrdering, "stacks_order", srtkImageReconstruction, "stacks_order")
self.wf.connect(srtkIntensityStandardization02, "output_images", srtkTVSuperResolution, "input_images")
self.wf.connect(srtkImageReconstruction, ("output_transforms", utils.sort_ascending), srtkTVSuperResolution, "input_transforms")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkTVSuperResolution, "input_masks")
self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution, "stacks_order")
self.wf.connect(srtkImageReconstruction, "output_sdi", srtkTVSuperResolution, "input_sdi")
if self.m_do_refine_hr_mask:
self.wf.connect(srtkIntensityStandardization02, ("output_images", utils.sort_ascending), srtkHRMask, "input_images")
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), srtkHRMask, "input_masks")
self.wf.connect(srtkImageReconstruction, ("output_transforms", utils.sort_ascending), srtkHRMask, "input_transforms")
self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask, "input_sr")
else:
self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask, "input_image")
self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02, "in_file")
self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02, "in_mask")
self.wf.connect(srtkTVSuperResolution, "output_sr", srtkN4BiasFieldCorrection, "input_image")
self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection, "input_mask")
# Datasinker
finalFilenamesGeneration = Node(interface=postprocess.FilenamesGeneration(),
name='filenames_gen')
finalFilenamesGeneration.inputs.sub_ses = sub_ses
finalFilenamesGeneration.inputs.sr_id = self.sr_id
finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks
self.wf.connect(stacksOrdering, "stacks_order", finalFilenamesGeneration, "stacks_order")
datasink = Node(interface=DataSink(), name='data_sinker')
datasink.inputs.base_directory = final_res_dir
if not self.m_skip_stacks_ordering:
self.wf.connect(stacksOrdering, "report_image", datasink, 'figures.@stackOrderingQC')
self.wf.connect(stacksOrdering, "motion_tsv", datasink, 'anat.@motionTSV')
self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending), datasink, 'anat.@LRmasks')
self.wf.connect(srtkIntensityStandardization02, ("output_images", utils.sort_ascending), datasink, 'anat.@LRsPreproc')
self.wf.connect(srtkImageReconstruction, ("output_transforms", utils.sort_ascending), datasink, 'xfm.@transforms')
self.wf.connect(finalFilenamesGeneration, "substitutions", datasink, "substitutions")
self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending), datasink, 'anat.@LRsDenoised')
self.wf.connect(srtkImageReconstruction, "output_sdi", datasink, 'anat.@SDI')
self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink, 'anat.@SR')
self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink, 'anat.@SRjson')
self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink, 'figures.@SRpng')
self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')
def run(self, memory=None):
"""Execute the workflow of the super-resolution reconstruction pipeline.
Nipype execution engine will take care of the management and execution of
all processing steps involved in the super-resolution reconstruction pipeline.
        Note that the complete execution graph is saved as a PNG image to provide
        transparency on the whole processing.
Parameters
----------
memory : int
            Maximal memory (in GB) used by the workflow
"""
# Use nipype.interface logger to print some information messages
iflogger = nipype_logging.getLogger('nipype.interface')
iflogger.info("**** Workflow graph creation ****")
self.wf.write_graph(dotfilename='graph.dot', graph2use='colored', format='png', simple_form=True)
# Copy and rename the generated "graph.png" image
src = os.path.join(self.wf.base_dir, self.wf.name, 'graph.png')
if self.session is not None:
dst = os.path.join(
self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
self.session,
'figures',
f'{self.subject}_{self.session}_rec-SR_id-{self.sr_id}_desc-processing_graph.png'
)
else:
dst = os.path.join(
self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
'figures',
f'{self.subject}_rec-SR_id-{self.sr_id}_desc-processing_graph.png'
)
# Create the figures/ and parent directories if they do not exist
figures_dir = os.path.dirname(dst)
os.makedirs(figures_dir, exist_ok=True)
# Make the copy
iflogger.info(f'\t > Copy {src} to {dst}...')
shutil.copy(src=src, dst=dst)
# Create dictionary of arguments passed to plugin_args
args_dict = {
'raise_insufficient': False,
'n_procs': self.nipype_number_of_cores
}
if (memory is not None) and (memory > 0):
args_dict['memory_gb'] = memory
iflogger.info("**** Processing ****")
# datetime object containing current start date and time
start = datetime.now()
self.run_start_time = start.strftime("%B %d, %Y / %H:%M:%S")
print(f" Start date / time : {self.run_start_time}")
# Execute the workflow
if self.nipype_number_of_cores > 1:
res = self.wf.run(plugin='MultiProc', plugin_args=args_dict)
else:
res = self.wf.run()
# Copy and rename the workflow execution log
src = os.path.join(self.wf.base_dir, "pypeline.log")
if self.session is not None:
dst = os.path.join(
self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
self.session,
'logs',
f'{self.subject}_{self.session}_rec-SR_id-{self.sr_id}_log.txt'
)
else:
dst = os.path.join(
self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
'logs',
f'{self.subject}_rec-SR_id-{self.sr_id}_log.txt'
)
# Create the logs/ and parent directories if they do not exist
logs_dir = os.path.dirname(dst)
os.makedirs(logs_dir, exist_ok=True)
# Make the copy
iflogger.info(f'\t > Copy {src} to {dst}...')
shutil.copy(src=src, dst=dst)
# datetime object containing current end date and time
end = datetime.now()
self.run_end_time = end.strftime("%B %d, %Y / %H:%M:%S")
print(f" End date / time : {self.run_end_time}")
# Compute elapsed running time in minutes and seconds
duration = end - start
(minutes, seconds) = divmod(duration.total_seconds(), 60)
self.run_elapsed_time = f'{int(minutes)} minutes and {int(seconds)} seconds'
print(f" Elapsed time: {self.run_end_time}")
iflogger.info("**** Write dataset derivatives description ****")
for toolbox in ["pymialsrtk", "nipype"]:
write_bids_derivative_description(
bids_dir=self.bids_dir,
deriv_dir=self.output_dir,
pipeline_name=toolbox
)
iflogger.info("**** Super-resolution HTML report creation ****")
self.create_subject_report()
return res
def create_subject_report(self):
"""Create the HTML report"""
# Set main subject derivatives directory
if self.session is None:
sub_ses = self.subject
final_res_dir = os.path.join(self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject)
else:
sub_ses = f'{self.subject}_{self.session}'
final_res_dir = os.path.join(self.output_dir,
'-'.join(["pymialsrtk", __version__]),
self.subject,
self.session)
# Get the HTML report template
path = pkg_resources.resource_filename(
'pymialsrtk',
"data/report/templates/template.html"
)
jinja_template_dir = os.path.dirname(path)
file_loader = FileSystemLoader(jinja_template_dir)
env = Environment(loader=file_loader)
template = env.get_template('template.html')
# Load main data derivatives necessary for the report
sr_nii_image = os.path.join(
final_res_dir, 'anat',
f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.nii.gz'
)
img = nib.load(sr_nii_image)
sx, sy, sz = img.header.get_zooms()
sr_json_metadata = os.path.join(
final_res_dir, 'anat',
f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.json'
)
with open(sr_json_metadata) as f:
sr_json_metadata = json.load(f)
workflow_image = os.path.join(
'..', 'figures',
f'{sub_ses}_rec-SR_id-{self.sr_id}_desc-processing_graph.png'
)
sr_png_image = os.path.join(
'..', 'figures',
f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.png'
)
motion_report_image = os.path.join(
'..', 'figures',
f'{sub_ses}_rec-SR_id-{self.sr_id}_desc-motion_stats.png'
)
log_file = os.path.join(
'..', 'logs',
f'{sub_ses}_rec-SR_id-{self.sr_id}_log.txt'
)
# Create the text for {{subject}} and {{session}} fields in template
report_subject_text = f'{self.subject.split("-")[-1]}'
if self.session is not None:
report_session_text = f'{self.session.split("-")[-1]}'
else:
report_session_text = None
# Generate the report
report_html_content = template.render(
subject=report_subject_text,
session=report_session_text,
processing_datetime=self.run_start_time,
run_time=self.run_elapsed_time,
log=log_file,
sr_id=self.sr_id,
stacks=self.m_stacks,
svr="on" if not self.m_skip_svr else "off",
nlm_denoising="on" if not self.m_skip_nlm_denoising else "off",
stacks_ordering="on" if not self.m_skip_stacks_ordering else "off",
do_refine_hr_mask="on" if self.m_do_refine_hr_mask else "off",
use_auto_masks="on" if self.m_masks_derivatives_dir is None else "off",
custom_masks_dir=self.m_masks_derivatives_dir if self.m_masks_derivatives_dir is not None else None,
sr_resolution=f"{sx} x {sy} x {sz} mm<sup>3</sup>",
sr_json_metadata=sr_json_metadata,
workflow_graph=workflow_image,
sr_png_image=sr_png_image,
motion_report_image=motion_report_image,
version=__version__,
os=f'{platform.system()} {platform.release()}',
python=f'{sys.version}',
openmp_threads=self.openmp_number_of_cores,
nipype_threads=self.nipype_number_of_cores,
jinja_version=__jinja2_version__
)
# Create the report directory if it does not exist
report_dir = os.path.join(final_res_dir, 'report')
os.makedirs(report_dir, exist_ok=True)
# Save the HTML report file
out_report_filename = os.path.join(report_dir, f'{sub_ses}.html')
print(f'\t* Save HTML report as {out_report_filename}...')
with open(out_report_filename, "w+") as file:
file.write(report_html_content)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" 学堂在线课程下载 """
import re
import os
import sys
import json
import requests
from bs4 import BeautifulSoup
# Base URL
BASE_URL = 'http://www.xuetangx.com'
# A single global requests session
CONNECTION = requests.Session()
CONNECTION.headers.update({'User-Agent': 'Mozilla/5.0'})
# Regex for runs of consecutive whitespace characters
REG_SPACES = re.compile(r'\s+')
# Regex for characters that are illegal in Windows file names
REG_FILE = re.compile(r'[\\/:\*\?"<>\|]')
# Regex to strip leading default numbering (chapter/section prefixes)
REG_SORT = re.compile(r'^[第一二三四五六七八九十\d]+[\s\d\._\-章课节讲]*[\.\s、\-]\s*\d*')
def get_book(url):
""" 获得所有的 PDF 电子书 """
# 含有导航条的页面
print('正在获取电子书……')
nav_page = CONNECTION.get(url).text
shelves = set(re.findall(r'/courses/.+/pdfbook/\d/', nav_page))
for shelf_count, shelf in enumerate(shelves, 1):
res = CONNECTION.get(BASE_URL + shelf).text
soup = BeautifulSoup(res, 'lxml')
save_dir = os.path.join(BASE_DIR, 'Books', str(shelf_count))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for book_count, book in enumerate(soup.select('#booknav a'), 1):
print('------>', book.string)
file_name = REG_FILE.sub(' ', book.string) + '.pdf'
pdf = CONNECTION.get(BASE_URL + book['rel'][0]).content
with open(os.path.join(save_dir, file_name), 'wb') as pdf_file:
pdf_file.write(pdf)
def get_handout(url):
""" 从课程信息页面获得课程讲义的 HTML 文件 """
res = CONNECTION.get(url).text
soup = BeautifulSoup(res, 'lxml')
handouts = soup.find(class_='handouts')
for link in handouts.select('a[href^="/"]'):
link['href'] = BASE_URL + link['href']
with open(os.path.join(BASE_DIR, 'Handouts.html'), 'w', encoding='utf-8') as handouts_html:
handouts_html.write('<!DOCTYPE html>\n<html>\n<head>\n<title>讲义</title>\n<meta charset="utf-8">\n</head>\n<body>\n%s</body>\n</html>\n' % handouts.prettify())
def get_video(video_id, file_name):
""" 根据视频 ID 和文件名字获取视频信息 """
res = CONNECTION.get('https://xuetangx.com/videoid2source/' + video_id).text
try:
video_url = json.loads(res)['sources']['quality20'][0]
except:
video_url = json.loads(res)['sources']['quality10'][0]
VIDEOS.write(video_url + '\n')
RENAMER.write('REN "' + re.search(r'(\w+-[12]0.mp4)', video_url).group(1) + '" "%s.mp4"\n' % file_name)
def get_content(url):
""" 获取网页详细内容 """
# 获取课件页面(点击进入学习后的页面)
courseware = CONNECTION.get(url).text
soup = BeautifulSoup(courseware, 'lxml')
    # Get the DOM nodes of all chapters
chapters = soup.find(id='accordion').find_all(class_='chapter')
for chapter_count, chapter in enumerate(chapters, 1):
        # Chapter title
chapter_title = chapter.h3.a.get_text(strip=True)
print('%s' % chapter_title)
OUTLINE.write('%s {%d}\n' % (chapter_title, chapter_count))
        # Get section info, including URLs
sections = chapter.select('ul a')
for section_count, section_info in enumerate(sections, 1):
            # Section URL
section_url = BASE_URL + section_info['href']
            # Section title
section_title = section_info.p.string.strip()
print(' %s' % section_title)
OUTLINE.write(' %s {%d.%d}\n' % (section_title, chapter_count, section_count))
            # Page of each section
section_page = CONNECTION.get(section_url).text
soup = BeautifulSoup(section_page, 'lxml')
tabs = soup.find(id='sequence-list').find_all('li')
            # Video numbering restarts from 0 in every section
video_sec_count = 0
for tab_count, tab_info in enumerate(tabs, 1):
                # Title of each tab
                # The title attribute may contain line breaks and duplicates, so use data-page-title
tab_title = tab_info.a.get('data-page-title')
print(' %s' % tab_title)
OUTLINE.write(' %s {%d.%d.%d}\n' % (tab_title, chapter_count, section_count, tab_count))
                # Sequence id of the tab
tab_sequence = tab_info.a.get('aria-controls')
                # Escaped (encoded) tab content
tab_escape = soup.find(id=tab_sequence).string
tab = BeautifulSoup(tab_escape, 'lxml').div.div
                # Blocks inside the tab
blocks = tab.find_all('div', class_='xblock')
for block in blocks:
try:
                        # A few blocks do not have a data-type attribute
block_type = block['data-type']
except:
continue
if block_type == 'Problem' or block_type == 'InlineDiscussion' or block_type == 'HTMLModule':
continue
if block_type == 'Video':
video_sec_count += 1
                        # Collapse consecutive spaces or tabs into a single space
video_name = REG_SPACES.sub(' ', block.h2.string.strip())
OUTLINE.write(' %s {%d.%d.%d}*\n' % (video_name, chapter_count, section_count, video_sec_count))
video_id = block.div['data-ccsource']
                        # File name
file_name = REG_FILE.sub(' ', video_name)
file_name = REG_SORT.sub('', file_name)
file_name = '%d.%d.%d %s' % (chapter_count, section_count, video_sec_count, file_name)
print('------>', file_name)
get_video(video_id, file_name)
                        # Available subtitles
subtitle_available_url = BASE_URL + block.div['data-transcript-available-translations-url']
try:
subtitle_available = CONNECTION.get(subtitle_available_url).json()
except:
continue
base_subtitle_url = BASE_URL + block.div['data-transcript-translation-url'] + '/'
if len(subtitle_available) == 1:
multi_subtitle = False
else:
multi_subtitle = True
for subtitle_url in subtitle_available:
if multi_subtitle:
sub_file_name = file_name + '_' + subtitle_url + '.srt'
else:
sub_file_name = file_name + '.srt'
subtitle_url = base_subtitle_url + subtitle_url
CONNECTION.get(subtitle_url)
subtitle = CONNECTION.get(subtitle_available_url.rstrip('available_translations') + 'download').content
with open(os.path.join(BASE_DIR, sub_file_name), 'wb') as subtitle_file:
subtitle_file.write(subtitle)
def start(url, path='', book=True, cookies={}):
""" 流程控制 """
global BASE_DIR, VIDEOS, RENAMER, OUTLINE
requests.utils.add_dict_to_cookiejar(CONNECTION.cookies, cookies)
status = CONNECTION.get('http://www.xuetangx.com/header_ajax')
if status.json()['login']:
print('验证成功!\n')
else:
print('Cookies 失效,请获取新的 cookies!')
sys.exit(1)
    # Course information (about) page
about_page = CONNECTION.get(url).text
soup = BeautifulSoup(about_page, 'lxml')
    # Course title
course_name = soup.find(id='title1').string
    # Course publisher (usually a university)
institution = soup.find(class_='courseabout_text').a.string
    # Title usable as a folder name
dir_name = REG_FILE.sub('', course_name + ' - ' + institution)
print(dir_name)
BASE_DIR = os.path.join(path, dir_name)
    # Try to create the folder
try:
os.makedirs(BASE_DIR)
except FileExistsError:
pass
    # Courseware page URL
courseware = url.rstrip('about') + 'courseware'
    # Course handouts (info) page URL
handout = url.rstrip('about') + 'info'
OUTLINE = open(os.path.join(BASE_DIR, 'Outline.txt'), 'w', encoding='utf-8')
VIDEOS = open(os.path.join(BASE_DIR, 'Videos.txt'), 'w', encoding='utf-8')
RENAMER = open(os.path.join(BASE_DIR, 'Rename.bat'), 'w', encoding='utf-8')
RENAMER.write('CHCP 65001\n\n')
if book:
        # Using the handout page as the entry point is faster
get_book(handout)
get_handout(handout)
get_content(courseware)
VIDEOS.close()
RENAMER.close()
OUTLINE.close()
if __name__ == '__main__':
# start('http://www.xuetangx.com/courses/course-v1:TsinghuaX+00740043X_2015_T2+sp/about', r'F:\MOOCs', True)
pass
|
import torch
import torch.nn as nn
class Embedding(nn.Module):
def __init__(self, dataset, parameter):
super(Embedding, self).__init__()
self.device = parameter['device']
self.ent2id = dataset['ent2id']
self.es = parameter['embed_dim']
num_ent = len(self.ent2id)
self.embedding = nn.Embedding(num_ent, self.es)
if parameter['data_form'] == 'Pre-Train':
self.ent2emb = dataset['ent2emb']
self.embedding.weight.data.copy_(torch.from_numpy(self.ent2emb))
elif parameter['data_form'] in ['In-Train', 'Discard']:
nn.init.xavier_uniform_(self.embedding.weight)
def forward(self, triples):
idx = [[[self.ent2id[t[0]], self.ent2id[t[2]]] for t in batch] for batch in triples]
idx = torch.LongTensor(idx).to(self.device)
return self.embedding(idx)
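# Minimal usage sketch (a hypothetical toy setup, not part of the original project):
if __name__ == '__main__':
    toy_dataset = {'ent2id': {'a': 0, 'b': 1, 'c': 2}}
    toy_parameter = {'device': 'cpu', 'embed_dim': 4, 'data_form': 'In-Train'}
    emb = Embedding(toy_dataset, toy_parameter)
    batch = [[('a', 'rel', 'b'), ('b', 'rel', 'c')]]  # one task with two (h, r, t) triples
    print(emb(batch).shape)  # expected: torch.Size([1, 2, 2, 4])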
|
import nltk
import re
import pandas as pd
import numpy as np
from collections import Counter
from nltk.tokenize.api import TokenizerI
from nltk.corpus import words
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from nltk import WordNetLemmatizer
from nltk import pos_tag
class LegalitySyllableTokenizer(TokenizerI):
"""
Syllabifies words based on the Legality Principle and Onset Maximization.
>>> from nltk.tokenize import LegalitySyllableTokenizer
>>> from nltk import word_tokenize
>>> from nltk.corpus import words
>>> text = "This is a wonderful sentence."
>>> text_words = word_tokenize(text)
>>> LP = LegalitySyllableTokenizer(words.words())
>>> [LP.tokenize(word) for word in text_words]
[['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce'], ['.']]
"""
def __init__(self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=.001):
"""
:param tokenized_source_text: List of valid tokens in the language
:type tokenized_source_text: list(str)
:param vowels: Valid vowels in language or IPA represenation
:type vowels: str
:param legal_frequency_threshold: Lowest frequency of all onsets to be considered a legal onset
:type legal_frequency_threshold: float
"""
self.legal_frequency_threshold = legal_frequency_threshold
self.vowels = vowels
self.legal_onsets = self.find_legal_onsets(tokenized_source_text)
def find_legal_onsets(self, words):
onsets = [self.onset(word) for word in words]
legal_onsets = [k for k,v
in Counter(onsets).items()
if (v / len(onsets)) > self.legal_frequency_threshold]
return set(legal_onsets)
def onset(self, word):
onset = ""
for c in word.lower():
if c in self.vowels:
return onset
else:
onset += c
return onset
def tokenize(self, token):
syllables = []
syllable, current_onset = "", ""
vowel, onset = False, False
for char in token[::-1]:
char_lower = char.lower()
if not vowel:
syllable += char
vowel = bool(char_lower in self.vowels)
else:
if char_lower + current_onset[::-1] in self.legal_onsets:
syllable += char
current_onset += char_lower
onset = True
elif char_lower in self.vowels and not onset:
syllable += char
current_onset += char_lower
else:
syllables.append(syllable)
syllable = char
current_onset = ""
vowel = bool(char_lower in self.vowels)
syllables.append(syllable)
syllables_ordered = [syllable[::-1] for syllable in syllables][::-1]
return syllables_ordered
def get_trained_model():
trained_data=pd.read_csv('trained_model_outlier_free.csv')
    # Fit the regression model on the engineered features
X=trained_data[['lexical_diversity','text_lenght','num_words_per_sent','ave_syllabi_per_word','gsl_calculation_ave']]
y=trained_data['target']
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=0)
linear_reg=LinearRegression().fit(X_train,y_train)
return linear_reg
def process_test_data(test_file):
#read test file
test_data=pd.read_csv(test_file)
#refine test data
test_data['excerpt_new']=['. '.join([re.sub(r'\W+',' ',i) for i in nltk.sent_tokenize(text)])
for text in test_data['excerpt']]
    # Dimension 1: text length
test_data['text_lenght']=test_data['excerpt_new'].str.len()
    # Dimension 2: average word length
test_data['word_lenght_ave']=[sum([len(word) for word in nltk.word_tokenize(i)])/
len([word for word in nltk.word_tokenize(i)]) for i in test_data['excerpt_new']]
    # Dimension 3: lexical diversity of a text (number of tokens / number of unique tokens)
test_data['words_unique']=[len(set([ word for word in nltk.word_tokenize(i) ])) for i in test_data['excerpt_new']]
test_data['words_number']=[len([ word for word in nltk.word_tokenize(i) ]) for i in test_data['excerpt_new']]
test_data['lexical_diversity']=test_data['words_number']/test_data['words_unique']
    # Dimension 4: average number of words per sentence
test_data['words_count']=[len( [word for word in nltk.word_tokenize(i) ]) for i in test_data['excerpt_new']]
test_data['sent_count']=[len(nltk.sent_tokenize(i)) for i in test_data['excerpt_new']]
test_data['num_words_per_sent']=test_data['words_count']/test_data['sent_count']
    # Dimension 5: average syllables per word
Leg_Syl_tok=LegalitySyllableTokenizer(words.words())
test_data['ave_syllabi_per_word']=[sum([len(Leg_Syl_tok.tokenize(word)) for word in nltk.word_tokenize(text)])/
len([i for i in nltk.word_tokenize(text) ]) for text in test_data['excerpt_new']]
    # Dimension 6: GSL calculation, starting with lemmatization of the text
    # Lemmatize test data
lemmatizer=nltk.WordNetLemmatizer()
test_data['lemmatized']=[[lemmatizer.lemmatize(i,j[0].lower()) if j[0].lower() in ['a','n','v']
else lemmatizer.lemmatize(i).lower()
for i,j in pos_tag(nltk.word_tokenize(text))] for text in test_data['excerpt_new']]
test_data['lemmatized_set']=[list(set(i)) for i in test_data['lemmatized']]
#General Service List: Read
words_corpus=pd.read_csv('NGSL_Spoken_1.01.csv')
#create GSL corpus dictionary
wc_dict={words_corpus['Lemma'][i]:words_corpus['Cumulative'][i] for i in range(0,len(words_corpus['Lemma']))}
    # Dimension 6: GSL score averages
test_data['gsl_calculation']=[[wc_dict.get(word,0) for word in wc_dict.keys() if word in i]
for i in test_data['lemmatized_set']]
    test_data['gsl_calculation_ave'] = [sum(i) / len(i) for i in test_data['gsl_calculation']]
return test_data[['lexical_diversity','text_lenght','num_words_per_sent','ave_syllabi_per_word','gsl_calculation_ave']]
def get_predictions(test_file):
predict_values=pd.DataFrame([i for i in pd.read_csv(test_file)['id']]).rename(columns={0:'id'})
predict_values['target']=[round (i,1) for i in get_trained_model().predict(process_test_data(test_file))]
return predict_values.to_csv('submission.csv',index=False)
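# Example invocation (a sketch; assumes 'test.csv' with 'id' and 'excerpt' columns,
# plus 'trained_model_outlier_free.csv' and 'NGSL_Spoken_1.01.csv' in the working directory):
# get_predictions('test.csv')  # writes the rounded predictions to submission.csv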
|
"""helfeedback URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.generic.base import RedirectView
# from feedback.api import APIRouter
from feedback.views import FeedbackView, FeedbackCreate
from django.views.generic import TemplateView
admin.autodiscover()
# router = APIRouter()
urlpatterns = [
url(r'^admin/', admin.site.urls),
# url(r'^v1/', include(router.urls)),
url(r'^v1/', FeedbackView.as_view()),
url(r'add', FeedbackCreate.as_view(), name='feedback-add'),
url(r'thankyou', xframe_options_exempt(TemplateView.as_view(
template_name='feedback/feedback_thankyou.html')), name='feedback-thankyou'),
url(r'^$', RedirectView.as_view(url='v1/')),
]
|
# Python Program to find the area of triangle
a = float(input("Enter first side: "))
b = float(input("Enter second side: "))
c = float(input("Enter third side: "))
# calculate the semi-perimeter
s = (a + b + c) / 2
# calculate the area
area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
print("The area of the triangle is %0.2f" % area)
|
from math import asin,sin
from matplotlib import pyplot as plt
from pylab import linspace
from qc import Calc
# Build the set of ray heights
h = linspace(0.01, 17, 10000)
# Initialize the calculator
a = Calc()
a.set_raw_values(u1=0, hm=17)
# Compute h/hm
y = []
for i in linspace(0.01, 17, 10000):
y.append(i/a.get_raw_values('hm'))
# Compute the D line
LD = []
DetalLD = []
a.set_raw_values(r1=105.1175, r2=-74.7353, r3= -215.38763374564763)
a.set_raw_values(d1=5.32, d2=2.5)
a.set_raw_values(n1=1, n1s=1.51633, n2=1.51633, n2s=1.6727, n3=1.6727, n3s=1)
a.do_update()
ld = a.get_raw_values('l')
for i in h:
L3s = a.get_L3s(i)
DetalLD.append(L3s-ld)
LD.append(L3s)
print("h/hm=0.707时球差:",a.get_L3s(0.707*a.get_raw_values('hm'))-ld)
print("h/hm=1时球差:",a.get_L3s(a.get_raw_values('hm'))-ld)
# Compute the C line
LC = []
DetalLC = []
a.set_raw_values(n1=1, n1s=1.51385, n2=1.51385, n2s=1.66662, n3=1.66662, n3s=1)
a.do_update()
lc = a.get_raw_values('l')
for i in h:
L3s = a.get_L3s(i)
DetalLC.append(L3s-ld)
LC.append(L3s)
L3csh = a.get_L3s(a.get_raw_values('hm'))
L3cs = a.get_L3s(0.707*a.get_raw_values('hm'))
# Compute the F line
LF = []
DetalLF = []
a.set_raw_values(n1=1, n1s=1.52191, n2=1.52191, n2s=1.68749, n3=1.68749, n3s=1)
a.do_update()
lf = a.get_raw_values('l')
for i in h:
L3s = a.get_L3s(i)
DetalLF.append(L3s-ld)
LF.append(L3s)
L3fs = a.get_L3s(0.707*a.get_raw_values('hm'))
L3fsh = a.get_L3s(a.get_raw_values('hm'))
hmsc = L3fsh - L3csh
now_sc = L3fs - L3cs
fc = ((0.707 * a.get_raw_values('hm')) / a.get_mid_values('sinU3s')) - (17 / a.get_mid_values('u3s'))
osc = (fc - now_sc) / (17 / a.get_mid_values('u3s'))
fc2 = ((a.get_raw_values('hm')) / a.get_mid_values('sinU3s')) - (17 / a.get_mid_values('u3s'))
osc2 = (fc2 - hmsc) / (17 / a.get_mid_values('u3s'))
print(r"h/hm = 0.707时色差:",now_sc)
print(r"h/hm = 0.707时正弦差:",osc)
print(r"h/hm = 0.1时色差:",hmsc)
print(r"h/hm = 0.1时正弦差:",osc2)
#计算色差
SC = []
for i in range(len(h)):
SC.append(LF[i] - LC[i])
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(DetalLD,y,'y')
axes.plot(DetalLC,y,'r')
axes.plot(DetalLF,y,'b')
# Draw the vertical axis
x2 = linspace(0,0,1000)
y2 = linspace(-0.1,1.2,1000)
plt.plot(x2,y2 ,color='black',linewidth=0.7)
axes.set_xlabel("δL' (mm)")
axes.set_ylabel('h/hm')
fig2 = plt.figure()
axes2 = fig2.add_axes([0.1, 0.1, 0.8, 0.8])
axes2.plot(SC,y,'b')
plt.plot(x2,y2 ,color='black',linewidth=0.7)
axes2.set_xlabel("δL' (mm)")
axes2.set_ylabel('h/hm')
plt.show()
|
""" Class for IGMSurvey
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import json
from abc import ABCMeta
import warnings
import pdb
from collections import OrderedDict
from astropy import coordinates as coords
from astropy.io import ascii
from astropy import units as u
from astropy.table import Column, Table, vstack
from astropy.units.quantity import Quantity
from astropy.stats import poisson_conf_interval as aspci
from astropy.coordinates import SkyCoord, match_coordinates_sky
from linetools.isgm import utils as ltiu
from pyigm.abssys.igmsys import IGMSystem
from pyigm.abssys.utils import class_by_type
from pyigm import utils as pyigmu
try:
unic = unicode
except NameError:
unic = str
class IGMSurvey(object):
""" Class for a survey of absorption line systems.
Attributes
----------
abs_type : str, unicode
Type of Absorption system (DLA, LLS)
ref : str, optional
Reference(s) to the Survey
_abs_sys : list
List of AbsSystem objects
mask : bool array, optional
Defines a subset of the systems (e.g. statistical)
sightlines : Table, optional
Table of the sightlines in the survey
_data : Table, optional
Table of 'key' data
_dict : OrderedDict, optional
Nested data
"""
__metaclass__ = ABCMeta
@classmethod
def from_flist(cls, flist, tree=None, **kwargs):
""" Read from list of .dat files (historical JXP format)
Parameters
----------
flist : str
ASCII file including list of .dat files
tree : str, optional
Path to .dat files
kwargs :
Passed to __init__
"""
if tree is None:
tree = ''
# Load up (if possible)
data = ascii.read(tree+flist, data_start=0,
guess=False, format='no_header')
slf = cls(**kwargs)
slf.tree = tree
slf.flist = flist
# Load up
slf.dat_files = list(data['col1'])
# Generate IGMSys list
for dat_file in slf.dat_files:
slf._abs_sys.append(class_by_type(slf.abs_type).from_datfile(dat_file, tree=slf.tree))
print('Read {:d} files from {:s} in the tree {:s}'.format(
slf.nsys, slf.flist, slf.tree))
# Mask
slf.init_mask()
return slf
@classmethod
def from_sfits(cls, summ_fits, coords=None, **kwargs):
"""Generate the Survey from a summary FITS file or Table
Handles SPEC_FILES too.
Parameters
----------
summ_fits : str or Table
Summary FITS file
coords : SkyCoord array
Contains all the coords for all the systems
**kwargs : dict
passed to __init__
"""
# Init
slf = cls(**kwargs)
# Read
if isinstance(summ_fits, Table):
systems = summ_fits
else:
systems = Table.read(summ_fits)
nsys = len(systems)
# Special keys
kdict = dict(NHI=['NHI', 'logNHI', 'LOG_NHI'],
sig_NHI=['sig(logNHI)', 'SIGNHI', 'NHI_ERR'],
name=['Name'], vlim=['vlim'],
zabs=['Z_LLS', 'ZABS', 'zabs'],
zem=['Z_QSO', 'QSO_ZEM', 'ZEM', 'Z_EM'],
RA=['RA'], DEC=['DEC', 'Dec'])
# Parse the Table to make uniform the keys used
for key in kdict.keys():
for ikey in kdict[key]:
if ikey in systems.keys():
if ikey == key:
pass
else:
systems.rename_column(ikey, key)
# Set
slf._data = systems
# vlim
if 'vlim' not in slf._data.keys():
default_vlim = [-1000, 1000.]* u.km / u.s
slf._data['vlim'] = [default_vlim]*nsys
# Coords
if coords is None:
coords = SkyCoord(ra=slf._data['RA'], dec=slf._data['DEC'], unit='deg')
slf.coords = coords
# Mask
slf.mask = None
slf.init_mask()
# Return
return slf
def __init__(self, abs_type, ref='', verbose=False):
""" Initiator
Parameters
----------
abs_type : str, unicode
Type of IGMSystem in the Survey, e.g. MgII, DLA, LLS
ref : string, optional
Reference(s) for the survey
"""
self.abs_type = abs_type
self.ref = ref
self._abs_sys = []
self._data = Table()
self._dict = OrderedDict()
self.sightlines = None
self.coords = None # Intended to be a SkyCoord obj with *all* of the system coordinates
self.verbose=verbose
# Mask
self.mask = None
self.init_mask()
# Init
self.flist = None
@property
def nsys(self):
""" Number of systems
Returns
-------
nsys : int
Number of statistical if mask is set
"""
if self.mask is not None:
return np.sum(self.mask)
elif len(self._data) > 0:
return len(self._data)
elif len(self._dict) > 0:
return len(self._dict)
else:
return len(self._abs_sys)
def sys_idx(self, abssys_name):
""" System index"""
# Index
try:
idx = list(self._dict.keys()).index(abssys_name)
except ValueError:
raise ValueError("System {:s} is not in the _dict".format(abssys_name))
# Return
return idx
def abs_sys(self, inp, fill_coord=True):
""" Return an abs_system by index from the *masked* set
Instantiate as needed
        Parameters
        ----------
        inp : int
            Index of the system within the (masked) survey

        Returns
        -------
        isys : AbsSystem
        """
# Mask?
if self.mask is not None:
idx = np.where(self.mask)[0][inp]
else:
idx = inp
# Pull the system
isys = self._abs_sys[idx]
# Add coord
if fill_coord:
isys.coord = self.coords[idx]
return isys
def init_abs_sys(self, clobber=False):
""" Initialize the abs_sys list
"""
if (len(self._abs_sys) == 0) or clobber:
self._abs_sys = [None]*self.nsys
else:
warnings.warn("abs_sys list is already initialized. Use clobber=True to reset")
def init_mask(self):
""" Initialize the mask for abs_sys
"""
if self.nsys > 0:
self.mask = np.array([True]*self.nsys)
def add_abs_sys(self, abs_sys):
""" Add an IGMSys to the Survey
Enables one to add checks
Parameters
----------
abs_sys : IGMSystem
"""
assert self.chk_abs_sys(abs_sys)
# Might check to see if a duplicate exists..
# Append
self._abs_sys.append(abs_sys)
def binned_loz(self, zbins, NHI_mnx=(20.3, 23.00), debug=False):
""" Calculate l(z) empirically in zbins for an interval in NHI
Wrapper on lox
Parameters
----------
zbins : list
Defines redshift intervals
e.g. [2., 2.5, 3., 4.]
NHI_mnx : tuple, optional
min/max of NHI for evaluation
Returns
-------
lz, sig_lz_lower, sig_lz_upper : ndarray
"""
return self.binned_lox(zbins, NHI_mnx=NHI_mnx, use_Dz=True, debug=debug)
def binned_lox(self, zbins, NHI_mnx=(20.3, 23.00), use_Dz=False, debug=False):
""" Calculate l(X) in zbins for an interval in NHI
Parameters
----------
zbins : list
Defines redshift intervals
e.g. [2., 2.5, 3., 4.]
NHI_mnx : tuple, optional
min/max of NHI for evaluation
use_Dz : bool, optional
Use gz instead of gX.
This forces the calculation of l(z) instead of l(X)
Returns
-------
lX, sig_lX_lower, sig_lX_upper : ndarray
"""
# assign the nhbins
nhbins = np.array(NHI_mnx)
# generate the fN components
fncomp = self.__generate_fncomp__(nhbins, zbins)
# get the absorption path length
dXtot = self.__find_dXtot__(zbins, calc_Dz=use_Dz)
# total number of absorbers + poisson uncertainty
Ntot = fncomp.sum(axis=0)
Nunc = aspci(Ntot, interval='frequentist-confidence')
# l(X)
if debug:
pdb.set_trace()
lX = Ntot / dXtot
lX_lo = Nunc[0, :] / dXtot
lX_hi = Nunc[1, :] / dXtot
return lX, lX - lX_lo, lX_hi - lX
def binned_fn(self, nhbins, zbins, log=False):
""" Calculate f(N,X) empirically in bins of NHI and z
Parameters
----------
nhbins : list
zbins : list
log : bool, optional
Report log10 values?
Returns
-------
fn : ndarray
log10 f(N,X)
fn_lo : ndarray
error in fn (low side)
fn_hi : ndarray
error in fn (high side)
"""
# generate the fN components
fncomp = self.__generate_fncomp__(nhbins, zbins)
# calculate the uncertainty on the bins
fnunc = aspci(fncomp, interval='frequentist-confidence')
# get the absorption path length
dXtot = self.__find_dXtot__(zbins)
# find the nhi bin size
dNHI = np.power(10, nhbins[1:]) - np.power(10, nhbins[:-1])
# calculate the fN values
fn = np.transpose(np.transpose(fncomp / dXtot) / dNHI)
fn_lo = np.transpose(np.transpose(fnunc[0] / dXtot) / dNHI)
fn_hi = np.transpose(np.transpose(fnunc[1] / dXtot) / dNHI)
if log:
return np.log10(fn), np.log10(fn) - np.log10(fn_lo), np.log10(fn_hi) - np.log10(fn)
else:
return fn, fn - fn_lo, fn_hi - fn
def build_all_abs_sys(self, linelist=None, **kwargs):
""" Build all of the AbsSystem objects from the _dict
or _data if the _dict does not exist!
In that order
Parameters
----------
linelist : LineList, optional
**kwargs : Passed to build_abs_sys_from_dict
"""
# This speeds things up a bunch
if linelist is None:
linelist = default_linelist(self.verbose)
# Loop me
if len(self._dict) > 0:
print("Starting the AbsSystem build for the _dict. Be patient..")
for key in self._dict.keys():
_ = self.build_abs_sys_from_dict(key, linelist=linelist, **kwargs)
elif len(self._data) > 0:
for qq in range(self.nsys):
_ = self.build_abs_sys_from_data(qq)
else:
raise IOError("Nothing to build the systems with!")
# Return
print("Done!")
return
def build_abs_sys_from_data(self, row):
""" Build an AbsSystem from the _data
The item in self._abs_sys is filled and
the system is also returned
Parameters
----------
row : int
Row of the _data table
Ignores any masking -- this may change
Returns
-------
abs_sys : AbsSystem
"""
# vlim -- may make optional
vlim=self._data['vlim'][row]
if self._data['vlim'].unit is not None:
vlim *= self._data['vlim'].unit
else:
vlim = vlim * u.km/u.s
# skwargs
skwargs = {}
for key in ['NHI', 'sig_NHI', 'name', 'zem']:
if key in self._data.keys():
skwargs[key] = self._data[key][row]
# Instantiate
abssys = class_by_type(self.abs_type)(self.coords[row], self._data['zabs'][row], vlim, **skwargs)
# Fill
if len(self._abs_sys) == 0:
self.init_abs_sys()
self._abs_sys[row] = abssys
# Return too
return abssys
def build_abs_sys_from_dict(self, abssys_name, **kwargs):
""" Build an AbsSystem from the _dict
The item in self._abs_sys is filled and
the system is also returned
Parameters
----------
abssys_name : str
Needs to match a key in the dict
**kwargs
Passed to components_from_dict()
Returns
-------
abs_sys : AbsSystem
"""
# Index
idx = self.sys_idx(abssys_name)
# Instantiate
abssys = class_by_type(self.abs_type).from_dict(self._dict[abssys_name],
coord=self.coords[idx],
**kwargs)
# Fill
if len(self._abs_sys) == 0:
self.init_abs_sys()
self._abs_sys[idx] = abssys
# Return too
return abssys
def calculate_gz(self, zstep=1e-4, zmin=None, zmax=None, key_ZS='Z_START'):
""" Uses sightlines table to generate a g(z) array
Parameters
----------
zstep : float, optional
Step size for g(z) array
zmin : float, optional
Minimum redshift of evaluated array. Default is minimum in the sightlines
zmax : float, optional
Maximum redshift of evaluated array. Default is maximum in the sightlines
Returns
-------
zeval : ndarray
Redshifts where g(z) is evaluated
gz : ndarray
g(z)
"""
if self.sightlines is None:
raise IOError("calculate_gz: Need to set sightlines table")
# zeval
if zmin is None:
zmin = np.min(self.sightlines[key_ZS])
if zmax is None:
zmax = np.max(self.sightlines['Z_END'])
zeval = np.arange(zmin, zmax, step=zstep)
gz = np.zeros_like(zeval).astype(int)
# Evaluate
for row in self.sightlines:
gd = (zeval >= row[key_ZS]) & (zeval <= row['Z_END'])
gz[gd] += 1
# Return
return zeval, gz
def chk_abs_sys(self, abs_sys):
""" Preform checks on input abs_sys
Parameters
----------
abs_sys : IGMSystem
Returns
-------
bool
"""
if not isinstance(abs_sys, IGMSystem):
raise IOError("Must be an IGMSystem object")
return True
def components_from_dict(self, abssys_name, coord=None, linelist=None):
""" Build and return a list of AbsComponent objects
from the dict for a given system
Parameters
----------
abssys_name : str
coord : SkyCoord, optional
coordinates to use for the components
linelist : LineList, optional
Returns
-------
compllist : list of AbsComponent objects
"""
# Do it
if linelist is None:
linelist = default_linelist(self.verbose)
# Components
comps = ltiu.build_components_from_dict(self._dict[abssys_name],
coord=coord, linelist=linelist)
# Return
return comps
def data_from_dict(self):
""" Generate the data Table from the internal dict
"""
from astropy.table import Column
# Table columns
key0 = list(self._dict.keys())[0]
tab_clms = list(self._dict[key0].keys())
# Remove unwanted ones
rmv_keys = ['abs_type', 'components', 'kin', 'Refs']
for rkey in rmv_keys:
if rkey in tab_clms:
tab_clms.remove(rkey)
# Build it
for tclm in tab_clms:
values = []
for key in self._dict.keys():
values.append(self._dict[key][tclm])
# Add column
clm = Column(values, name=tclm)
self._data.add_column(clm)
def fill_ions(self, use_Nfile=False, jfile=None, use_components=False,
verbose=True):
""" Loop on systems to fill in _ionN Table
Parameters
----------
jfile : str, optional
JSON file containing the information
use_Nfile : bool, optional
Use (historic) .clm files?
use_components : bool, optional
Load up the Table with components (recommended)
"""
if jfile is not None:
# Load
with open(jfile) as data_file:
ions_dict = json.load(data_file)
# Loop on systems
for abs_sys in self._abs_sys:
abs_sys.get_ions(idict=ions_dict[abs_sys.name])
elif use_Nfile:
for abs_sys in self._abs_sys:
abs_sys.get_ions(use_Nfile=True, verbose=verbose)
elif use_components:
for abs_sys in self._abs_sys:
abs_sys._ionN = ltiu.table_from_complist(abs_sys._components)
else:
raise ValueError("Not sure how to load the ions")
# Get ions
def ions(self, Zion, Ej=0., skip_null=True, pad_with_nulls=False):
""" Generate a Table of columns and so on
Restrict to those systems where flg_clm > 0
Parameters
----------
Zion : tuple
Z, ion e.g. (6,4) for CIV
Ej : float [1/cm]
Energy of the lower level (0. is resonance)
skip_null : bool, optional (default: True)
Skip systems without an entry, else pad with zeros
pad_with_nulls : bool, optional
Pad missing/null systems with empty values. A bit risky
Returns
-------
tbl : MaskedTable of values for the Survey
Systems without the ion have rows masked
"""
from linetools.abund.ions import ion_to_name
if self._abs_sys[0]._ionN is None:
raise IOError("ionN tables are not set. Use fill_ionN")
# Loop me!
tbls = []
names = []
for kk,abs_sys in enumerate(self._abs_sys):
if len(abs_sys._ionN) == 0:
names.append('MASK_ME')
tbls.append(None)
continue
# Parse
mt = (abs_sys._ionN['Z'] == Zion[0]) & (abs_sys._ionN['ion'] == Zion[1]) & (
abs_sys._ionN['Ej'] == Ej)
if np.any(mt):
if np.sum(mt) > 1: # Generally should not get here
warnings.warn("Two components for ion {} for system {}. Taking the first one".format(Zion, abs_sys))
mt[np.where(mt)[0][1:]] = False
tbls.append(abs_sys._ionN[mt])
names.append(abs_sys.name)
else:
if skip_null is True: # This is probably dangerous
continue
else:
if pad_with_nulls:
nulltbl = abs_sys._ionN[:0].copy()
datatoadd = [abs_sys.coord.ra.deg,abs_sys.coord.dec.deg,
'none',Zion[0],Zion[1],Ej,
abs_sys.limits.vmin.value,
abs_sys.limits.vmax.value,
ion_to_name(Zion),0,0,0,'','none',abs_sys.zabs]
nulltbl['ion_name'].dtype = '<U6'
nulltbl.add_row(datatoadd)
tbls.append(nulltbl)
names.append(abs_sys.name)
else:
tbls.append(None)
names.append('MASK_ME')
# Fill in the bad ones
names = np.array(names)
idx = np.where(names != 'MASK_ME')[0]
if len(idx) == 0:
warnings.warn("There were no entries matching your input Ion={}".format(Zion))
return None
bad = np.where(names == 'MASK_ME')[0]
for ibad in bad:
tbls[ibad] = tbls[idx[0]]
# Stack me
try:
tbl = vstack(tbls)
except:
pdb.set_trace()
tbl['abssys_name'] = names
# Mask
tbl = Table(tbl, masked=True)
mask = names == 'MASK_ME'
for key in tbl.keys():
if key == 'flag_N':
tbl[key][mask] = 0
else:
tbl[key].mask = mask
'''
#
keys = [u'abssys_name', ] + list(self._abs_sys[kk]._ionN.keys())
t = Table(self._abs_sys[kk]._ionN[0:1]).copy() # Avoids mixin trouble
t.add_column(Column(['dum']*len(t), name='name', dtype='<U32'))
t = t[keys]
if 'Ej' not in keys:
warnings.warn("Ej not in your ionN table. Ignoring. Be careful..")
# Loop on systems (Masked)
for abs_sys in self._abs_sys:
# Grab
if 'Ej' in keys:
mt = ((abs_sys._ionN['Z'] == iZion[0])
& (abs_sys._ionN['ion'] == iZion[1])
& (abs_sys._ionN['Ej'] == Ej))
else:
mt = ((abs_sys._ionN['Z'] == iZion[0])
& (abs_sys._ionN['ion'] == iZion[1]))
if np.sum(mt) == 1:
irow = abs_sys._ionN[mt]
# Cut on flg_clm
if irow['flag_N'] > 0:
row = [abs_sys.name] + [irow[key][0] for key in keys[1:]]
t.add_row(row) # This could be slow
else:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row(row)
elif np.sum(mt) == 0:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row( row )
continue
else:
pdb.set_trace()
raise ValueError("Multple entries")
'''
# Reorder
all_keys = list(tbl.keys())
all_keys.remove('abssys_name')
all_keys = ['abssys_name']+all_keys
# Return
return tbl[all_keys]
def trans(self, inp):
""" Generate a Table of Data on a given transition, e.g. SiIII 1206
Parameters
----------
inp : str or Quantity
str -- Name of the transition, e.g. 'CII 1334'
Quantity -- Rest wavelength of the transition, e.g. 1334.53*u.AA
to 0.01 precision
Returns
-------
tbl : astropy.Table
"""
attrib = ['sys', 'z', 'flag_EW', 'EW', 'sig_EW', 'flag_N', 'logN', 'sig_logN']
nattrib = len(attrib)
clms = []
for ii in range(nattrib):
clms.append([])
for abs_sys in self._abs_sys:
# Name
clms[0].append(abs_sys.name)
#
aline = abs_sys.get_absline(inp)
if aline is None:
for jj in range(1,nattrib):
clms[jj].append(0)
else:
for jj in range(1,nattrib):
try: # Deal with Quantity
clms[jj].append(aline.attrib[attrib[jj]].value)
except AttributeError:
clms[jj].append(aline.attrib[attrib[jj]])
except KeyError:
clms[jj].append(0)
# Generate the Table
tbl = Table(clms, names=attrib)
# Return
return tbl
# Mask
def update_mask(self, mask, increment=False):
""" Update the Mask for the abs_sys
Parameters
----------
mask : array (usually Boolean)
Mask of systems
increment : bool, optional
Increment the mask (i.e. keep False as False)
"""
if len(mask) == len(self._abs_sys): # Boolean mask
if increment is False:
self.mask = mask
else:
self.mask = self.mask & mask
else:
raise ValueError('abs_survey: Needs developing!')
def write_survey(self, outfile='tmp.tar', tmpdir = 'IGM_JSON', chk_dict=True):
""" Generates a gzipped tarball of JSON files, one per system
Parameters
----------
outfile : str, optional
Output filename
tmpdir : str, optional
chk_dict : bool, optional
Check that the _dict matches what is in the _abs_sys list
Returns
-------
"""
import os, io
import subprocess
try:
os.mkdir(tmpdir)
except OSError:
pass
jfiles = []
# Loop on systems
for igm_abs in self._abs_sys:
# Dict
idict = igm_abs.to_dict()
# Temporary JSON file
json_fil = tmpdir+'/'+igm_abs.name+'.json'
jfiles.append(json_fil)
with io.open(json_fil, 'w', encoding='utf-8') as f:
f.write(unic(json.dumps(idict, sort_keys=True, indent=4,
separators=(',', ': '))))
# Tar
subprocess.call(['tar', '-czf', outfile, tmpdir])
print('Wrote: {:s}'.format(outfile))
# Clean up
for jfile in jfiles:
try:
os.remove(jfile)
except OSError: # Likely a duplicate. This can happen
pass
os.rmdir(tmpdir)
def __generate_fncomp__(self, nhbins, zbins):
""" Generate binned evaluation of f(NHI,X)
Parameters
----------
nhbins : list
Defines NHI bins for f(NHI,X) evaluation, e.g.
[20.3, 20.6, 21.0, 21.5, 23.]
zbins : list
Returns
-------
fncomp : ndarray
f(NHI,X)
"""
# calculate the total absorption path length g(X) from g(z)
#z, gX = self.__calculate_gX__()
# create the fn array
zabs = self.__getattr__('zabs')
nhi = self.__getattr__('NHI')
fncomp = np.histogram2d(nhi, zabs, bins=[nhbins, zbins])[0]
return fncomp
def __find_dXtot__(self, zbins, calc_Dz=False):
""" Calculate DX in zbins
Parameters
----------
zbins : list
calc_Dz : bool, optional
Return Dztot instead of DXtot
Returns
-------
dXtot : ndarray
dX for the full survey
"""
# get z, g(z)
z, gz = self.calculate_gz()
dz = z[1] - z[0]
#
if not calc_Dz:
dXdz = pyigmu.cosm_xz(z, cosmo=self.cosmo, flg_return=1)
else:
dXdz = 1.
dXtot = np.zeros(len(zbins) - 1)
for kk in range(len(zbins) - 1):
# the indices of values within the redshift range
idx = np.where((z >= zbins[kk]) & (z < zbins[kk + 1]))
dXtot[kk] = np.sum((gz*dz*dXdz)[idx])
return dXtot
def __getattr__(self, k):
""" Generate an array of attribute 'k' from the IGMSystems
NOTE: We only get here if the Class doesn't have this attribute set already
The Mask will be applied
Order of search is:
_data
_dict
_abs_sys
Parameters
----------
k : str
Attribute
Returns
-------
numpy array
"""
# Special case(s)
if k == 'coord':
lst = self.coords
if self.mask is not None:
return lst[self.mask]
else:
return lst
elif k in self._data.keys(): # _data
lst = self._data[k]
else:
lst = None
# Now try _dict
if lst is None:
if len(self._dict) > 0:
if k in next(iter(self._dict.items()))[1].keys():
lst = [self._dict[key][k] for key in self._dict.keys()]
# AbsSystem last!
if lst is None:
if len(self._abs_sys) == 0:
raise ValueError("Attribute does not exist anywhere!")
try:
lst = [getattr(abs_sys, k) for abs_sys in self._abs_sys]
except ValueError:
raise ValueError("Attribute does not exist")
# Recast as an array
return lst_to_array(lst, mask=self.mask)
def __add__(self, other, toler=2*u.arcsec):
""" Combine one or more IGMSurvey objects
Routine does a number of checks on the abstype,
the uniqueness of the sightlines and systems, etc.
Parameters
----------
other : IGMSurvey
toler : Angle or Quantity
Tolerance for uniqueness
Returns
-------
combined : IGMSurvey
"""
# Check the Surveys are the same type
if self.abs_type != other.abs_type:
raise IOError("Combined surveys need to be same abs_type")
# Init
combined = IGMSurvey(self.abs_type)
if self.ref is not None:
combined.ref = self.ref + ',' + other.ref
else:
combined.ref = None
# Check for unique systems, including masked ones
other_coord = other.coord
for abssys in self._abs_sys:
if np.sum((abssys.coord.separation(other_coord) < toler) & (
np.abs(abssys.zabs-other.zabs) < (1000*(1+abssys.zabs)/3e5))) > 0:
raise NotImplementedError("Need ready to deal with this")
# Combine systems
combined._abs_sys = self._abs_sys + other._abs_sys
if self.mask is not None:
combined.mask = np.concatenate((self.mask, other.mask)).flatten()
else:
combined.mask = None
combined.init_mask()
combined._data = vstack([self._data, other._data])
# Sightlines?
if self.sightlines is not None:
slf_scoord = SkyCoord(ra=self.sightlines['RA'],
dec=self.sightlines['DEC'], unit='deg')
oth_scoord = SkyCoord(ra=other.sightlines['RA'],
dec=other.sightlines['DEC'], unit='deg')
idx, d2d, d3d = coords.match_coordinates_sky(slf_scoord,
oth_scoord, nthneighbor=1)
mt = d2d < toler
if np.sum(mt) > 0:
# Take sightlines from the first survey
warnings.warn("Overlapping sightlines. Am using those in your first entry")
msk = np.array([True]*len(other.sightlines))
msk[idx[mt]] = False
combined.sightlines = vstack([self.sightlines, other.sightlines[msk]])
print("You should probably regenerate the system mask!")
else:
# Combine systems
combined.sightlines = vstack([self.sightlines,
other.sightlines])
# Coords
combined.coords = SkyCoord(ra=combined._data['RA'],
dec=combined._data['DEC'], unit='deg')
# Return
return combined
def __repr__(self):
if self.flist is not None:
return '<IGMSurvey: {:s} {:s}, nsys={:d}, type={:s}, ref={:s}>'.format(
self.tree, self.flist, self.nsys, self.abs_type, self.ref)
else:
repr = '<IGMSurvey: nsys={:d}, type={:s}, ref={:s}'.format(
self.nsys, self.abs_type, self.ref)
if self.sightlines is not None:
repr = repr + ', nsightlines={:d}'.format(len(self.sightlines))
repr = repr +'>'
return repr
class GenericIGMSurvey(IGMSurvey):
"""A simple absorption line survey
"""
def __init__(self, **kwargs):
IGMSurvey.__init__(self, 'Generic', **kwargs)
def lst_to_array(lst, mask=None):
""" Simple method to convert a list to an array
Allows for a list of Quantity objects
Parameters
----------
lst : list
Should be number or Quantities
mask : boolean array, optional
Returns
-------
array or Quantity array
"""
if mask is None:
mask = np.array([True]*len(lst))
if isinstance(lst[0], Quantity):
return Quantity(lst)[mask]
else:
return np.array(lst)[mask]
def default_linelist(verbose=True):
from linetools.lists.linelist import LineList
if verbose:
print("No LineList input. Assuming you want the ISM list")
linelist = LineList('ISM')
return linelist
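# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal illustration of driving a survey from a summary table; the column
# values are made up, and class_by_type() is assumed to know this abs_type.
#
# from astropy.table import Table
# srvy = GenericIGMSurvey()
# srvy._data = Table(dict(RA=[10.0], DEC=[-5.0], zabs=[2.3], NHI=[20.5],
#                         vlim=[[-500., 500.]]))
# srvy.coords = SkyCoord(ra=srvy._data['RA'], dec=srvy._data['DEC'], unit='deg')
# srvy.init_mask()
# isys = srvy.build_abs_sys_from_data(0)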
|
'''
Created on Feb 19, 2011
Use this module to run the XPath parser unit tests
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
from arelle.XPathParser import parser_unit_test
parser_unit_test()
|
import copy
from functools import update_wrapper
import re
class Predicate(object):
"""A Predicate class that represents combining predicates with & and |"""
def __init__(self, predicate):
self.pred = predicate
def __call__(self, obj):
return self.pred(obj)
def __copy_pred(self):
return copy.copy(self.pred)
def __and__(self, predicate):
def func(obj):
return self.pred(obj) and predicate(obj)
return Predicate(func)
def __or__(self, predicate):
def func(obj):
return self.pred(obj) or predicate(obj)
return Predicate(func)
def predicate(func):
'''Decorator for making Predicates.'''
result = Predicate(func)
update_wrapper(result, func)
return result
def has_tag(tag):
def has_tag(q):
'''Return true if question has a specific tag.'''
return tag in q.tags
p = Predicate(has_tag)
update_wrapper(p,has_tag)
return p
def has_matching_tag(pattern):
def has_matching_tag(q):
'''Return true if question has a tag matching a pattern.'''
for tag in q.tags:
if re.match(pattern,tag):
return True
return False
p = Predicate(has_matching_tag)
update_wrapper(p,has_matching_tag)
return p
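if __name__ == '__main__':
    # Small self-contained demo (not from the original module): combine
    # predicates with & and | on a throwaway object that carries a ``tags`` list.
    class _Question(object):
        def __init__(self, tags):
            self.tags = tags

    python_or_django = has_tag('python') | has_tag('django')
    looks_pythonic = has_matching_tag(r'py.*')
    print(python_or_django(_Question(['django'])))                     # True
    print((python_or_django & looks_pythonic)(_Question(['python'])))  # True
    print((python_or_django & looks_pythonic)(_Question(['django'])))  # False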
|
import os
import sys
from itertools import product
filename = __file__[:-5] + '-input'
with open(filename) as f:
lines = f.read().splitlines()
player_positions = list(map(lambda line: int(line[-1]), lines))
turn_rolls = product([1,2,3], repeat=3)
turn_sums = list(map(sum, turn_rolls))
turn_sum_set = set(turn_sums)
turn_sum_occurances = {
s: turn_sums.count(s) for s in turn_sum_set
}
found_outcomes = {}
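# Memoized count of the universes won by each player (this is the quantum
# "Dirac dice" style puzzle): the state is (whose turn, both positions, both
# scores), and each distinct 3-roll sum is weighted by how many of the 27
# orderings produce it (turn_sum_occurances).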
def player_turn(player_index, player_positions, player_scores):
outcome = [0, 0]
if (player_index, tuple(player_positions), tuple(player_scores)) in found_outcomes:
return found_outcomes[(player_index, tuple(player_positions), tuple(player_scores))]
for roll_sum in turn_sum_set:
local_scores = player_scores[:]
local_positions = player_positions[:]
local_positions[player_index] = (local_positions[player_index] + roll_sum)%10
local_scores[player_index] += local_positions[player_index] if local_positions[player_index] != 0 else 10
local_outcome = [0, 0]
if local_scores[player_index] > 20:
local_outcome[player_index] = turn_sum_occurances[roll_sum]
else:
local_outcome = list(map(lambda out: out*turn_sum_occurances[roll_sum], player_turn((player_index+1)%2, local_positions, local_scores)))
outcome[0] += local_outcome[0]
outcome[1] += local_outcome[1]
found_outcomes[(player_index, tuple(player_positions), tuple(player_scores))] = outcome
return outcome
outcome = player_turn(0, player_positions[:], [0, 0])
print(max(outcome))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as dist
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from torchvision import datasets, transforms
import math
from numpy import prod
from .vae import VAE
from pvae.utils import Constants
from pvae.distributions import RiemannianNormal, WrappedNormal
from torch.distributions import Normal
from pvae import manifolds
from .architectures import EncLinear, DecLinear, EncWrapped, DecWrapped, EncMob, DecMob, DecGeo, DecBernouilliWrapper, EncWrappedConv, DecWrappedConv, EncLinearConv, DecLinearConv, DecGyroConv
from pvae.dataloaders.toy_sampled_triplet_dataset import ToySampledTripletDataset
from pvae.dataloaders.toy_sampled_triplet_test_dataset import ToySampledTripletTestDataset
width = 10
height = 10
depth = 10
data_size = torch.Size([1, width, height, depth])
class ToySampledTripletConv(VAE):
def __init__(self, params):
c = nn.Parameter(params.c * torch.ones(1), requires_grad=False)
manifold = getattr(manifolds, params.manifold)(params.latent_dim, c)
super(ToySampledTripletConv, self).__init__(
eval(params.prior), # prior distribution
eval(params.posterior), # posterior distribution
dist.Normal, # likelihood distribution
eval('Enc' + params.enc)(manifold, data_size, getattr(nn, params.nl)(), params.num_hidden_layers, params.hidden_dim, params.prior_iso),
eval('Dec' + params.dec)(manifold, data_size, getattr(nn, params.nl)(), params.num_hidden_layers, params.hidden_dim),
params
)
self.manifold = manifold
self.c = c
self._pz_mu = nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False)
self._pz_logvar = nn.Parameter(torch.zeros(1, 1), requires_grad=params.learn_prior_std)
self.modelName = 'Toy'
'''for child in self.dec.children():
for param in child.parameters():
param.requires_grad = False'''
@property
def pz_params(self):
return self._pz_mu.mul(1), F.softplus(self._pz_logvar).div(math.log(2)).mul(self.prior_std), self.manifold
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
train_loader = DataLoader(
ToySampledTripletDataset(width, height, depth, no_background=True),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
ToySampledTripletTestDataset(width, height, depth),
batch_size=batch_size, shuffle=False, **kwargs)
return train_loader, test_loader
def generate(self, runPath, epoch):
N, K = 64, 9
mean, means, samples = super(ToySampledTripletConv, self).generate(N, K)
save_image(mean.data.cpu(), '{}/gen_mean_{:03d}.png'.format(runPath, epoch))
save_image(means.data.cpu(), '{}/gen_means_{:03d}.png'.format(runPath, epoch))
def reconstruct(self, data, runPath, epoch):
recon = super(ToySampledTripletConv, self).reconstruct(data[:8])
comp = torch.cat([data[:8], recon])
save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))
def getMus(self, data, runPath):
mus = self.enc(data)
return mus[0]
def forward(self, x, positive_child, negative_child, K=1):
qz_x = self.qz_x(*self.enc(x))
zs = qz_x.rsample(torch.Size([K]))
px_z = self.px_z(*self.dec(zs))
if positive_child is None:
return qz_x, px_z, zs
parent_mu = self.enc(x)[0]
positive_child_mu = self.enc(positive_child)[0]
negative_child_mu = self.enc(negative_child)[0]
return qz_x, px_z, zs, parent_mu, positive_child_mu, negative_child_mu
|
from django.db.models import Q, Manager
from django.utils.timezone import now  # timezone-aware now() used by point_of_timeout()
from locking import LOCK_TIMEOUT
import datetime
"""
LOCKED
if (now() - self.locked_at).seconds < LOCK_TIMEOUT:
self.locked_at < (NOW - TIMEOUT)
"""
def point_of_timeout():
delta = datetime.timedelta(seconds=LOCK_TIMEOUT)
return now() - delta
class LockedManager(Manager):
def get_query_set(self):
timeout = point_of_timeout()
return super(LockedManager, self).get_query_set().filter(_locked_at__gt=timeout, _locked_at__isnull=False)
class UnlockedManager(Manager):
def get_query_set(self):
timeout = point_of_timeout()
return super(UnlockedManager, self).get_query_set().filter(Q(_locked_at__lte=timeout) | Q(_locked_at__isnull=True))
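# --- Hedged usage sketch (not part of the original module) -------------------
# Example wiring on a model with a nullable ``_locked_at`` DateTimeField; the
# model name below is hypothetical.
#
# class Document(models.Model):
#     _locked_at = models.DateTimeField(null=True, blank=True)
#
#     objects = Manager()
#     locked = LockedManager()      # rows whose lock is younger than LOCK_TIMEOUT
#     unlocked = UnlockedManager()  # rows never locked, or whose lock has expired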
|
from bases import point, vector, EPSILON
from ray import ray
from sphere import sphere
class Intersection:
def __init__(self, time, object):
self.t = time
self.object = object
# def __str__(self):
# return 'time = {} object = {}'.format(self.t, self.object)
def prepare_computations(self, ray):
computations = {}
computations['time'] = self.t
computations['object'] = self.object
computations['point'] = ray.position(computations['time'])
computations['eyev'] = -ray.Direction()
computations['normalv'] = self.object.normal_at(computations['point'])
computations['inside'] = computations['normalv'].dot(computations['eyev']) < 0
if computations['inside']:
computations['normalv'] = -computations['normalv']
computations['over_point'] = computations['point'] + (computations['normalv'] * EPSILON)
return computations
class Intersections:
def __init__(self, *intersections):
self.sorted_intersections = sorted(intersections[::], key=lambda i: i.t)
self.count = len(self)
def __len__(self):
return len(self.sorted_intersections)
def __getitem__(self, key):
return self.sorted_intersections[key]
def hit(self):
xs = list(filter(lambda x: x.t >= 0, self.sorted_intersections))
if len(xs) < 1:
return None
return xs[0]
# if __name__ == '__main__':
# r = ray(point(0, 0, -5), vector(0, 0, 1))
# shape = sphere()
# i = Intersection(4, shape)
# comps = i.prepare_computations(r)
# print(comps['time'])
# print(comps['object'])
# print(comps['point'])
# print(comps['eyev'])
# print(comps['normalv'])
# print(comps['inside'])
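# Picking the visible hit: Intersections keeps its members sorted by t, and
# hit() returns the first intersection with a non-negative t (or None), e.g.
# xs = Intersections(Intersection(5, shape), Intersection(-1, shape), Intersection(2, shape))
# xs.hit().t # -> 2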
|
#!/usr/bin/env python
""" Refine 2D triangulation
"""
import argparse
import pymesh
import numpy as np
from numpy.linalg import norm
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--engine", help="Triangulation engine",
choices=("triangle_refiner", "mmg_delaunay"), default="auto");
parser.add_argument("input_mesh");
parser.add_argument("output_mesh");
return parser.parse_args();
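# compute_metric builds a per-vertex sizing field for the refiner: vertices far
# from the cell/boundary edges get a target edge length of roughly ``tol``,
# while vertices close to those edges are clamped down to ``0.1 * tol``, so the
# triangulation is refined most aggressively near the boundary.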
def compute_metric(mesh):
if mesh.has_attribute("cell"):
cell_ids = mesh.get_attribute("cell").astype(int).ravel();
cut_mesh = pymesh.cut_mesh(mesh, cell_ids);
else:
cut_mesh = mesh;
tree = pymesh.AABBTree2();
tree.load_data(cut_mesh.vertices, cut_mesh.boundary_edges);
sq_dist, indices = tree.look_up(mesh.vertices);
dist = np.sqrt(sq_dist);
bbox_min, bbox_max = mesh.bbox;
tol = norm(bbox_max - bbox_min) / 20.0;
metric = np.clip(dist / tol, 0.1, 1.0) * tol;
return metric;
def main():
args = parse_args();
mesh = pymesh.load_mesh(args.input_mesh, drop_zero_dim=True);
metric = compute_metric(mesh);
mesh = pymesh.refine_triangulation(mesh, metric, engine=args.engine);
pymesh.save_mesh(args.output_mesh, mesh);
if __name__ == "__main__":
main();
|
import unittest
import sys
import StringIO
import __builtin__
import os
from distutils import log
# Reload module to run its global section under coverage supervision
import distcovery.coverage_wrapper
reload(distcovery.coverage_wrapper)
from distcovery.coverage_wrapper import _DummyCoverage, Coverage, \
_NO_COVERAGE_PACKAGE_WARNING
class _MockDistribution(object):
def __init__(self, py_modules=None, packages=None):
self.py_modules = py_modules if py_modules else []
self.packages = packages if packages else []
class Test_DummyCoverage(unittest.TestCase):
def test_creation(self):
coverage = _DummyCoverage(source=[])
self.assertTrue(isinstance(coverage, _DummyCoverage))
def test_start(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.start(), None)
def test_stop(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.stop(), None)
def test_report(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.report(), None)
class _MockCoverageModule(object):
def __init__(self, coverage):
self.coverage = coverage
class _MockCoverage(object):
def __init__(self):
self.creations = []
self.starts = 0
self.stops = 0
def __call__(self, *args, **kwargs):
self.creations.append((args, kwargs))
return self
def start(self):
self.starts +=1
def stop(self):
self.stops += 1
def report(self):
print '\tThe report'
class TestCoverage(unittest.TestCase):
def setUp(self):
super(TestCoverage, self).setUp()
self.__threshold = log.set_threshold(log.INFO)
self.__stdout = sys.stdout
self.stdout = StringIO.StringIO()
sys.stdout = self.stdout
self.__stderr = sys.stderr
self.stderr = StringIO.StringIO()
sys.stderr = self.stderr
self.__import = __builtin__.__import__
def tearDown(self):
__builtin__.__import__ = self.__import
sys.stderr = self.__stderr
sys.stdout = self.__stdout
log.set_threshold(self.__threshold)
super(TestCoverage, self).tearDown()
def __no_coverage_import(self, name, *args):
if name == 'coverage':
raise ImportError('test')
return self.__import(name, *args)
def __mock_coverage_import(self, name, *args):
if name == 'coverage':
return _MockCoverageModule(self.__coverage)
return self.__import(name, *args)
def test_creation_disabled(self):
__builtin__.__import__ = self.__no_coverage_import
coverage = Coverage(True, '', _MockDistribution())
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.stderr.getvalue(), '')
def test_creation_no_coverage(self):
__builtin__.__import__ = self.__no_coverage_import
coverage = Coverage(False, '', _MockDistribution())
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.stderr.getvalue(),
_NO_COVERAGE_PACKAGE_WARNING % 'test' + '\n')
def test_creation(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
coverage = Coverage(False, 'test',
_MockDistribution(['xxx', 'yyy', 'zzz'],
['xxx', 'xxx.yyy', 'yyy']))
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.__coverage.creations,
[((), {'source': [os.path.join('test', 'xxx.py'),
os.path.join('test', 'yyy.py'),
os.path.join('test', 'zzz.py'),
os.path.join('test', 'xxx'),
os.path.join('test', 'yyy')]})])
def test_context(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
first_path = sys.path[0]
test_path = os.path.join(first_path, 'test')
coverage = Coverage(False, test_path, _MockDistribution(['xxx']))
self.assertEqual(self.__coverage.starts, 0)
self.assertEqual(self.__coverage.stops, 0)
with coverage:
self.assertEqual(sys.path[0], test_path)
self.assertEqual(self.__coverage.starts, 1)
self.assertEqual(self.__coverage.stops, 0)
self.assertEqual(sys.path[0], first_path)
self.assertEqual(self.__coverage.starts, 1)
self.assertEqual(self.__coverage.stops, 1)
def test_report(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
coverage = Coverage(False, 'test', _MockDistribution(['xxx']))
coverage.report()
self.assertEqual(self.stdout.getvalue(),
'\nCoverage report:\n\tThe report\n')
def test_report_coverage_disabled(self):
coverage = Coverage(True, '', _MockDistribution())
coverage.report()
self.assertEqual(self.stdout.getvalue(), '')
if __name__ == '__main__':
unittest.main()
|
# Code is from: https://github.com/interaction-dataset/interaction-dataset.
DELTA_TIMESTAMP_MS = 100 # similar throughout the whole dataset
class MotionState:
def __init__(self, time_stamp_ms):
assert isinstance(time_stamp_ms, int)
self.time_stamp_ms = time_stamp_ms
self.x = None
self.y = None
self.vx = None
self.vy = None
self.psi_rad = None
def __str__(self):
return "MotionState: " + str(self.__dict__)
class Track:
def __init__(self, id):
# assert isinstance(id, int)
self.track_id = id
self.agent_type = None
self.length = None
self.width = None
self.time_stamp_ms_first = None
self.time_stamp_ms_last = None
self.motion_states = dict()
def __str__(self):
string = "Track: track_id=" + str(self.track_id) + ", agent_type=" + str(self.agent_type) + \
", length=" + str(self.length) + ", width=" + str(self.width) + \
", time_stamp_ms_first=" + str(self.time_stamp_ms_first) + \
", time_stamp_ms_last=" + str(self.time_stamp_ms_last) + \
"\n motion_states:"
for key, value in sorted(self.motion_states.items()):
string += "\n " + str(key) + ": " + str(value)
return string
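if __name__ == "__main__":
    # Tiny illustrative example (not part of the original dataset reader):
    # build one track holding a single motion state and print it.
    ms = MotionState(time_stamp_ms=0)
    ms.x, ms.y = 0.0, 0.0
    ms.vx, ms.vy = 1.0, 0.0
    ms.psi_rad = 0.0
    track = Track(id=1)
    track.agent_type = "car"
    track.length, track.width = 4.5, 1.8
    track.time_stamp_ms_first = track.time_stamp_ms_last = ms.time_stamp_ms
    track.motion_states[ms.time_stamp_ms] = ms
    print(track)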
|
import torch
from torch import nn
import functools
from adaptor.basic_adaptor import NormalizeLayer
from models.test_model import Flatten
import numpy as np
from numba import jit, njit
class FlattenConv2D(nn.Module):
"""
Transforms a 2D convolutional layer into an equivalent fully-connected layer with fixed weights.
"""
def __init__(self, orgnl_layer, shape, load_weight=True):
super(FlattenConv2D, self).__init__()
self.orgnl_layer = orgnl_layer
assert isinstance(self.orgnl_layer, nn.modules.conv.Conv2d)
# currently only these parameters are supported, and a channel-wise bias is assumed;
# supporting more configurations is future work
assert self.orgnl_layer.dilation == (1, 1)
assert self.orgnl_layer.groups == 1
self.orgnl_layer = orgnl_layer
self.in_shape = shape
self.out_shape = [self.orgnl_layer.out_channels,
(self.in_shape[1] + 2 * self.orgnl_layer.padding[0] - self.orgnl_layer.kernel_size[0]) // self.orgnl_layer.stride[0] + 1,
(self.in_shape[2] + 2 * self.orgnl_layer.padding[1] - self.orgnl_layer.kernel_size[1]) // self.orgnl_layer.stride[1] + 1]
conv_weight = self.orgnl_layer.weight
conv_bias = self.orgnl_layer.bias
padding = self.orgnl_layer.padding
stride = self.orgnl_layer.stride
kernel_size = self.orgnl_layer.kernel_size
weight_shape = list(conv_weight.size())
in_cells = self.in_shape[0] * self.in_shape[1] * self.in_shape[2]
out_cells = self.out_shape[0] * self.out_shape[1] * self.out_shape[2]
w_cells = torch.numel(conv_weight)
b_cells = torch.numel(conv_bias)
self.in_numel = in_cells
self.in_features = in_cells
self.out_numel = out_cells
self.out_features = out_cells
conv_weight = conv_weight.contiguous().view(w_cells)
weight = torch.zeros((out_cells, in_cells), dtype=torch.double).cuda()
bias = torch.zeros((out_cells,), dtype=torch.double).cuda()
if load_weight:
conv_weight = conv_weight.detach().cpu().numpy()
conv_bias = conv_bias.detach().cpu().numpy()
weight = np.zeros((out_cells, in_cells), np.double)
bias = np.zeros((out_cells,), np.double)
FlattenConv2D.load_weight(self.in_shape, self.out_shape, padding, stride, kernel_size, weight_shape, conv_weight, conv_bias, weight, bias)
weight = torch.Tensor(weight).cuda()
bias = torch.Tensor(bias).cuda()
# cnt = 0
# for o in range(self.out_shape[0]):
# for j in range(self.out_shape[1]):
# for k in range(self.out_shape[2]):
# out_idx = o * self.out_shape[1] * self.out_shape[2] + j * self.out_shape[2] + k
# for jj in range(kernel_size[0]):
# for kk in range(kernel_size[1]):
# in_x = - padding[0] + stride[0] * j + jj
# in_y = - padding[1] + stride[1] * k + kk
# if in_x < 0 or in_x >= self.in_shape[1] or in_y < 0 or in_y >= self.in_shape[2]:
# continue
# for i in range(self.in_shape[0]):
# in_idx = i * self.in_shape[1] * self.in_shape[2] + in_x * self.in_shape[2] + in_y
# w_idx = o * weight_shape[1] * weight_shape[2] * weight_shape[3] + \
# i * weight_shape[2] * weight_shape[3] + \
# jj * weight_shape[3] + \
# kk
# weight[out_idx][in_idx] += conv_weight[w_idx]
# cnt += 1
# for o in range(self.out_shape[0]):
# for j in range(self.out_shape[1]):
# for k in range(self.out_shape[2]):
# out_idx = o * self.out_shape[1] * self.out_shape[2] + j * self.out_shape[2] + k
# bias[out_idx] = conv_bias[o]
self.weight = weight
self.bias = bias
@jit(nopython=True)
def load_weight(in_shape,
out_shape,
padding, stride, kernel_size, weight_shape,
conv_weight, conv_bias,
weight, bias):
cnt = 0
for o in range(out_shape[0]):
for j in range(out_shape[1]):
for k in range(out_shape[2]):
out_idx = o * out_shape[1] * out_shape[2] + j * out_shape[2] + k
for jj in range(kernel_size[0]):
for kk in range(kernel_size[1]):
in_x = - padding[0] + stride[0] * j + jj
in_y = - padding[1] + stride[1] * k + kk
if in_x < 0 or in_x >= in_shape[1] or in_y < 0 or in_y >= in_shape[2]:
continue
for i in range(in_shape[0]):
in_idx = i * in_shape[1] * in_shape[2] + in_x * in_shape[2] + in_y
w_idx = o * weight_shape[1] * weight_shape[2] * weight_shape[3] + \
i * weight_shape[2] * weight_shape[3] + \
jj * weight_shape[3] + \
kk
weight[out_idx][in_idx] += conv_weight[w_idx]
cnt += 1
for o in range(out_shape[0]):
for j in range(out_shape[1]):
for k in range(out_shape[2]):
out_idx = o * out_shape[1] * out_shape[2] + j * out_shape[2] + k
bias[out_idx] = conv_bias[o]
return weight, bias
def forward(self, x):
return torch.matmul(x, self.weight.t()) + self.bias
def model_transform(model, in_shape, load_weight=True):
new_layers = [Flatten()]
now_shape = in_shape
for l in model:
if isinstance(l, nn.modules.conv.Conv2d):
new_layer = FlattenConv2D(l, now_shape, load_weight)
now_shape = new_layer.out_shape
new_layers.append(new_layer)
elif isinstance(l, nn.modules.activation.ReLU):
new_layers.append(nn.ReLU())
elif isinstance(l, Flatten):
now_shape = [functools.reduce(lambda x, y: x * y, now_shape, 1)]
elif isinstance(l, nn.modules.linear.Linear):
if now_shape != [l.in_features]:
    raise ValueError('Shape mismatch: Linear layer expects {} features but the flattened shape is {}'.format(l.in_features, now_shape))
now_shape = [l.out_features]
new_layers.append(l)
elif isinstance(l, NormalizeLayer):
# ignore the normalization layer, but require that the per-channel stds are all equal
assert max(l.orig_sds) == min(l.orig_sds)
elif isinstance(l, nn.Dropout):
# skip dropout layers since they are no-ops in eval mode
pass
else:
raise Exception(f"Unsupported layer type: {type(l)}")
new_model = nn.Sequential(*new_layers)
return new_model
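# --- Hedged equivalence sketch (not part of the original module) -------------
# The flattened layer is intended to reproduce the convolution exactly (up to
# float32 round-off); assuming a CUDA device is available:
#
# conv = nn.Conv2d(3, 4, kernel_size=3, padding=1).cuda()
# flat = FlattenConv2D(conv, shape=[3, 8, 8])
# x = torch.randn(2, 3, 8, 8).cuda()
# ref = conv(x).reshape(2, -1)
# out = flat(x.reshape(2, -1))
# torch.allclose(out, ref, atol=1e-4)   # expected to be True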
|
import collections
import itertools
from typing import Callable, Iterable, Sequence, Union
concat = itertools.chain.from_iterable
def identity(x):
return x
def nth(n):
"""Returns the nth element in a sequence.
>>> nth(1)('ABC')
'B'
"""
def nth(seq):
if isinstance(seq, (tuple, list, Sequence)):
return seq[n]
return next(itertools.islice(seq, n, None))
return nth
def count(seq: Iterable) -> int:
"""Counts the number of items in `seq`. Similar to `len` but works on lazy sequences."""
if hasattr(seq, "__len__"):
return len(seq) # type: ignore
return sum(1 for _ in seq)
def attrgetter(attr):
"""Access the object attribute by its name `attr`.
>>> attrgetter("lower")("ASD")()
'asd'
"""
def attrgetter(obj):
return getattr(obj, attr)
return attrgetter
def equals(x):
def equals(y):
return x == y
return equals
def not_equals(x):
"""A functional !=.
>>> not_equals(2)(2)
False
>>> not_equals("David")("Michael")
True
"""
def not_equals(y):
return x != y
return not_equals
def contains(x):
"""Contains operator.
>>> contains([1, 2, 3])(2)
True
>>> contains("David")("x")
False
"""
def contains(y):
return y in x
return contains
def add(x):
"""Addition operator.
>>> add(1)(2)
3
>>> add(["c"])(["a", "b"])
['a', 'b', 'c']
"""
def add(y):
return y + x
return add
def greater_than(x):
"""Greater than operator.
>>> greater_than(1)(2)
True
>>> greater_than(1)(0)
False
"""
def greater_than(y):
return y > x
return greater_than
def greater_equals(x):
"""Greater than or equal operator.
>>> greater_equals(1)(1)
True
>>> greater_equals(1)(0)
False
"""
def greater_equals(y):
return y >= x
return greater_equals
def less_than(x):
"""Less than operator.
>>> less_than(1)(1)
False
"""
def less_than(y):
return y < x
return less_than
def less_equals(x):
"""Less than or equal operator.
>>> less_equals(1)(1)
True
>>> less_equals(1)(3)
False
"""
def less_equals(y):
return y <= x
return less_equals
def multiply(x):
"""Multiply operator.
>>> multiply(2)(1)
2
"""
def multiply(y):
return y * x
return multiply
def divide_by(x):
def divide_by(y):
return y / x
return divide_by
def inside(val):
"""A functional `in` operator.
>>> inside(1)([0, 1, 2])
True
>>> inside("a", "word")
False
"""
def inside(container):
return val in container
return inside
def len_equals(length: int):
"""Measures if the length of a sequence equals to a given length.
>>> len_equals(3)([0, 1, 2])
True
"""
def len_equals(x: Iterable) -> bool:
return count(x) == length
return len_equals
def len_greater(length: int):
"""Measures if the length of a sequence is greater than a given length.
>>> len_greater(2)([0, 1, 2])
True
"""
def len_greater(seq):
return count(seq) > length
return len_greater
def len_smaller(length: int) -> Callable:
"""Measures if the length of a sequence is smaller than a given length.
>>> len_smaller(2)([0, 1, 2])
False
"""
def len_smaller(seq):
return count(seq) < length
return len_smaller
def between(low: int, high: int):
def between(number: Union[int, float]):
return low <= number < high
return between
def empty(seq):
try:
next(iter(seq))
except StopIteration:
return True
return False
def nonempty(seq):
return not empty(seq)
def head(seq):
"""Returns the first element in a sequence.
>>> head('ABC')
'A'
"""
return next(iter(seq))
def second(seq):
"""Returns the second element in a sequence.
>>> second('ABC')
'B'
"""
seq = iter(seq)
next(seq)
return next(seq)
def tail(n):
"""Returns the last n elements of a sequence.
>>> tail(2)([10, 20, 30, 40, 50])
[40, 50]
"""
def tail(seq):
try:
return seq[-n:]
except (TypeError, KeyError):
return tuple(collections.deque(seq, n))
return tail
def last(seq):
"""Returns the last element in a sequence
>>> last('ABC')
'C'
"""
return tail(1)(seq)[0]
def pack(*stuff):
"""Returns a list generated from the provided input.
>>> pack(1, 2, 3)
(1, 2, 3)
"""
return stuff
def is_instance(the_type):
"""Returns if `the_value` is an instance of `the_type`.
>>> is_instance(str)("hello")
True
>>> is_instance(int)("a")
False
"""
def is_instance(the_value):
return isinstance(the_value, the_type)
return is_instance
def is_iterable(x):
"""Determines whether the element is iterable.
>>> is_iterable([1, 2, 3])
True
>>> is_iterable('abc')
True
>>> is_iterable(5)
False"""
try:
iter(x)
return True
except TypeError:
return False
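if __name__ == "__main__":
    # Run the doctests embedded in the helpers above.
    import doctest
    doctest.testmod()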
|
import torch
from .Criterion import Criterion
class MarginRankingCriterion(Criterion):
def __init__(self, margin=1, sizeAverage=True):
super(MarginRankingCriterion, self).__init__()
self.margin = margin
self.sizeAverage = sizeAverage
self.gradInput = [torch.Tensor(), torch.Tensor()]
self._output = None
self.dist = None
self.mask = None
def updateOutput(self, input, y):
if input[0].size(0) == 1:
self.output = max(0, -y * (input[0][0] - input[1][0]) + self.margin)
else:
if self._output is None:
self._output = input[0].clone()
self._output.resize_as_(input[0])
self._output.copy_(input[0])
self._output.add_(-1, input[1])
self._output.mul_(-1).mul_(y)
self._output.add_(self.margin)
self._output.clamp_(min=0)
self.output = self._output.sum()
if self.sizeAverage:
self.output = self.output / y.size(0)
return self.output
def updateGradInput(self, input, y):
if input[0].size(0) == 1:
dist = -y * (input[0][0] - input[1][0]) + self.margin
if dist < 0:
self.gradInput[0][0] = 0
self.gradInput[1][0] = 0
else:
self.gradInput[0][0] = -y
self.gradInput[1][0] = y
else:
if self.dist is None:
self.dist = input[0].new()
self.dist = self.dist.resize_as_(input[0]).copy_(input[0])
dist = self.dist
dist.add_(-1, input[1])
dist.mul_(-1).mul_(y)
dist.add_(self.margin)
if self.mask is None:
self.mask = input[0].new()
self.mask = self.mask.resize_as_(input[0]).copy_(dist)
mask = self.mask
torch.ge(dist, 0, out=mask)
self.gradInput[0].resize_(dist.size())
self.gradInput[1].resize_(dist.size())
self.gradInput[0].copy_(mask)
self.gradInput[0].mul_(-1).mul_(y)
self.gradInput[1].copy_(mask)
self.gradInput[1].mul_(y)
if self.sizeAverage:
self.gradInput[0].div_(y.size(0))
self.gradInput[1].div_(y.size(0))
return self.gradInput
|
"""
This module contains various constants and utilities for both internal and external use.
"""
import aiohttp
import asyncio
import datetime
import typing
#: This constant is used in the various ``edit()`` methods.
#: It's used to indicate that there should be no change to the value of a field,
#: in the cases where ``None`` is a valid value.
NO_CHANGE = type('no_change', (), {"__repr__": lambda x: "NO_CHANGE"})()
TYPE_USER = "users"
TYPE_FORUM = "forums"
TYPE_TEAM = "workrooms"
TYPE_TOPIC = "posts"
TYPE_TOPIC_REPLY = "postComments"
TYPE_NOTIFICATION = "userNotifications"
TYPE_GROUPCHAT_MEMBER = "workroomMembers"
TYPE_FILE = "files"
TYPE_STORAGE = "storage"
TYPE_TASK_BOARD = "taskBoards"
TYPE_TASK_CATEGORY = "taskCategories"
TYPE_TASK = "tasks"
TYPE_TASK_COMMENT = "taskComments"
ENTITY_TYPES = {
TYPE_USER: "Entity.User",
TYPE_FORUM: "Entity.Forum",
TYPE_TEAM: "Entity.Workroom",
TYPE_TOPIC: "Entity.Post",
TYPE_TOPIC_REPLY: "Entity.Post.Comment",
TYPE_NOTIFICATION: "Entity.UserNotification",
TYPE_GROUPCHAT_MEMBER: "Entity.Workroom.Member",
TYPE_FILE: "Entity.File",
TYPE_STORAGE: "Entity.Storage",
TYPE_TASK_BOARD: "Entity.Tasks.TaskBoard",
TYPE_TASK_CATEGORY: "Entity.Tasks.TaskCategory",
TYPE_TASK: "Entity.Tasks.Task",
TYPE_TASK_COMMENT: "Entity.Tasks.TaskComment",
}
# Field names for get_obj_by_field
FIELD_USERNAME = "username"
FIELD_EMAIL_ADDR = "emailAddress"
FIELD_DISPLAY_NAME = "displayName"
FIELD_NAME = "name"
FIELD_NICKNAME = "nickname"
FIELD_ID = "id"
FIELD_JID = "jid"
FIELD_NAMES = {
"id": FIELD_ID,
"email": FIELD_EMAIL_ADDR,
"username": FIELD_USERNAME,
"display_name": FIELD_DISPLAY_NAME,
"jid": FIELD_JID,
"nickname": FIELD_NICKNAME,
"name": FIELD_NAME,
}
# Notification predicates
# Here only for backwards compatibility, use the ones in the notification class
NOTIF_PREDICATE_MENTION = "chat_mention"
NOTIF_PREDICATE_GROUP_MENTION = "group_mention"
NOTIF_PREDICATE_COMMENT = "commented_on"
NOTIF_PREDICATE_TASK_COMPLETED = "completed"
def get_type_from_entity(entity_type: str) -> typing.Optional[str]:
"""
Gets the object type from the entity type.
Note that it doesn't actually return a class, just the string.
.. warning::
This function is intended for internal use only.
:param entity_type: The entity type of the object.
:return: The regular type of the object, or None if an invalid type.
"""
for t, e in ENTITY_TYPES.items():
if e == entity_type:
return t
return None
_T = typing.TypeVar("_T")
async def retry_until_available(action: typing.Callable[..., typing.Awaitable[_T]], *args,
timeout: typing.Optional[float] = None, retry_delay: float = 0.5, **kwargs) -> _T:
"""
Repeatedly tries to do some action (usually getting a resource) until the
resource becomes available or a timeout elapses.
This function will try to run the given coroutine once every ``retry_delay`` seconds.
If it results in a 404, the function tries again. Otherwise, the exception is
raised.
If it times out, an :py:exc:`asyncio.TimeoutError` will be raised.
``args`` and ``kwargs`` are passed to the coroutine.
For example, this snippet will try to get a message from a chat by ID with a timeout
of 5 seconds, retrying after 1 second if a 404 occurs:
.. code-block:: python
message = await pyryver.retry_until_available(chat.get_message, message_id, timeout=5.0, retry_delay=1.0)
.. note::
Do not "call" the coro first and pass a future to this function; instead, pass
a reference to the coro directly, as seen in the example. This is done because
a single future cannot be awaited multiple times, so a new one is created each
time the function retries.
:param action: The coroutine to run.
:param timeout: The timeout in seconds, or None for no timeout (optional).
:param retry_delay: The duration in seconds to wait before trying again (optional).
:return: The return value of the coroutine.
"""
async def _retry_inner():
try:
while True:
try:
return await action(*args, **kwargs)
except aiohttp.ClientResponseError as e:
if e.status == 404:
await asyncio.sleep(retry_delay)
else:
raise e
except asyncio.CancelledError:
pass
return await asyncio.wait_for(_retry_inner(), timeout)
def iso8601_to_datetime(timestamp: str) -> datetime.datetime:
"""
Convert an ISO 8601 timestamp as returned by the Ryver API into a datetime.
.. warning::
This function does not handle *all* valid ISO 8601 timestamps; it only tries to
handle the ones returned by the Ryver API. It uses the simple format string
``"%Y-%m-%dT%H:%M:%S%z"`` to parse the timestamp.
Therefore, this function should **not** be used for parsing any ISO timestamp;
to do that, consider using ``dateutil.parser``, or some alternative method.
:param timestamp: The ISO 8601 timestamp.
"""
if timestamp.endswith("Z"):
return datetime.datetime.strptime(timestamp[:-1], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=datetime.timezone.utc)  # strip the trailing "Z" before parsing
return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S%z")
def datetime_to_iso8601(timestamp: datetime.datetime) -> str:
"""
Convert a datetime into an ISO 8601 timestamp as used by the Ryver API.
:param timestamp: The datetime to convert.
"""
return timestamp.strftime("%Y-%m-%dT%H:%M:%S%z")
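if __name__ == "__main__":
    # Quick illustrative round trip of the timestamp helpers (not part of the
    # original module).
    ts = "2020-01-02T03:04:05+0000"
    dt = iso8601_to_datetime(ts)
    print(dt.isoformat())           # 2020-01-02T03:04:05+00:00
    print(datetime_to_iso8601(dt))  # 2020-01-02T03:04:05+0000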
|
from __future__ import print_function
from flask import Flask, jsonify, request
from flask_pymongo import PyMongo
from sh import git, sh, sudo, ErrorReturnCode
import config, os, sys
app = Flask(__name__)
app.config.from_object(config)
mongo = PyMongo(app)
ALLOWED_RANGES = app.config.get('ALLOWED_RANGES') or []
"""
Mongo collections:
- sites: {_id: 'the site name', path: 'the absolute path on this server'}
- deploys: {site: site._id, git_revs: [start, end], 'git_output': git stdout/err, 'deploy': deploy script stderr/out,
'service': service stderr/out}
"""
@app.route('/push/<name>', methods=['POST'])
def push(name):
# a terrible CIDR check
if not any([request.remote_addr.startswith(x) for x in ALLOWED_RANGES]):
return jsonify({'status': 'access denied'}), 401
site = mongo.db.sites.find_one({'_id': name})
if site is None:
return jsonify({'status': 'invalid repo spec'}), 404
rev = str(git('rev-parse', 'origin/master', _cwd=site['path']))
sudo.git.fetch(_cwd=site['path'])
after_rev = str(git('rev-parse', 'origin/master', _cwd=site['path']))
if rev != after_rev:
git_output, deploy, restart, err = [''] * 4
try:
git_output = str(sudo.git.pull(_cwd=site['path'])).strip()
deploy = str(sh('./deploy.sh', _cwd=site['path'])).strip()
restart = str(sudo('service', name, 'restart'))
except ErrorReturnCode as e:
err = str(e)
print('weird error', err)
output = {
'site': name,
'git_revs': [rev, after_rev],
'git_output': git_output,
'deploy': deploy,
'restart': restart,
'err': err,
}
mongo.db.deploys.insert(output)
output['_id'] = str(output['_id'])
return jsonify(output), 201
return jsonify({
'status': 'same',
'rev': rev or '...',
}), 200
if __name__ == '__main__':
app.run(host=app.config.get('HOSTNAME', '0.0.0.0'), port=app.config.get('PORT', 5010))
|
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import six
import six.moves.urllib.parse as urlparse
from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
conductor = cond.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class JobBinaryManager(object):
def __init__(self):
self.job_binaries = {}
self._load_job_binaries()
def _load_job_binaries(self):
config_jb = CONF.job_binary_types
extension_manager = enabled.EnabledExtensionManager(
check_func=lambda ext: ext.name in config_jb,
namespace='sahara.job_binary.types',
invoke_on_load=True
)
for ext in extension_manager.extensions:
if ext.name in self.job_binaries:
raise ex.ConfigurationError(
_("Job binary with name '%s' already exists.") %
ext.name)
ext.obj.name = ext.name
self.job_binaries[ext.name] = ext.obj
LOG.info("Job binary name {jb_name} loaded {entry_point}".format(
jb_name=ext.name, entry_point=ext.entry_point_target))
if len(self.job_binaries) < len(config_jb):
loaded_jb = set(six.iterkeys(self.job_binaries))
requested_jb = set(config_jb)
raise ex.ConfigurationError(
_("Job binaries couldn't be loaded: %s") %
", ".join(requested_jb - loaded_jb))
def get_job_binaries(self):
config_jb = CONF.job_binary_types
return [self.get_job_binary(name).name for name in config_jb]
def get_job_binary(self, name):
res = self.job_binaries.get(name)
if res is None:
raise ex.InvalidDataException(_("Invalid job binary"))
return res
def get_job_binary_by_url(self, url):
url = urlparse.urlparse(url)
if not url.scheme:
raise ex.InvalidDataException(
_("Job binary url must have a scheme"))
return self.get_job_binary(url.scheme)
JOB_BINARIES = None
def setup_job_binaries():
global JOB_BINARIES
JOB_BINARIES = JobBinaryManager()
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ncclient
import ncclient.manager
import ncclient.xml_
from os_ken import exception as os_ken_exc
from os_ken.lib import of_config
from os_ken.lib.of_config import constants as ofc_consts
from os_ken.lib.of_config import classes as ofc
# TODO: When we re-organize ncclient, its NCClientError will be
# subclass of OSKenException.
class OFConfigCapableSwitchNotFound(os_ken_exc.OSKenException,
ncclient.NCClientError):
message = 'OpenFlow Capable Switch not found'
def get_ns_tag(tag):
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
return (None, tag)
class OFCapableSwitch(object):
def __init__(self, connect_method='connect_ssh', *args, **kwargs):
super(OFCapableSwitch, self).__init__()
self._connect_method = connect_method
self._connect_args = args
self._connect_kwargs = kwargs
self.version = None
self.namespace = None
connect = getattr(ncclient.manager, self._connect_method)
self.netconf = connect(*self._connect_args, **self._connect_kwargs)
def close_session(self):
if self.netconf:
self.netconf.close_session()
self.netconf = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close_session()
def client_capabilities(self):
return self.netconf.client_capabilities
def server_capabilities(self):
return self.netconf.server_capabilities
def _find_capable_switch(self, tree):
capable_switch = None
for element in tree:
ns, tag = get_ns_tag(element.tag)
if tag != ofc_consts.CAPABLE_SWITCH:
continue
# assumes that <get> returns only single capable switch
assert capable_switch is None
capable_switch = element
if not self.version:
versions = [(version, ns_) for version, ns_ in
of_config.OFCONFIG_YANG_NAMESPACES.items()
if ns == ns_]
if versions:
assert len(versions) == 1
version = versions[0]
self.version, self.namespace = version
if not capable_switch:
raise OFConfigCapableSwitchNotFound()
return capable_switch
def _find_capable_switch_xml(self, tree):
return ncclient.xml_.to_xml(self._find_capable_switch(tree))
def raw_get(self, filter=None):
reply = self.netconf.get(filter)
return self._find_capable_switch_xml(reply.data_ele)
def raw_get_config(self, source, filter=None):
reply = self.netconf.get_config(source, filter)
return self._find_capable_switch_xml(reply.data_ele)
def raw_edit_config(self, target, config, default_operation=None,
test_option=None, error_option=None):
self.netconf.edit_config(target, config,
default_operation, test_option, error_option)
def get(self):
return ofc.OFCapableSwitchType.from_xml(self.raw_get())
def get_config(self, source):
return ofc.OFCapableSwitchType.from_xml(self.raw_get_config(source))
def edit_config(self, target, capable_switch, default_operation=None):
xml = ofc.NETCONF_Config(capable_switch=capable_switch).to_xml()
self.raw_edit_config(target, xml, default_operation)
def delete_config(self, source):
self.netconf.delete_config(source)
def copy_config(self, source, target):
self.netconf.copy_config(source, target)
def commit(self):
self.netconf.commit()
def discard_changes(self):
self.netconf.discard_changes()
# TODO: more netconf operations
# TODO: convenience (higher level) methods
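# Illustrative usage sketch (host and credentials are placeholders; the keyword
# arguments are forwarded to ncclient.manager.connect_ssh by default):
#
#   with OFCapableSwitch(host='switch.example.org', port=830,
#                        username='user', password='password',
#                        unknown_host_cb=lambda host, fingerprint: True) as sw:
#       capable_switch = sw.get()  # parsed ofc.OFCapableSwitchType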
|
import discord
from discord.ext import commands
import urllib
import urllib.parse
import json
import asyncio
import random
import psutil
import pybithumb
from datetime import datetime
class _help(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(name="명령어", aliases=['help', '도움말', 'commands'])
async def _help(self, ctx):
embedhelp=(
discord.Embed(
title="Wonder-Music-Studico-Proj 도움말",
description="Wonder-Music-Studico-Proj help",
color=0xabcee9
)
.add_field(
name=";명령어 기타",
value="기타 추가된 명령어를 확인 할 수 있습니다."
)
)
if ctx.invoked_subcommand is None:
await ctx.send(
embed=embedhelp
)
@_help.command(name="기타", aliases=["etc"])
async def _helpetc(self, ctx):
embedetc=(
discord.Embed(
title="기타 명령어",
description="etc. Command",
color=0xabcee9
)
.add_field(
name=";서버",
value="현재 서버컴퓨터 자원 사용량을 확인합니다"
)
.add_field(
name=";ping",
value="봇 레이턴시를 확인합니다."
)
)
await ctx.send(
embed=embedetc
)
@_help.command(name="학교", aliases=["school"])
async def _helpschool(self, ctx):
embedschool=(
discord.Embed(
title="학교관련 명령어",
description="school about Command",
color=0xabcee9
)
.add_field(
name=";급식",
value="전국 학교 급식을 조회합니다"
)
.add_field(
name=";시간표 < 학교 >",
value="해당 학교를 시간표를 조회 합니다."
)
)
await ctx.send(
embed=embedschool
)
|
# AUTOGENERATED FROM "edb/api/types.txt" WITH
# $ edb gen-types
from __future__ import annotations
from typing import *
import uuid
from edb.common import uuidgen
UUID: Type[uuid.UUID] = uuidgen.UUID
TYPE_IDS = {
'anytype': UUID('00000000-0000-0000-0000-000000000001'),
'anytuple': UUID('00000000-0000-0000-0000-000000000002'),
'std': UUID('00000000-0000-0000-0000-0000000000f0'),
'empty-tuple': UUID('00000000-0000-0000-0000-0000000000ff'),
'std::uuid': UUID('00000000-0000-0000-0000-000000000100'),
'std::str': UUID('00000000-0000-0000-0000-000000000101'),
'std::bytes': UUID('00000000-0000-0000-0000-000000000102'),
'std::int16': UUID('00000000-0000-0000-0000-000000000103'),
'std::int32': UUID('00000000-0000-0000-0000-000000000104'),
'std::int64': UUID('00000000-0000-0000-0000-000000000105'),
'std::float32': UUID('00000000-0000-0000-0000-000000000106'),
'std::float64': UUID('00000000-0000-0000-0000-000000000107'),
'std::decimal': UUID('00000000-0000-0000-0000-000000000108'),
'std::bool': UUID('00000000-0000-0000-0000-000000000109'),
'std::datetime': UUID('00000000-0000-0000-0000-00000000010a'),
'std::duration': UUID('00000000-0000-0000-0000-00000000010e'),
'std::json': UUID('00000000-0000-0000-0000-00000000010f'),
'std::bigint': UUID('00000000-0000-0000-0000-000000000110'),
'cal::local_datetime': UUID('00000000-0000-0000-0000-00000000010b'),
'cal::local_date': UUID('00000000-0000-0000-0000-00000000010c'),
'cal::local_time': UUID('00000000-0000-0000-0000-00000000010d'),
}
|
from django.conf.urls import url
from . import api_views
urlpatterns = [
url(r'login/$', api_views.LoginAPIView.as_view(), name='login'),
url(r'social-auth/$', api_views.SocialAuthAPIView.as_view(), name='social-auth'),
url(r'sign-up/$', api_views.SignupAPIView.as_view(), name='sign-up'),
url(r'change-password/$', api_views.ChangePasswordAPIView.as_view(), name='change-password'),
url(r'forget-password/$', api_views.ForgotPasswordAPIView.as_view(), name='forget-password'),
url(r'reset-password/$', api_views.ResetPasswordAPIView.as_view(), name='reset-password'),
]
|
import numpy as np
random = np.random.RandomState(2016)
std_list = np.array(range(2,10))/100
print(std_list)
def gaussian_stds_generater(random,num=4):
candid_list = []
candid_list.append(random.uniform(0.01, 0.1))
while len(candid_list)<num:
        std_add = random.choice(std_list)  # use the seeded RandomState for reproducibility
candid_list.append(candid_list[-1]+std_add)
return candid_list
def gaussian_weight_generater(random,num=4):
candid_list = []
while len(candid_list)<num:
candid_list.append(random.uniform(0.1, 1))
candid_list= np.array(candid_list)
candid_list = candid_list/candid_list.sum()
return list(candid_list)
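# Illustrative note: gaussian_stds_generater(random) returns `num` monotonically
# increasing standard deviations starting from a uniform draw in [0.01, 0.1), and
# gaussian_weight_generater(random) returns `num` positive weights normalised to
# sum to 1 (the exact values depend on the RandomState seed above).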
num=4
num_std=15
num_weight=10
std_matrix = np.zeros((num_std,num))
weight_matrix = np.zeros((num_std*num_weight,num))
for i in range(num_std):
std_res = gaussian_stds_generater(random,num)
std_matrix[i,:]= std_res
for j in range(num_weight):
idx= i* num_weight + j
weight_res = gaussian_weight_generater(random, num)
weight_matrix[idx,:] = weight_res
np.save('stds.npy',std_matrix)
np.save('weights.npy',weight_matrix)
|
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
class MeanModel(models.BaseModel):
"""Mean model."""
def create_model(self, model_input, **unused_params):
"""Creates a logistic model.
model_input: 'batch' x 'num_features' x 'num_methods' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
output = tf.reduce_mean(model_input, axis=2)
return {"predictions": output}
|
import hashlib
import os
import re
def find_files(directory, regex):
for root, dirs, files in os.walk(directory):
for basename in files:
if re.search(regex, basename):
fq_path = os.path.join(root, basename)
yield fq_path, root, basename
def _get_md5_hash(filename):
BLOCKSIZE = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def calculate_hash(filename):
return _get_md5_hash(filename)
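# Illustrative usage (directory and pattern are placeholders):
#   for path, root, name in find_files('/var/log', r'\.log$'):
#       print(path, calculate_hash(path))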
|
# The MIT License (MIT)
#
# Copyright (c) 2015-2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import six
MAX_VALUE = (16 ** 12 - 1)
def encode(value):
"""
Encodes the number *value* to a MAC address ASCII string in binary form.
Raises a #ValueError if *value* is a negative number or exceeds the MAC
address range.
"""
if value > MAX_VALUE:
raise ValueError('value {!r} exceeds MAC address range'.format(value))
if value < 0:
raise ValueError('value must not be negative')
# todo: convert to the right byte order. the resulting
# mac address is reversed on my machine compared to the
# mac address displayed by the hello-myo SDK sample.
# See issue #7
string = ('%x' % value).rjust(12, '0')
assert len(string) == 12
result = ':'.join(''.join(pair) for pair in zip(*[iter(string)]*2))
return result.upper()
def decode(bstr):
"""
  Decodes an ASCII encoded binary MAC address string into a number.
"""
bstr = bstr.replace(b':', b'')
if len(bstr) != 12:
raise ValueError('not a valid MAC address: {!r}'.format(bstr))
try:
return int(bstr, 16)
except ValueError:
raise ValueError('not a valid MAC address: {!r}'.format(bstr))
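# Illustrative round-trip for the two helpers above:
#   >>> encode(1)
#   '00:00:00:00:00:01'
#   >>> decode(b'00:00:00:00:00:01')
#   1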
class MacAddress(object):
"""
Represents a MAC address. Instances of this class are immutable.
"""
def __init__(self, value):
if isinstance(value, six.integer_types):
if value < 0 or value > MAX_VALUE:
raise ValueError('value {!r} out of MAC address range'.format(value))
    elif isinstance(value, (six.text_type, six.binary_type)):
if isinstance(value, six.text_type):
value = value.encode('ascii')
value = decode(value)
else:
msg = 'expected string, bytes or int for MacAddress, got {}'
      raise TypeError(msg.format(type(value).__name__))
self._value = value
self._string = None
def __str__(self):
if self._string is None:
self._string = encode(self._value)
return self._string
def __repr__(self):
return '<MAC {}>'.format(self)
@property
def value(self):
return self._value
|
import base64
import json
import unittest
from wk_client import create_app, db
from wk_client.config import TestConfig
class AppTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def open_with_auth(client, url, method, username, password):
return client.open(
url,
method=method,
headers={
'Authorization': encode_username_password(username, password)
},
)
def post_json(client, url, data, username=None, password=None, timestamp=None):
params = {'method': 'post', 'content_type': 'application/json', 'data':json.dumps(data)}
headers = params.get('headers', {})
if username and password:
headers['Authorization'] = encode_username_password(username, password)
if timestamp:
headers['Timestamp'] = timestamp.isoformat()
params['headers'] = headers
return client.open(url, **params)
def encode_username_password(username, password):
return b'Basic ' + base64.b64encode(username + b":" + password)
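# Illustrative usage inside an AppTestCase test (endpoint and credentials are
# placeholders; note that encode_username_password expects bytes):
#   resp = post_json(self.client, '/endpoint', {'key': 'value'},
#                    username=b'user', password=b'secret')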
|
import enum
from typing import Union
class InvalidCallableValue(Exception):
pass
class Result:
def __init__(self, pre_subst, post_subst, variables):
self.pre_subst = pre_subst
self.post_subst = post_subst
self.variables = variables
def __repr__(self):
return "Result('{}', '{}')".format(self.pre_subst, self.post_subst)
def __str__(self):
return str(self.post_subst)
def needs_rebuild(self, variables):
return not self.variables == variables
def vars(self):
return self.variables.keys()
class Token(enum.Enum):
variable = 0
literal = 1
class Lexer:
def __init__(self, s):
self.length = len(s)
self.data = s
self.pos = 0
self.read_pos = 0
self.char = ""
self._read_char()
def __iter__(self):
"""Return self, for use with for loops."""
return self
def __next__(self):
self._skip_whitespace()
if self.char == "":
raise StopIteration
if self.char == "$":
self._read_char()
if self.char == "$":
return (Token.literal, self._read_word())
elif self.char == "{":
self._read_char()
                varname = self._read(lambda x: x != "}")  # read up to the closing brace
self._read_char()
return (Token.variable, varname)
else:
return (Token.variable, self._read_word())
return (Token.literal, self._read_word())
def _skip_whitespace(self):
while self.char.isspace():
self._read_char()
def _read_word(self):
return self._read(lambda x: not x.isspace())
def _read(self, valid):
"""
Read characters in the lexer until valid returns False.
Returns the full string which matched the valid function.
"""
start = self.pos
while valid(self.char) and self.pos < self.length:
self._read_char()
return self.data[start : self.pos]
def _read_char(self):
"""Read a character from input advancing the cursor."""
if self.read_pos >= len(self.data):
self.char = ""
else:
self.char = self.data[self.read_pos]
self.pos = self.read_pos
self.read_pos += 1
def _peek_char(self):
"""Return the next character."""
        if self.read_pos >= self.length:
return ""
return self.data[self.read_pos]
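# Illustrative tokenisation with the Lexer above (the template string is made up here):
#   >>> list(Lexer("gcc $CFLAGS -o ${TARGET} $$literal"))
#   [(<Token.literal: 1>, 'gcc'), (<Token.variable: 0>, 'CFLAGS'),
#    (<Token.literal: 1>, '-o'), (<Token.variable: 0>, 'TARGET'),
#    (<Token.literal: 1>, '$literal')]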
def needs_further_subst(value):
if isinstance(value, str) and "$" in value:
return True
if isinstance(value, Result):
return True
return False
def to_expansion_list(bs, expanded, variables, targets, sources):
expansion_list = []
for item in expanded:
if needs_further_subst(item):
sub_results, sub_variables = subst(bs, item, recursive=True)
variables.update(sub_variables)
expansion_list.extend(reversed(sub_results))
else:
expansion_list.append(str(item))
return expansion_list
def subst(bs, s, for_command=False, recursive=False, targets=None, sources=None):
if not recursive and bs.is_mutable():
bs["targets"] = targets
if targets:
bs["target"] = targets[0]
bs["sources"] = sources
if isinstance(s, Result):
variables = {key: bs.get(key) for key in s.vars()}
if not s.needs_rebuild(variables):
print("CACHED!")
return s.post_subst
s = s.pre_subst
variables = {}
lexer = Lexer(s)
result = []
for token, value in lexer:
if token == Token.literal:
print("LITERAL", token, value)
result.append(value)
continue
expanded = bs.get(value, None)
if expanded is None:
continue
if needs_further_subst(expanded):
sub_results, sub_variables = subst(bs, expanded, recursive=True)
variables.update(sub_variables)
print("EXPANDED", expanded, "TO", sub_results)
result.extend(sub_results)
elif isinstance(expanded, list):
expansion_list = to_expansion_list(
bs, expanded, variables, targets, sources
)
variables[value] = expansion_list
result.extend(expansion_list)
elif callable(expanded):
called_value = expanded(bs.immutable())
if not called_value:
continue
if needs_further_subst(called_value):
sub_results, sub_variables = subst(bs, called_value, recursive=True)
variables.update(sub_variables)
result.extend(sub_results)
elif isinstance(called_value, str):
result.append(str(called_value))
elif isinstance(called_value, list):
expansion_list = to_expansion_list(
bs, called_value, variables, targets, sources
)
variables[value] = expansion_list
result.extend(expansion_list)
else:
raise InvalidCallableValue(
"Expansion function {} returned an invalid value: {}".format(
str(expanded), str(called_value)
)
)
else:
variables[value] = expanded
result.append(str(expanded))
if s not in ["targets", "target", "sources"] and bs.is_mutable():
bs[s] = Result(pre_subst=s, post_subst=result, variables=variables)
if recursive:
return result, variables
if bs.is_mutable():
bs.delete("targets")
bs.delete("target")
bs.delete("sources")
if not for_command:
return " ".join(result)
return result
|
# This is a sample program that shows downloading and dynamically importing a module.
__author__ = 'David Manouchehri (david@davidmanouchehri.com)'
try: # Python 3
import urllib.request as urllib2
from urllib.parse import urlparse
except ImportError: # Python 2
import urlparse
import urllib2
def grab_module(url, name=''):
print('Downloading ' + url + '...')
content = urllib2.urlopen(url).read().decode('utf-8') # Decode is not needed in Python 2.
if not name:
import os
# Split the URL, get the name of the file and slice the extension off.
name = os.path.splitext(os.path.basename(urlparse.urlsplit(url).path))[0]
print('Name not given, importing as ' + name)
try:
import sys
import imp # imp is not as reliable, and may fail.
module = imp.new_module(name)
exec(content, module.__dict__)
sys.modules[name] = module
globals()[name] = __import__(name)
except ImportError:
c = {}
exec(content, c)
class Holder(object):
pass
tester = Holder()
        for key, value in c.items():  # items() works on both Python 2 and 3
setattr(tester, key, value)
globals()[name] = tester
print('Successfully imported ' + name)
if __name__ == '__main__':
# Simply give the full URL and the name you want to give to the module.
grab_module('https://gist.githubusercontent.com/Manouchehri/dc93dc6f5d1ce7a16862/'
'raw/331d708a272b13576fc6bd526b906043c54c2feb/test.py', 'tester')
tester.printhello()
grab_module('http://svn.python.org/projects/python/trunk/Lib/subprocess.py', 'suby')
suby.call(["df", "-h"])
# Alternatively, you can leave out the name and the name will be automatically set to the filename.
grab_module('http://svn.python.org/projects/python/trunk/Lib/subprocess.py')
subprocess.call(["df", "-h"])
|
#
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json, urllib2
def call_rest(url, payload=None, with_payload_method='PUT'):
"""
REST call with JSON decoding of the response and JSON payloads.
"""
if payload:
if not isinstance(payload, basestring):
payload = json.dumps(payload)
# PUT or POST
response = urllib2.urlopen(MethodRequest(url, payload, {'Content-Type': 'application/json'}, method=with_payload_method))
else:
# GET
response = urllib2.urlopen(url)
response = response.read().decode()
return json.loads(response)
#
# Utils
#
class MethodRequest(urllib2.Request):
"""
Workaround to support all HTTP methods.
From `here <https://gist.github.com/logic/2715756>`__.
"""
def __init__(self, *args, **kwargs):
if 'method' in kwargs:
self._method = kwargs['method']
del kwargs['method']
else:
self._method = None
return urllib2.Request.__init__(self, *args, **kwargs)
def get_method(self, *args, **kwargs):
return self._method if self._method is not None else urllib2.Request.get_method(self, *args, **kwargs)
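# Illustrative usage (endpoint URLs are placeholders):
#   status = call_rest('http://manager.example.com/api/status')              # GET
#   call_rest('http://manager.example.com/api/nodes', {'name': 'n1'})        # PUT with JSON body
#   call_rest('http://manager.example.com/api/nodes', {'name': 'n1'},
#             with_payload_method='POST')                                    # POST with JSON body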
|
### how to read files in Python ###
file_open = open("lorem_ipsum.txt", "r")
file_read = file_open.read()
file_open.close()
print(file_read)
### how to write files in Python ###
file_write = open("demo.txt", "w")
file_write.write(file_read)
file_write.close()
### how to use "with" when working with files in Python ###
with open("demo.txt", 'a', encoding='utf-8') as f:
f.write("\nlinea 1\n")
f.write("linea 2\n")
f.write("linea 3\n")
|
import sqlite3
def add_variableName(variable):
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute("""
INSERT INTO tbl_opcua_data (variableName) VALUES (?)
""", (variable, ))
banco.commit()
banco.close()
def get_variableName():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
    cursor.execute('SELECT variableName FROM tbl_opcua_data')
    retornar = cursor.fetchall()
    banco.close()
    return retornar[0][0]
def add_ns(ns):
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute("""
INSERT INTO tbl_opcua_data(ns) VALUES(?)
""", (ns, ))
banco.commit()
banco.close()
def get_ns():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute('SELECT ns FROM tbl_opcua_data')
retornar = cursor.fetchall()
banco.commit()
banco.close()
return retornar[0][0]
def add_type(type):
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute("""
INSERT INTO tbl_opcua_data(type) VALUES(?)
""", (type, ))
banco.commit()
banco.close()
def get_type():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute('SELECT type FROM tbl_opcua_data')
retornar = cursor.fetchall()
banco.commit()
banco.close()
return retornar[0][0]
def mostrar():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute('SELECT * FROM tbl_opcua_data')
print(cursor.fetchall())
def add_(variableName, ns, type):
add_variableName(variableName)
add_ns(ns)
add_type(type)
# cursor.execute("INSERT INTO pessoas VALUES('anderson', 18, 'anderson@outlook.com')")
if __name__ == '__main__':
mostrar()
print()
add_variableName('algo')
print(get_variableName())
|
import pytest
def test_claim(user, lp_3crv, vault, accounts):
three_gauge = accounts.at("0xbFcF63294aD7105dEa65aA58F8AE5BE2D9d0952A", force=True)
lp_3crv.transfer(vault, "1000 ether", {"from": three_gauge})
before = lp_3crv.balanceOf(user)
vault.claim({"from": user})
assert lp_3crv.balanceOf(user) > before
def test_backzapper(backzapper, user, crv, vesting, vault, minter, gauges):
before = vault.balanceOf(user)
assert vesting.balanceOf(user) > 0
minter.toggle_approve_mint(backzapper, {"from": user})
crv.approve(backzapper, 2 ** 256 - 1, {"from": user})
backzapper.zap(gauges, {"from": user})
assert vault.balanceOf(user) > before
assert crv.balanceOf(user) == 0
assert vesting.balanceOf(user) == 0
assert crv.balanceOf(backzapper) == 0
def test_3crv_zapper(zap_3crv, lp_3crv, y3crv, vault, accounts, user):
gauge_3crv = accounts.at("0xbFcF63294aD7105dEa65aA58F8AE5BE2D9d0952A", force=True)
lp_3crv.transfer(vault, "1000 ether", {"from": gauge_3crv})
before = y3crv.balanceOf(user)
lp_3crv.approve(zap_3crv, 2 ** 256 - 1, {"from": user})
zap_3crv.zap({"from": user})
assert y3crv.balanceOf(user) > before
assert lp_3crv.balanceOf(user) == 0
assert lp_3crv.balanceOf(zap_3crv) == 0
assert y3crv.balanceOf(zap_3crv) == 0
|
import csv
import datetime
import shutil
import os
from tempfile import NamedTemporaryFile
#file_item_path = os.path.join(os.getcwd(), "data.csv")
file_item_path = os.path.join(os.path.dirname(__file__), "data.csv")
def read_data(user_id=None, email=None):
filename = file_item_path
with open(filename, "r") as csvfile:
reader = csv.DictReader(csvfile)
items = []
unknown_user_id = None
unknown_email = None
for row in reader:
if user_id is not None:
if int(user_id) == int(row.get("id")):
return row
else:
unknown_user_id = user_id
if email is not None:
if email == row.get("email"):
return row
else:
unknown_email = email
if unknown_user_id is not None:
return "User id {user_id} not found".format(user_id=user_id)
if unknown_email is not None:
return "Email {email} not found".format(email=email)
return None
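# Illustrative usage (the ids and emails depend on the contents of data.csv):
#   read_data(user_id=1)
#   read_data(email='someone@example.com')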
|
#!/usr/bin/env python3
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import init
from tqdm import tqdm
import random
import math
import operator
import matplotlib.pyplot as plt
import numpy as np
use_cuda = torch.cuda.is_available()
class Net(nn.Module):
def __init__(self, num_input_channels, num_output_channels, out2, pool_size, stride, linear_nodes):
super(Net, self).__init__()
self.num_input_channels = num_input_channels
self.num_output_channels = num_output_channels
self.out2 = out2
self.pool_size = pool_size
self.stride = stride
self.linear_nodes = linear_nodes
#layers
self.conv1 = nn.Conv2d(num_input_channels, num_output_channels, stride)
self.pool = nn.MaxPool2d(pool_size, pool_size)
self.conv2 = nn.Conv2d(num_output_channels, out2, stride)
self.fc1 = nn.Linear(out2*(stride+2)**2*(stride+num_output_channels), linear_nodes[0])
self.fc2 = nn.Linear(linear_nodes[0], linear_nodes[1])
self.fc3 = nn.Linear(linear_nodes[1], linear_nodes[2])
def forward(self, input):
res = self.pool(F.relu(self.conv1(input)))
res = self.pool(F.relu(self.conv2(res)))
res = res.view(-1, self.out2*(self.stride+2)**2*(self.stride+self.num_output_channels))
res = F.relu(self.fc1(res))
res = F.relu(self.fc2(res))
res = self.fc3(res)
return res
def getParams(self):
return self.num_input_channels, self.num_output_channels, self.out2, self.pool_size, self.stride, self.linear_nodes
#makes a bunch of different nets
def makeNets(numNets):
nets = []
print("MAKING NETS")
for i in tqdm(range(numNets)):
num_input_channels = 3
num_output_channels = random.randrange(1, 100)
out2 = random.randrange(1, 100)
pool_size = random.randrange(1, 5)
stride = random.randrange(1,5)
linear_nodes = [random.randrange(100,300), random.randrange(30, 100), 2]
net = Net(num_input_channels, num_output_channels, out2, pool_size, stride, linear_nodes)
nn.init.xavier_uniform(net.conv1.weight)
nn.init.xavier_uniform(net.conv2.weight)
nn.init.xavier_uniform(net.fc1.weight)
nn.init.xavier_uniform(net.fc2.weight)
nn.init.xavier_uniform(net.fc3.weight)
nn.init.uniform(net.conv1.bias)
nn.init.uniform(net.conv2.bias)
nn.init.uniform(net.fc1.bias)
nn.init.uniform(net.conv1.bias)
nn.init.uniform(net.fc2.bias)
nn.init.uniform(net.fc3.bias)
net = net.cuda() if use_cuda else net
nets.append(net)
return nets
#running nets in environment
def inference(nets):
env = gym.make("Pong-v0")
num_episodes = 5
num_iters = 10000
rewards = np.array([])
for net in nets:
total_reward = 0
observation = env.reset()
for i_episode in tqdm(range(num_episodes)):
for _ in range(num_iters):
                res = net(Variable(torch.from_numpy(observation).float().permute(2, 0, 1).unsqueeze(0)))  # HWC uint8 frame -> NCHW float tensor
action = 2 if res.data[0][0] > res.data[0][1] else 3
observation, reward, done, info = env.step(action)
total_reward += reward
if done:
print("Finished after %d timesteps", _)
break
if i_episode == num_iters - 1:
print("gone through %d iters", num_iters)
        rewards = np.append(rewards, total_reward)  # np.append returns a new array
return rewards
#Editing Nets based off of results
def evolution(rewards, nets, survival_rate, exploration_rate, combine_rate):
evolved_nets = []
numNets = len(nets)
numSurvivors = math.floor(numNets*survival_rate)
numRescued = math.floor(numNets*exploration_rate)
numCombined = math.floor(numNets*combine_rate)
numMutated = numNets - numSurvivors - numRescued - numCombined
def naturalSelection():
index, value = max(enumerate(rewards), key=operator.itemgetter(1))
evolved_nets.append(nets[index])
rewards.pop(index)
nets.pop(index)
def combine(tensor1, tensor2): #cross products
size1 = tensor1.size()
size2 = tensor2.size()
tensor1 = tensor1.view(1, -1)
tensor2 = tensor2.view(1, -1)
tensor1Len = tensor1.size()[1]
tensor2Len = tensor2.size()[1]
if tensor1Len > tensor2Len:
res = torch.cat(torch.cross(tensor1[:,:tensor2Len + 1], tensor2), tensor1[:,tensor2Len + 1:]).view(size1)
elif tensor1Len < tensor2Len:
res = torch.cat(torch.cross(tensor2[:,:tensor1Len + 1], tensor1), tensor2[:,tensor1Len + 1:]).view(size2)
else:
res = torch.cross(tensor1, tensor2).view(size1)
return res
    def mutate(tensor):
        # fill the tensor with fresh uniform random values; the original loop
        # assigned to the loop variable, which never modified the tensor
        return torch.rand(tensor.size())
#pick survivors
for i in range(numSurvivors):
naturalSelection()
#Combine some
for i in range(numCombined):
net1 = random.choice(evolved_nets)
net2 = random.choice(evolved_nets)
net1Params = net1.getParams()
net2Params = net2.getParams()
newNet = Net(3, max(net1Params[1], net2Params[1]), max(net1Params[2], net2Params[2]), max(net1Params[3], net2Params[3]), max(net1Params[4], net2Params[4]), max(net1Params[5], net2Params[5]))
newNet.conv1.weight = combine(net1.conv1.weight, net2.conv1.weight)
newNet.conv2.weight = combine(net1.conv2.weight, net2.conv2.weight)
newNet.fc1.weight = combine(net1.fc1.weight, net2.fc1.weight)
newNet.fc2.weight = combine(net1.fc2.weight, net2.fc2.weight)
        newNet.fc3.weight = combine(net1.fc3.weight, net2.fc3.weight)
newNet.conv1.bias = combine(net1.conv1.bias, net2.conv1.bias)
newNet.conv2.bias = combine(net1.conv2.bias, net2.conv2.bias)
newNet.fc1.bias = combine(net1.fc1.bias, net2.fc1.bias)
newNet.fc2.bias = combine(net1.fc2.bias, net2.fc2.bias)
newNet.fc3.bias = combine(net1.fc3.bias, net2.fc3.bias)
newNet = newNet.cuda() if use_cuda else newNet
evolved_nets.append(newNet)
#pick Rescued
for i in range(numRescued):
rescuee = random.choice(nets)
idx = nets.index(rescuee)
evolved_nets.append(rescuee)
        nets.pop(idx)  # list.pop takes an index, not the element itself
#mutate Some
for i in range(numMutated):
chosenNet = random.choice(nets)
idx = nets.index(chosenNet)
chosenNet.conv2.weight = mutate(chosenNet.conv2.weight)
chosenNet.fc1.weight = mutate(chosenNet.fc1.weight)
chosenNet.fc2.weight = mutate(chosenNet.fc2.weight)
chosenNet.fc3.weight = mutate(chosenNet.fc3.weight)
chosenNet.conv1.bias = mutate(chosenNet.conv1.bias)
chosenNet.conv2.bias = mutate(chosenNet.conv2.bias)
chosenNet.fc1.bias = mutate(chosenNet.fc1.bias)
chosenNet.fc2.bias = mutate(chosenNet.fc2.bias)
chosenNet.fc3.bias = mutate(chosenNet.fc3.bias)
evolved_nets.append(chosenNet)
nets.pop(idx)
return evolved_nets
#TRAINING
survival_rate = 0.4
exploration_rate = 0.3
combine_rate = 0.2
numEvolutions = 10000
numNets = 10000
def train(survival_rate, exploration_rate, combine_rate, numEvolutions=10000, numNets=10000):
avgRewards = np.array([])
nets = makeNets(numNets)
print("TRAINING")
#analyzation
def stats(rewards, iteration, print_every=500):
index, value = max(enumerate(rewards), key=operator.itemgetter(1))
avg_reward = sum(rewards)/float(len(rewards))
        nonlocal avgRewards
        avgRewards = np.append(avgRewards, avg_reward)  # np.append returns a new array
if iteration % print_every == 0:
print("Average Reward: %f" % avg_reward)
print("Best Net: Net %d\n Score: %f" % (index, value))
            iterations = np.arange(iteration + 1)  # one entry per recorded average reward
            fig, ax = plt.subplots()
            fit = np.polyfit(iterations, avgRewards, deg=1)
            ax.plot(iterations, fit[0] * iterations + fit[1], color='red')
print("Change in Average Reward per Iteration: %d" % fit[0])
ax.scatter(iterations, avgRewards)
fig.show()
plt.savefig('plt.png')
# EVOLVING
for n_iter in tqdm(range(numEvolutions)):
print("EVOLVING")
rewards = inference(nets)
nets = evolution(rewards, nets, survival_rate, exploration_rate, combine_rate)
stats(rewards, n_iter)
exploration_rate = 0.3 - n_iter/6000
combine_rate = 0.2 + n_iter/9000
totalRewards = np.zeros(numNets)
    for n_iter in tqdm(range(numEvolutions // 10)):
print("TESTING")
rewards = inference(nets)
totalRewards += rewards
totalRewards /= numEvolutions/10
index, value = max(enumerate(totalRewards), key=operator.itemgetter(1))
bestNet = nets[index]
return bestNet
bestNet = train(survival_rate, exploration_rate, combine_rate)
torch.save(bestNet, 'Pongexpert.pt')
def play(net):
env = gym.make("Pong-v0")
num_episodes = 5
num_iters = 10000
observation = env.reset()
total_reward = 0
for i_episode in tqdm(range(num_episodes)):
for _ in range(num_iters):
            res = net(Variable(torch.from_numpy(observation).float().permute(2, 0, 1).unsqueeze(0)))  # HWC uint8 frame -> NCHW float tensor
action = 2 if res.data[0][0] > res.data[0][1] else 3
observation, reward, done, info = env.step(action)
total_reward += reward
if done:
print("Finished after %d timesteps", _)
break
if i_episode == num_iters - 1:
print("gone through %d iters", num_iters)
return total_reward
|
from strategies.strategy import Strategy
from utils.formats import get_usable, pretty_format_fa
from androguard.core.bytecodes.dvm import ClassDefItem
cfs = (
(lambda f: f.get_access_flags_string(), 4),
(lambda f: f.get_size(), 5),
)
class FieldStrategy(Strategy):
def get_types_to_match(self):
for fa in self.r_fas:
f_type = str(fa.get_descriptor()).replace("[", "")
if get_usable(f_type) == "obfuscated.class":
yield self.dx.get_class_analysis(f_type)
def try_resolve_fs(self):
candidates_fs = {}
for fa in self.r_fas:
if get_usable(str(fa.get_class_name())) == "obfuscated.class" \
and fa.get_class_name() not in self.accumulator.matching_cs:
print("Class of field", pretty_format_fa(fa), "not matched.")
continue
ca = self.dx.get_class_analysis(fa.get_class_name()).get_class()
ca2 = self.dx2.get_class_analysis(self.accumulator.matching_cs[ca.name]).get_class()
assert type(ca) == ClassDefItem and type(ca2) == ClassDefItem, \
f"Tried to match field of external class ({pretty_format_fa(fa)})"
f2s = tuple(ca2.get_fields())
# Filtering by Type
arr, f2_type = self.get_usable_f2_type(str(fa.get_descriptor()))
if f2_type is not None:
desc2 = arr + f2_type
f2s = tuple((fa2 for fa2 in f2s if fa2.get_descriptor() == desc2))
if not f2s:
continue
# Filtering by Score and Compare Functions
scores = {fa2: sum(((cf(fa) == cf(fa2)) * score) for cf, score in cfs) for fa2 in f2s}
m = max(scores.values())
f2s = tuple((fa2 for fa2, score in scores.items() if score == m))
if tuple((get_usable(str(f.get_descriptor())) for f in ca.get_fields())) == \
tuple((get_usable(str(f2.get_descriptor())) for f2 in ca2.get_fields())):
fa2 = list(ca2.get_fields())[list(ca.get_fields()).index(fa)]
if fa2 in f2s:
f2s = (fa2,)
else:
print(".. Tried to use index, but not in filtered fields!")
candidates_fs[fa] = set(f2s)
return candidates_fs
def get_usable_f2_type(self, desc):
*arr, c_name = desc.rpartition("[")
arr = arr[0] + arr[1]
if get_usable(c_name) == "obfuscated.class":
if c_name in self.accumulator.matching_cs:
f2_type = self.accumulator.matching_cs[c_name]
else:
f2_type = None
else:
f2_type = desc
return arr, f2_type
|
from __future__ import division
import numpy as np
from cea.optimization.constants import ACT_FIRST, HP_SEW_ALLOWED,T_LAKE, HP_LAKE_ALLOWED, CC_ALLOWED, BOILER_MIN, ACT_SECOND, ACT_THIRD, ACT_FOURTH
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.technologies.heatpumps import GHP_op_cost, HPSew_op_cost, HPLake_op_cost, GHP_Op_max
from cea.technologies.furnace import furnace_op_cost
from cea.technologies.cogeneration import calc_cop_CCGT
from cea.technologies.boiler import cond_boiler_op_cost
__author__ = "Sreepathi Bhargava Krishna"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = [ "Sreepathi Bhargava Krishna"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "thomas@arch.ethz.ch"
__status__ = "Production"
def heating_source_activator(Q_therm_req_W, hour, master_to_slave_vars, mdot_DH_req_kgpers, tdhsup_K, tdhret_req_K, TretsewArray_K,
gv, prices, lca, T_ground):
"""
:param Q_therm_req_W:
:param hour:
:param context:
:type Q_therm_req_W: float
:type hour: int
:type context: list
:return: cost_data_centralPlant_op, source_info, Q_source_data, E_coldsource_data, E_PP_el_data, E_gas_data, E_wood_data, Q_excess
:rtype:
"""
current_source = ACT_FIRST # Start with first source, no cost yet
Q_therm_req_W_copy = Q_therm_req_W
# Initializing resulting values (necessairy as not all of them are over-written):
Q_uncovered_W = 0
cost_HPSew_USD, cost_HPLake_USD, cost_GHP_USD, cost_CHP_USD, cost_Furnace_USD, cost_BaseBoiler_USD, cost_PeakBoiler_USD = 0, 0, 0, 0, 0, 0, 0
# initialize all sources to be off = 0 (turn to "on" with setting to 1)
source_HP_Sewage = 0
source_HP_Lake = 0
source_GHP = 0
source_CHP = 0
source_Furnace = 0
source_BaseBoiler = 0
source_PeakBoiler = 0
Q_excess_W = 0
Q_HPSew_gen_W, Q_HPLake_gen_W, Q_GHP_gen_W, Q_CHP_gen_W, Q_Furnace_gen_W, Q_BaseBoiler_gen_W, Q_PeakBoiler_gen_W = 0, 0, 0, 0, 0, 0, 0
E_HPSew_req_W, E_HPLake_req_W, E_GHP_req_W, E_CHP_gen_W, E_Furnace_gen_W, E_BaseBoiler_req_W, E_PeakBoiler_req_W = 0, 0, 0, 0, 0, 0, 0
Gas_used_HPSew_W, Gas_used_HPLake_W, Gas_used_GHP_W, Gas_used_CHP_W, Gas_used_Furnace_W, Gas_used_BaseBoiler_W, Gas_used_PeakBoiler_W = 0, 0, 0, 0, 0, 0, 0
Wood_used_HPSew_W, Wood_used_HPLake_W, Wood_used_GHP_W, Wood_used_CHP_W, Wood_used_Furnace_W, Wood_used_BaseBoiler_W, Wood_used_PeakBoiler_W = 0, 0, 0, 0, 0, 0, 0
Q_coldsource_HPSew_W, Q_coldsource_HPLake_W, Q_coldsource_GHP_W, Q_coldsource_CHP_W, \
Q_coldsource_Furnace_W, Q_coldsource_BaseBoiler_W, Q_coldsource_PeakBoiler_W = 0, 0, 0, 0, 0, 0, 0
while Q_therm_req_W > 1E-1: # cover demand as long as the supply is lower than demand!
if current_source == 'HP': # use heat pumps available!
if (master_to_slave_vars.HP_Sew_on) == 1 and Q_therm_req_W > 0 and HP_SEW_ALLOWED == 1: # activate if its available
source_HP_Sewage = 0
cost_HPSew_USD = 0.0
Q_HPSew_gen_W = 0.0
E_HPSew_req_W = 0.0
Q_coldsource_HPSew_W = 0.0
if Q_therm_req_W > master_to_slave_vars.HPSew_maxSize_W:
Q_therm_Sew_W = master_to_slave_vars.HPSew_maxSize_W
mdot_DH_to_Sew_kgpers = mdot_DH_req_kgpers * Q_therm_Sew_W / Q_therm_req_W.copy() # scale down the mass flow if the thermal demand is lowered
else:
Q_therm_Sew_W = float(Q_therm_req_W.copy())
mdot_DH_to_Sew_kgpers = float(mdot_DH_req_kgpers.copy())
C_HPSew_el_pure, C_HPSew_per_kWh_th_pure, Q_HPSew_cold_primary_W, Q_HPSew_therm_W, E_HPSew_req_W = HPSew_op_cost(mdot_DH_to_Sew_kgpers, tdhsup_K, tdhret_req_K, TretsewArray_K,
lca, Q_therm_Sew_W)
Q_therm_req_W -= Q_HPSew_therm_W
# Storing data for further processing
if Q_HPSew_therm_W > 0:
source_HP_Sewage = 1
cost_HPSew_USD = float(C_HPSew_el_pure)
Q_HPSew_gen_W = float(Q_HPSew_therm_W)
E_HPSew_req_W = float(E_HPSew_req_W)
Q_coldsource_HPSew_W = float(Q_HPSew_cold_primary_W)
if (master_to_slave_vars.GHP_on) == 1 and hour >= master_to_slave_vars.GHP_SEASON_ON and hour <= master_to_slave_vars.GHP_SEASON_OFF and Q_therm_req_W > 0 and not np.isclose(
tdhsup_K, tdhret_req_K):
# activating GHP plant if possible
source_GHP = 0
cost_GHP_USD = 0.0
Q_GHP_gen_W = 0.0
E_GHP_req_W = 0.0
Q_coldsource_GHP_W = 0.0
Q_max_W, GHP_COP = GHP_Op_max(tdhsup_K, T_ground, master_to_slave_vars.GHP_number)
if Q_therm_req_W > Q_max_W:
mdot_DH_to_GHP_kgpers = Q_max_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K))
Q_therm_req_W -= Q_max_W
else: # regular operation possible, demand is covered
mdot_DH_to_GHP_kgpers = Q_therm_req_W.copy() / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K))
Q_therm_req_W = 0
C_GHP_el, E_GHP_req_W, Q_GHP_cold_primary_W, Q_GHP_therm_W = GHP_op_cost(mdot_DH_to_GHP_kgpers, tdhsup_K, tdhret_req_K, GHP_COP, lca)
# Storing data for further processing
source_GHP = 1
cost_GHP_USD = C_GHP_el
Q_GHP_gen_W = Q_GHP_therm_W
E_GHP_req_W = E_GHP_req_W
Q_coldsource_GHP_W = Q_GHP_cold_primary_W
if (master_to_slave_vars.HP_Lake_on) == 1 and Q_therm_req_W > 0 and HP_LAKE_ALLOWED == 1 and not np.isclose(tdhsup_K,
tdhret_req_K): # run Heat Pump Lake
source_HP_Lake = 0
cost_HPLake_USD = 0
Q_HPLake_gen_W = 0
E_HPLake_req_W = 0
Q_coldsource_HPLake_W = 0
if Q_therm_req_W > master_to_slave_vars.HPLake_maxSize_W: # Scale down Load, 100% load achieved
Q_therm_HPL_W = master_to_slave_vars.HPLake_maxSize_W
mdot_DH_to_Lake_kgpers = Q_therm_HPL_W / (
HEAT_CAPACITY_OF_WATER_JPERKGK * (
tdhsup_K - tdhret_req_K)) # scale down the mass flow if the thermal demand is lowered
Q_therm_req_W -= master_to_slave_vars.HPLake_maxSize_W
else: # regular operation possible
Q_therm_HPL_W = Q_therm_req_W.copy()
mdot_DH_to_Lake_kgpers = Q_therm_HPL_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K))
Q_therm_req_W = 0
C_HPL_el, E_HPLake_req_W, Q_HPL_cold_primary_W, Q_HPL_therm_W = HPLake_op_cost(mdot_DH_to_Lake_kgpers, tdhsup_K, tdhret_req_K, T_LAKE, lca)
# Storing Data
source_HP_Lake = 1
cost_HPLake_USD = C_HPL_el
Q_HPLake_gen_W = Q_therm_HPL_W
E_HPLake_req_W = E_HPLake_req_W
Q_coldsource_HPLake_W = Q_HPL_cold_primary_W
if current_source == 'CHP' and Q_therm_req_W > 0: # start activating the combined cycles
# By definition, one can either activate the CHP (NG-CC) or ORC (Furnace) BUT NOT BOTH at the same time (not activated by Master)
Cost_CC = 0.0
source_CHP = 0
cost_CHP_USD = 0.0
Q_CHP_gen_W = 0.0
Gas_used_CHP_W = 0.0
E_CHP_gen_W = 0
if (master_to_slave_vars.CC_on) == 1 and Q_therm_req_W > 0 and CC_ALLOWED == 1: # only operate if the plant is available
CC_op_cost_data = calc_cop_CCGT(master_to_slave_vars.CC_GT_SIZE_W, tdhsup_K, master_to_slave_vars.gt_fuel,
prices, lca) # create cost information
Q_used_prim_CC_fn_W = CC_op_cost_data['q_input_fn_q_output_W']
cost_per_Wh_CC_fn = CC_op_cost_data['fuel_cost_per_Wh_th_fn_q_output_W'] # gets interpolated cost function
q_output_CC_min_W = CC_op_cost_data['q_output_min_W']
Q_output_CC_max_W = CC_op_cost_data['q_output_max_W']
eta_elec_interpol = CC_op_cost_data['eta_el_fn_q_input']
if Q_therm_req_W > q_output_CC_min_W: # operation Possible if above minimal load
if Q_therm_req_W < Q_output_CC_max_W: # Normal operation Possible within partload regime
cost_per_Wh_CC = cost_per_Wh_CC_fn(Q_therm_req_W)
Q_used_prim_CC_W = Q_used_prim_CC_fn_W(Q_therm_req_W)
Q_CC_delivered_W = Q_therm_req_W.copy()
Q_therm_req_W = 0
E_CHP_gen_W = np.float(eta_elec_interpol(Q_used_prim_CC_W)) * Q_used_prim_CC_W
else: # Only part of the demand can be delivered as 100% load achieved
cost_per_Wh_CC = cost_per_Wh_CC_fn(Q_output_CC_max_W)
Q_used_prim_CC_W = Q_used_prim_CC_fn_W(Q_output_CC_max_W)
Q_CC_delivered_W = Q_output_CC_max_W
Q_therm_req_W -= Q_output_CC_max_W
E_CHP_gen_W = np.float(eta_elec_interpol(Q_output_CC_max_W)) * Q_used_prim_CC_W
Cost_CC = cost_per_Wh_CC * Q_CC_delivered_W
source_CHP = 1
cost_CHP_USD = Cost_CC
Q_CHP_gen_W = Q_CC_delivered_W
Gas_used_CHP_W = Q_used_prim_CC_W
if (master_to_slave_vars.Furnace_on) == 1 and Q_therm_req_W > 0: # Activate Furnace if its there. By definition, either ORC or NG-CC!
Q_Furn_therm_W = 0
source_Furnace = 0
cost_Furnace_USD = 0.0
Q_Furnace_gen_W = 0.0
Wood_used_Furnace_W = 0.0
Q_Furn_prim_W = 0.0
if Q_therm_req_W > (
gv.Furn_min_Load * master_to_slave_vars.Furnace_Q_max_W): # Operate only if its above minimal load
if Q_therm_req_W > master_to_slave_vars.Furnace_Q_max_W: # scale down if above maximum load, Furnace operates at max. capacity
Furnace_Cost_Data = furnace_op_cost(master_to_slave_vars.Furnace_Q_max_W, master_to_slave_vars.Furnace_Q_max_W, tdhret_req_K,
master_to_slave_vars.Furn_Moist_type, gv)
C_Furn_therm = Furnace_Cost_Data[0]
Q_Furn_prim_W = Furnace_Cost_Data[2]
Q_Furn_therm_W = master_to_slave_vars.Furnace_Q_max_W
Q_therm_req_W -= Q_Furn_therm_W
E_Furnace_gen_W = Furnace_Cost_Data[4]
else: # Normal Operation Possible
Furnace_Cost_Data = furnace_op_cost(Q_therm_req_W, master_to_slave_vars.Furnace_Q_max_W, tdhret_req_K,
master_to_slave_vars.Furn_Moist_type, gv)
Q_Furn_prim_W = Furnace_Cost_Data[2]
C_Furn_therm = Furnace_Cost_Data[0]
Q_Furn_therm_W = Q_therm_req_W.copy()
E_Furnace_gen_W = Furnace_Cost_Data[4]
Q_therm_req_W = 0
source_Furnace = 1
cost_Furnace_USD = C_Furn_therm.copy()
Q_Furnace_gen_W = Q_Furn_therm_W
Wood_used_Furnace_W = Q_Furn_prim_W
if current_source == 'BoilerBase' and Q_therm_req_W > 0:
Q_therm_boiler_W = 0
if (master_to_slave_vars.Boiler_on) == 1:
source_BaseBoiler = 0
cost_BaseBoiler_USD = 0.0
Q_BaseBoiler_gen_W = 0.0
Gas_used_BaseBoiler_W = 0.0
E_BaseBoiler_req_W = 0.0
if Q_therm_req_W >= BOILER_MIN * master_to_slave_vars.Boiler_Q_max_W: # Boiler can be activated?
# Q_therm_boiler = Q_therm_req
if Q_therm_req_W >= master_to_slave_vars.Boiler_Q_max_W: # Boiler above maximum Load?
Q_therm_boiler_W = master_to_slave_vars.Boiler_Q_max_W
else:
Q_therm_boiler_W = Q_therm_req_W.copy()
C_boil_therm, C_boil_per_Wh, Q_primary_W, E_aux_Boiler_req_W = cond_boiler_op_cost(Q_therm_boiler_W, master_to_slave_vars.Boiler_Q_max_W, tdhret_req_K, \
master_to_slave_vars.BoilerType, master_to_slave_vars.EL_TYPE, gv, prices, lca)
source_BaseBoiler = 1
cost_BaseBoiler_USD = C_boil_therm
Q_BaseBoiler_gen_W = Q_therm_boiler_W
Gas_used_BaseBoiler_W = Q_primary_W
E_BaseBoiler_req_W = E_aux_Boiler_req_W
Q_therm_req_W -= Q_therm_boiler_W
if current_source == 'BoilerPeak' and Q_therm_req_W > 0:
if (master_to_slave_vars.BoilerPeak_on) == 1:
source_PeakBoiler = 0
cost_PeakBoiler_USD = 0.0
Q_PeakBoiler_gen_W = 0.0
Gas_used_PeakBoiler_W = 0
E_PeakBoiler_req_W = 0
if Q_therm_req_W > 0: # gv.Boiler_min*master_to_slave_vars.BoilerPeak_Q_max: # Boiler can be activated?
if Q_therm_req_W > master_to_slave_vars.BoilerPeak_Q_max_W: # Boiler above maximum Load?
Q_therm_boilerP_W = master_to_slave_vars.BoilerPeak_Q_max_W
Q_therm_req_W -= Q_therm_boilerP_W
else:
Q_therm_boilerP_W = Q_therm_req_W.copy()
Q_therm_req_W = 0
C_boil_thermP, C_boil_per_WhP, Q_primaryP_W, E_aux_BoilerP_W = cond_boiler_op_cost(Q_therm_boilerP_W, master_to_slave_vars.BoilerPeak_Q_max_W, tdhret_req_K, \
master_to_slave_vars.BoilerPeakType, master_to_slave_vars.EL_TYPE, gv, prices, lca)
source_PeakBoiler = 1
cost_PeakBoiler_USD = C_boil_thermP
Q_PeakBoiler_gen_W = Q_therm_boilerP_W
Gas_used_PeakBoiler_W = Q_primaryP_W
E_PeakBoiler_req_W = E_aux_BoilerP_W
Q_excess_W = 0
if np.floor(Q_therm_req_W) > 0:
if current_source == ACT_FIRST:
current_source = ACT_SECOND
elif current_source == ACT_SECOND:
current_source = ACT_THIRD
elif current_source == ACT_THIRD:
current_source = ACT_FOURTH
else:
Q_uncovered_W = Q_therm_req_W
break
elif round(Q_therm_req_W, 0) != 0:
Q_uncovered_W = 0 # Q_therm_req
Q_excess_W = -Q_therm_req_W
Q_therm_req_W = 0
# break
else:
Q_therm_req_W = 0
source_info = source_HP_Sewage, source_HP_Lake, source_GHP, source_CHP, source_Furnace, source_BaseBoiler, source_PeakBoiler
Q_source_data_W = Q_HPSew_gen_W, Q_HPLake_gen_W, Q_GHP_gen_W, Q_CHP_gen_W, Q_Furnace_gen_W, Q_BaseBoiler_gen_W, Q_PeakBoiler_gen_W, Q_uncovered_W
E_PP_el_data_W = E_HPSew_req_W, E_HPLake_req_W, E_GHP_req_W, E_CHP_gen_W, E_Furnace_gen_W, E_BaseBoiler_req_W, E_PeakBoiler_req_W
E_gas_data_W = Gas_used_HPSew_W, Gas_used_HPLake_W, Gas_used_GHP_W, Gas_used_CHP_W, Gas_used_Furnace_W, Gas_used_BaseBoiler_W, Gas_used_PeakBoiler_W
E_wood_data_W = Wood_used_HPSew_W, Wood_used_HPLake_W, Wood_used_GHP_W, Wood_used_CHP_W, Wood_used_Furnace_W, Wood_used_BaseBoiler_W, Wood_used_PeakBoiler_W
E_coldsource_data_W = Q_coldsource_HPSew_W, Q_coldsource_HPLake_W, Q_coldsource_GHP_W, Q_coldsource_CHP_W, \
Q_coldsource_Furnace_W, Q_coldsource_BaseBoiler_W, Q_coldsource_PeakBoiler_W
opex_output = {'Opex_var_HP_Sewage_USD':cost_HPSew_USD,
'Opex_var_HP_Lake_USD': cost_HPLake_USD,
'Opex_var_GHP_USD': cost_GHP_USD,
'Opex_var_CHP_USD': cost_CHP_USD,
'Opex_var_Furnace_USD': cost_Furnace_USD,
'Opex_var_BaseBoiler_USD': cost_BaseBoiler_USD,
'Opex_var_PeakBoiler_USD': cost_PeakBoiler_USD}
source_output = {'HP_Sewage': source_HP_Sewage,
'HP_Lake': source_HP_Lake,
'GHP': source_GHP,
'CHP': source_CHP,
'Furnace': source_Furnace,
'BaseBoiler': source_BaseBoiler,
'PeakBoiler': source_PeakBoiler}
Q_output = {'Q_HPSew_gen_W': Q_HPSew_gen_W,
'Q_HPLake_gen_W': Q_HPLake_gen_W,
'Q_GHP_gen_W': Q_GHP_gen_W,
'Q_CHP_gen_W': Q_CHP_gen_W,
'Q_Furnace_gen_W': Q_Furnace_gen_W,
'Q_BaseBoiler_gen_W': Q_BaseBoiler_gen_W,
'Q_PeakBoiler_gen_W': Q_PeakBoiler_gen_W,
'Q_uncovered_W': Q_uncovered_W}
E_output = {'E_HPSew_req_W': E_HPSew_req_W,
'E_HPLake_req_W': E_HPLake_req_W,
'E_GHP_req_W': E_GHP_req_W,
'E_CHP_gen_W': E_CHP_gen_W,
'E_Furnace_gen_W': E_Furnace_gen_W,
'E_BaseBoiler_req_W': E_BaseBoiler_req_W,
'E_PeakBoiler_req_W': E_PeakBoiler_req_W}
Gas_output = {'Gas_used_HPSew_W': Gas_used_HPSew_W,
'Gas_used_HPLake_W': Gas_used_HPLake_W,
'Gas_used_GHP_W': Gas_used_GHP_W,
'Gas_used_CHP_W': Gas_used_CHP_W,
'Gas_used_Furnace_W': Gas_used_Furnace_W,
'Gas_used_BaseBoiler_W': Gas_used_BaseBoiler_W,
'Gas_used_PeakBoiler_W': Gas_used_PeakBoiler_W}
Wood_output = {'Wood_used_HPSew_W': Wood_used_HPSew_W,
'Wood_used_HPLake_W': Wood_used_HPLake_W,
'Wood_used_GHP_W': Wood_used_GHP_W,
'Wood_used_CHP_W': Wood_used_CHP_W,
'Wood_used_Furnace_W': Wood_used_Furnace_W,
'Wood_used_BaseBoiler_W': Wood_used_BaseBoiler_W,
'Wood_used_PeakBoiler_W': Wood_used_PeakBoiler_W}
coldsource_output = {'Q_coldsource_HPSew_W': Q_coldsource_HPSew_W,
'Q_coldsource_HPLake_W': Q_coldsource_HPLake_W,
'Q_coldsource_GHP_W': Q_coldsource_GHP_W,
'Q_coldsource_CHP_W': Q_coldsource_CHP_W,
'Q_coldsource_Furnace_W': Q_coldsource_Furnace_W,
'Q_coldsource_BaseBoiler_W': Q_coldsource_BaseBoiler_W,
'Q_coldsource_PeakBoiler_W': Q_coldsource_PeakBoiler_W}
return opex_output, source_output, Q_output, E_output, Gas_output, Wood_output, coldsource_output, Q_excess_W
|
import hashlib
import uuid
import requests
from behave import step
from tenacity import retry, wait_fixed, stop_after_delay
from acceptance_tests.utilities.exception_manager_helper import quarantine_bad_messages_check_and_reset
from acceptance_tests.utilities.pubsub_helper import publish_to_pubsub
from acceptance_tests.utilities.test_case_helper import test_helper
from config import Config
ALL_INCOMING_TOPICS = [Config.PUBSUB_RECEIPT_TOPIC, Config.PUBSUB_REFUSAL_TOPIC, Config.PUBSUB_INVALID_CASE_TOPIC,
Config.PUBSUB_DEACTIVATE_UAC_TOPIC, Config.PUBSUB_PRINT_FULFILMENT_TOPIC,
Config.PUBSUB_UPDATE_SAMPLE_SENSITIVE_TOPIC, Config.PUBSUB_UAC_AUTHENTICATION_TOPIC]
@step("a bad json msg is sent to every topic consumed by RM")
def put_a_bad_msg_on_every_topic_on(context):
context.message_hashes = []
for topic in ALL_INCOMING_TOPICS:
message = 'not even close to json' + str(uuid.uuid4())
publish_to_pubsub(message,
Config.PUBSUB_PROJECT,
topic)
context.message_hashes.append(hashlib.sha256(message.encode('utf-8')).hexdigest())
context.sent_messages.append(message)
@step('each bad msg is seen by exception manager with the message containing "{expected_exception_msg}"')
def look_for_each_bad_msg(context, expected_exception_msg):
for message_hash in context.message_hashes:
_check_message_exception_as_expected(message_hash, expected_exception_msg)
response = requests.get(f'{Config.EXCEPTION_MANAGER_URL}/badmessages/summary')
response.raise_for_status()
bad_messages = response.json()
test_helper.assertEqual(len(bad_messages), len(context.message_hashes),
                            msg='actual number of bad msgs does not match expected number of hashes. '
                                f'bad_messages: {bad_messages}, context.message_hashes: {context.message_hashes}')
@retry(wait=wait_fixed(1), stop=stop_after_delay(30))
def _check_message_exception_as_expected(bad_message_hash, expected_exception):
response = requests.get(f'{Config.EXCEPTION_MANAGER_URL}/badmessage/{bad_message_hash}')
response.raise_for_status()
message_details = response.json()
test_helper.assertIn(expected_exception, message_details[0]['exceptionReport']['exceptionMessage'],
                         msg='Exception manager 1st exception message differs from expected message. '
                             f'All message_details from exception manager: {message_details}')
test_helper.assertGreater(message_details[0]['stats']['seenCount'], 1,
                              msg='Seen count is not greater than 1. '
                                  f'All message_details from exception manager: {message_details}')
@step('a bad message appears in exception manager with exception message containing "{expected_exception_msg}"')
def bad_message_appears_in_exception_manager(context, expected_exception_msg):
look_for_each_bad_msg(context, expected_exception_msg)
@step("each bad msg can be successfully quarantined")
def each_bad_msg_can_be_successfully_quarantined(context):
quarantine_bad_messages_check_and_reset(context.message_hashes)
|
"""A framework for restful APIs."""
# -----------------------------------------------------------------------------
# Module: dpa.restful
# Author: Josh Tomlinson (jtomlin)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports:
# -----------------------------------------------------------------------------
import copy
from dpa.restful.client import RestfulClient, RestfulClientError
# -----------------------------------------------------------------------------
# Public Classes
# -----------------------------------------------------------------------------
class CreateMixin(object):
# -------------------------------------------------------------------------
# Class methods:
# -------------------------------------------------------------------------
@classmethod
def create(cls, data, data_server=None):
try:
data = RestfulClient(data_server=data_server).execute_request(
'create', cls.data_type, data=data)
except RestfulClientError as e:
raise cls.exception_class(e)
# XXX
# get or create cache based on cls.__name__
# get object
return cls(data)
# -----------------------------------------------------------------------------
class DeleteMixin(object):
# -------------------------------------------------------------------------
# Class methods:
# -------------------------------------------------------------------------
@classmethod
def delete(cls, primary_key, data_server=None):
# XXX
# get cache based on cls.__name__
# remove by primary_key and id
try:
return RestfulClient(data_server=data_server).execute_request(
'delete', cls.data_type, primary_key=primary_key)
except RestfulClientError as e:
raise cls.exception_class(e)
# -----------------------------------------------------------------------------
class GetMixin(object):
# -------------------------------------------------------------------------
# Class methods:
# -------------------------------------------------------------------------
@classmethod
def get(cls, primary_key, data_server=None, **filters):
# XXX
# get or create cache based on cls.__name__
# get object for primary_key
# if exists, and not expired, return it
# if doesn't exist, query
# cache by primary_key and id
try:
data = RestfulClient(data_server=data_server).execute_request(
'get', cls.data_type, primary_key=primary_key, params=filters)
except RestfulClientError as e:
raise cls.exception_class(e)
return cls(data)
# -----------------------------------------------------------------------------
class ListMixin(object):
# -------------------------------------------------------------------------
# Class methods:
# -------------------------------------------------------------------------
# XXX cache method based on supplied filters with reasonable expiration
@classmethod
def list(cls, data_server=None, **filters):
try:
data_list = RestfulClient(data_server=data_server).execute_request(
'list', cls.data_type, params=filters)
except RestfulClientError as e:
raise cls.exception_class(e)
# XXX
# get or create cache based on cls.__name__
# for each piece of data returned:
# if object exist in cache for "id", update with new data
# otherwise, add new object to cache
return [cls(data) for data in data_list]
# -----------------------------------------------------------------------------
class UpdateMixin(object):
# -------------------------------------------------------------------------
# Public Methods:
# -------------------------------------------------------------------------
def update(self, primary_key, data, data_server=None):
cls = self.__class__
update_data = copy.deepcopy(self._data.data_dict)
# update the dictionary with the new data
for key, val in data.items():
if key not in update_data.keys():
raise cls.exception_class(
"Invalid key '{k}' supplied for update.".format(k=key)
)
if val is not None:
update_data[key] = val
try:
db_data = RestfulClient(data_server=data_server).execute_request(
'update', cls.data_type, primary_key=primary_key, data=update_data)
except RestfulClientError as e:
raise cls.exception_class(e)
# XXX handle cache update
tmp_obj = cls(db_data)
self._data = tmp_obj._data
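# Illustrative sketch of a model class composing these mixins (the names below are
# assumptions for illustration, not part of dpa.restful):
#
#   class Project(CreateMixin, GetMixin, ListMixin, UpdateMixin, DeleteMixin):
#       data_type = 'project'
#       exception_class = ProjectError
#
#       def __init__(self, data):
#           self._data = data  # simplified; real models wrap the returned data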
|
import tensorflow as tf
import os
import horovod.tensorflow as hvd
config = {
'rank_size': 1,
'shard': False,
# ======= basic config ======= #
'mode':'train', # "train","evaluate","train_and_evaluate"
# modify here for train_and_evaluate mode
'epochs_between_evals': 1, #used if mode is "train_and_evaluate"
'num_epochs': None,
# modify here for train mode
'max_train_steps': 100,
'data_url': '/data/slimImagenet',
'dtype': tf.float32,
'use_nesterov': True,
'label_smoothing':0.1, #If greater than 0 then smooth the labels.
'weight_decay': 0.0001,
'batch_size': 32, #minibatch size per node, total batchsize = batch_size*hvd.size()*itersize
'momentum': [0.9],
'lr': 0.001,
'max_epoch': 150,
#======= logger config =======
'display_every': 10,
'log_name': 'vgg16.log',
'log_dir': 'ckpt',
}
def vgg16_config():
hvd.init()
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['do_checkpoint'] = (hvd.rank() == 0)
return config
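# Illustrative usage (assumes Horovod is available; hvd.init() is called inside
# vgg16_config):
#   cfg = vgg16_config()
#   print(cfg['global_batch_size'], cfg['do_checkpoint'])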
|
import connexion
import logging
import datetime
from connexion import NoContent
# our memory-only pet storage
PETS = {}
def get_pets(limit, animal_type=None):
return {'pets': [pet for pet in PETS.values() if not animal_type or pet['animal_type'] == animal_type][:limit]}
def get_pet(pet_id):
pet = PETS.get(pet_id)
return pet or ('Not found', 404)
def put_pet(pet_id, pet):
exists = pet_id in PETS
pet['id'] = pet_id
if exists:
logging.info('Updating pet %s...', pet_id)
PETS[pet_id].update(pet)
else:
logging.info('Creating pet %s...', pet_id)
pet['created'] = datetime.datetime.utcnow()
PETS[pet_id] = pet
return NoContent, (200 if exists else 201)
def delete_pet(pet_id):
if pet_id in PETS:
logging.info('Deleting pet %s...', pet_id)
del PETS[pet_id]
return NoContent, 204
else:
return NoContent, 404
logging.basicConfig(level=logging.INFO)
app = connexion.FlaskApp(__name__, specification_dir='openapi/')
app.add_api('api.yaml')
app.run(port=8080)
application = app.app
|
#!/usr/bin/env python
# This is written with python3 syntax
import csv
import os
import json
INPUT_FILE_NAME = 'lookup.csv'
INPUT_FILE_PATH = os.path.join(os.getcwd(), INPUT_FILE_NAME)
CSV_DELIMITER = ','
OUTPUT_FILE_NAME = 'lookup.json'
OUTPUT_FILE_PATH = os.path.join(os.getcwd(), OUTPUT_FILE_NAME)
LOOKUP_COL = "lookup_id"
# Each CSV line will be converted into a dictionary object, and pushed
# onto an array. This ensures that the generated json
# will have the same order as the lines in the CSV file.
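# Illustrative example (column names are placeholders): a CSV row "a1,Alice,10"
# under the header "lookup_id,name,value" becomes the JSON entry
#   "a1" : {"name": "Alice", "value": "10"}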
array_of_ordered_dict = []
# function to convert the CSV into an array that contains a json-like
# dictionary for each line in the CSV file
def create_ordered_dict_from_input():
with open(INPUT_FILE_PATH) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
print("Reading %s" % INPUT_FILE_PATH)
for row in csv_reader:
array_of_ordered_dict.append(row)
print("Finished reading %s" % INPUT_FILE_PATH)
    return array_of_ordered_dict
# Convert the array of dictionary objects into a json object.
def convert_array_of_ordered_dict_to_json(array_of_ordered_dict):
print("Creating %s" % OUTPUT_FILE_PATH)
f = open(OUTPUT_FILE_PATH, "w")
# Create the json lookup table
f.write("{\n")
arr_len = len(array_of_ordered_dict)
for idx, row in enumerate(array_of_ordered_dict):
lookup_id = row[LOOKUP_COL]
del row[LOOKUP_COL]
# lookup_id is a dictionary key, with a json dict as the value
json_element = '"{0}" : {1}'.format(lookup_id, json.dumps(row))
# If this is the last json element, then the dictionary should be closed rather than
# adding a trailing comma.
json_line = ''.join([json_element, "\n}\n"]) if (idx+1) == arr_len else ''.join([json_element, ",\n"])
f.write(json_line)
print("Finished writing %s" % OUTPUT_FILE_PATH)
return 0
if __name__ == "__main__":
array_of_ordered_dict = create_ordered_dict_from_input()
convert_array_of_ordered_dict_to_json(array_of_ordered_dict)
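# Example (hypothetical data): a lookup.csv containing
#   lookup_id,name,colour
#   a1,apple,green
# produces a lookup.json of the form
#   {
#   "a1" : {"name": "apple", "colour": "green"}
#   }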
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/12/8 3:02 PM
# 6.3 Working with gates and activation functions
# 1. Import the required libraries and create a graph session. This is also a good example of how to set random seeds in TensorFlow: we seed both the TensorFlow and NumPy random number generators, so the results should be reproducible for the same seeds.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
sess = tf.Session()
tf.set_random_seed(5)
np.random.seed(42)
# 2. Declare the batch size, model variables, dataset, and placeholder. Normally distributed data is fed into two similar neural network models that differ only in their activation functions.
batch_size = 50
a1 = tf.Variable(tf.random_normal(shape=[1,1]))
b1 = tf.Variable(tf.random_uniform(shape=[1,1]))
a2 = tf.Variable(tf.random_normal(shape=[1,1]))
b2 = tf.Variable(tf.random_uniform(shape=[1,1]))
x = np.random.normal(2, 0.1, 500)
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# 3. Declare the two training models: one with a sigmoid activation and one with a ReLU activation.
sigmoid_activation = tf.sigmoid(tf.add(tf.matmul(x_data, a1), b1))
relu_activation = tf.nn.relu(tf.add(tf.matmul(x_data, a2), b2))
# 4. Both loss functions are the mean L2 norm of the difference between the model output and the target value of 0.75.
loss1 = tf.reduce_mean(tf.square(tf.subtract(sigmoid_activation, 0.75)))
loss2 = tf.reduce_mean(tf.square(tf.subtract(relu_activation, 0.75)))
# 5. Declare the optimization algorithm and initialize the variables.
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step_sigmoid = my_opt.minimize(loss1)
train_step_relu = my_opt.minimize(loss2)
init = tf.global_variables_initializer()
sess.run(init)
# 6. Train each model for 750 iterations, saving the loss values and activation outputs for later plotting.
loss_vec_sigmoid = []
loss_vec_relu = []
activation_sigmoid = []
activation_relu = []
for i in range(750):
rand_indices = np.random.choice(len(x), size=batch_size)
x_vals = np.transpose([x[rand_indices]])
sess.run(train_step_sigmoid, feed_dict={x_data: x_vals})
sess.run(train_step_relu, feed_dict={x_data: x_vals})
loss_vec_sigmoid.append(sess.run(loss1, feed_dict={x_data: x_vals}))
loss_vec_relu.append(sess.run(loss2, feed_dict={x_data: x_vals}))
activation_sigmoid.append(np.mean(sess.run(sigmoid_activation, feed_dict={x_data: x_vals})))
activation_relu.append(np.mean(sess.run(relu_activation, feed_dict={x_data: x_vals})))
# 7. The following code plots the activation outputs and the loss values.
plt.plot(activation_sigmoid, 'k-', label='Sigmoid Activation')
plt.plot(activation_relu, 'r--', label='Relu Activation')
plt.ylim([0, 1.0])
plt.title('Activation Outputs')
plt.xlabel('Generation')
plt.ylabel('Outputs')
plt.legend(loc='upper right')
plt.show()
plt.plot(loss_vec_sigmoid, 'k-', label='Sigmoid Loss')
plt.plot(loss_vec_relu, 'r--', label='Relu Loss')
plt.ylim([0, 1.0])
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
|
#!/usr/bin/env python3
from aws_cdk import core
import os
from serverless_pipeline.serverless_pipeline_stack import ServerlessPipelineStack
app = core.App()
ServerlessPipelineStack(app, "serverless-pipeline", env=core.Environment(
account=os.getenv('AWS_CDK_DEFAULT_ACCOUNT'),
region=os.getenv('AWS_CDK_DEFAULT_REGION'),
))
app.synth()
|
from pathlib import Path
examples_folder = Path(__file__).parent.parent.parent / "examples" / "etl"
examples = {
    file.stem: file.read_text()
    for file in examples_folder.iterdir()
    if file.suffix == ".py"
}
print(list(examples.keys()))
template = (Path(__file__).parent / "README.template.md").read_text()
(Path(__file__).parent / "README.md").write_text(template.format(**examples))
|
#!/usr/bin/env python3
#
# SMNCopyNumberCaller
# Copyright 2019-2020 Illumina, Inc.
# All rights reserved.
#
# Author: Xiao Chen <xchen2@illumina.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import sys
import argparse
import json
import logging
import datetime
from collections import namedtuple
import pysam
from depth_calling.snp_count import get_supporting_reads, get_fraction, get_snp_position
from depth_calling.gmm import Gmm
from depth_calling.utilities import (
parse_gmm_file,
parse_region_file,
open_alignment_file,
)
from depth_calling.bin_count import (
get_normed_depth,
get_normed_depth_from_count,
get_read_length,
)
from caller.call_smn12 import get_smn12_call
MAD_THRESHOLD = 0.11
def load_parameters():
"""Return parameters."""
parser = argparse.ArgumentParser(
description="Call Copy number of full-length SMN1, full-length SMN2 and \
SMN* (Exon7-8 deletion) from a WGS bam file."
)
parser.add_argument(
"--manifest",
help="Manifest listing absolute paths to input BAM/CRAM files",
required=True,
)
parser.add_argument(
"--genome", help="Reference genome, select from 19, 37, or 38", required=True
)
parser.add_argument("--outDir", help="Output directory", required=True)
parser.add_argument("--prefix", help="Prefix to output file", required=True)
parser.add_argument(
"--threads",
help="Number of threads to use. Default is 1",
type=int,
default=1,
required=False,
)
parser.add_argument(
"--reference",
help="Optional path to reference fasta file for CRAM",
required=False,
)
parser.add_argument(
"--countFilePath", help="Optional path to count files", required=False
)
args = parser.parse_args()
if args.genome not in ["19", "37", "38"]:
raise Exception("Genome not recognized. Select from 19, 37, or 38")
return args
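# Example invocation (hypothetical script name, paths and prefix):
#   python smn_caller.py --manifest manifest.txt --genome 38 --outDir results --prefix batch1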
def smn_cn_caller(
bam,
region_dic,
gmm_parameter,
snp_db,
variant_db,
threads,
count_file=None,
reference_fasta=None,
):
"""Return SMN CN calls for each sample."""
# 1. read counting, normalization
if count_file is not None:
bamfile = open_alignment_file(bam, reference_fasta)
reads = bamfile.fetch()
read_length = get_read_length(reads)
bamfile.close()
normalized_depth = get_normed_depth_from_count(
count_file, region_dic, read_length, gc_correct=False
)
else:
normalized_depth = get_normed_depth(
bam, region_dic, threads, reference=reference_fasta, gc_correct=False
)
# 2. GMM and CN call
cn_call = namedtuple("cn_call", "exon16_cn exon16_depth exon78_cn exon78_depth")
gmm_exon16 = Gmm()
gmm_exon16.set_gmm_par(gmm_parameter, "exon1-6")
gcall_exon16 = gmm_exon16.gmm_call(normalized_depth.normalized["exon16"])
gmm_exon78 = Gmm()
gmm_exon78.set_gmm_par(gmm_parameter, "exon7-8")
gcall_exon78 = gmm_exon78.gmm_call(normalized_depth.normalized["exon78"])
raw_cn_call = cn_call(
gcall_exon16.cn,
gcall_exon16.depth_value,
gcall_exon78.cn,
gcall_exon78.depth_value,
)
# 3. Get SNP ratios
smn1_read_count, smn2_read_count = get_supporting_reads(
bam,
snp_db.dsnp1,
snp_db.dsnp2,
snp_db.nchr,
snp_db.dindex,
reference=reference_fasta,
)
smn1_fraction = get_fraction(smn1_read_count, smn2_read_count)
var_ref_count, var_alt_count = get_supporting_reads(
bam,
variant_db.dsnp1,
variant_db.dsnp2,
variant_db.nchr,
variant_db.dindex,
reference=reference_fasta,
)
# 4. Call CN of SMN1 and SMN2
final_call = get_smn12_call(
raw_cn_call,
smn1_read_count,
smn2_read_count,
var_ref_count,
var_alt_count,
normalized_depth.mediandepth,
)
# 5. Prepare final call set
sample_call = namedtuple(
"sample_call",
"Coverage_MAD Median_depth \
Full_length_CN_raw Total_CN_raw \
SMN1_read_support SMN2_read_support SMN1_fraction \
g27134TG_REF_count g27134TG_ALT_count",
)
sample_cn_call = sample_call(
round(normalized_depth.mad, 3),
round(normalized_depth.mediandepth, 2),
raw_cn_call.exon78_depth,
raw_cn_call.exon16_depth,
smn1_read_count,
smn2_read_count,
[round(a, 2) for a in smn1_fraction],
var_ref_count,
var_alt_count,
)
doutput = sample_cn_call._asdict()
doutput.update(final_call._asdict())
return doutput
def write_to_tsv(final_output, out_tsv):
"""Write to tsv output."""
header = [
"Sample",
"isSMA",
"isCarrier",
"SMN1_CN",
"SMN2_CN",
"SMN2delta7-8_CN",
"Total_CN_raw",
"Full_length_CN_raw",
"g.27134T>G_CN",
"SMN1_CN_raw",
]
with open(out_tsv, "w") as tsv_output:
tsv_output.write("\t".join(header) + "\n")
for sample_id in final_output:
final_call = final_output[sample_id]
output_per_sample = [
sample_id,
final_call["isSMA"],
final_call["isCarrier"],
final_call["SMN1"],
final_call["SMN2"],
final_call["SMN2delta78"],
final_call["Total_CN_raw"],
final_call["Full_length_CN_raw"],
final_call["g27134TG_CN"],
",".join([str(a) for a in final_call["SMN1_CN_raw"]]),
]
tsv_output.write("\t".join([str(a) for a in output_per_sample]) + "\n")
def main():
parameters = load_parameters()
manifest = parameters.manifest
outdir = parameters.outDir
genome = parameters.genome
prefix = parameters.prefix
threads = parameters.threads
reference_fasta = parameters.reference
path_count_file = parameters.countFilePath
logging.basicConfig(level=logging.DEBUG)
datadir = os.path.join(os.path.dirname(__file__), "data")
# Region file to use
region_file = os.path.join(datadir, "SMN_region_%s.bed" % genome)
snp_file = os.path.join(datadir, "SMN_SNP_%s.txt" % genome)
variant_file = os.path.join(datadir, "SMN_target_variant_%s.txt" % genome)
gmm_file = os.path.join(datadir, "SMN_gmm.txt")
    for required_file in [region_file, snp_file, variant_file, gmm_file]:
        if not os.path.exists(required_file):
            raise Exception("File %s not found." % required_file)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
snp_db = get_snp_position(snp_file)
variant_db = get_snp_position(variant_file)
gmm_parameter = parse_gmm_file(gmm_file)
region_dic = parse_region_file(region_file)
out_json = os.path.join(outdir, prefix + ".json")
out_tsv = os.path.join(outdir, prefix + ".tsv")
final_output = {}
with open(manifest) as read_manifest:
for line in read_manifest:
bam_name = line.strip()
sample_id = os.path.splitext(os.path.basename(bam_name))[0]
count_file = None
if path_count_file is not None:
count_file = os.path.join(path_count_file, sample_id + "_count.txt")
            if count_file is None and not os.path.exists(bam_name):
                logging.warning(
                    "Input alignment file for sample %s does not exist.", sample_id
                )
            elif count_file is not None and not os.path.exists(count_file):
                logging.warning(
                    "Input count file for sample %s does not exist.", sample_id
                )
else:
logging.info(
"Processing sample %s at %s", sample_id, datetime.datetime.now()
)
smn_call = smn_cn_caller(
bam_name,
region_dic,
gmm_parameter,
snp_db,
variant_db,
threads,
count_file,
reference_fasta,
)
# Use normalized coverage MAD across stable regions
# as a sample QC measure.
                if smn_call["Coverage_MAD"] > MAD_THRESHOLD:
                    logging.warning(
                        "Sample %s has uneven coverage. CN calls may be unreliable.",
                        sample_id,
                    )
final_output.setdefault(sample_id, smn_call)
# Write to json
logging.info("Writing to json at %s", datetime.datetime.now())
with open(out_json, "w") as json_output:
json.dump(final_output, json_output)
# Write to tsv
logging.info("Writing to tsv at %s", datetime.datetime.now())
write_to_tsv(final_output, out_tsv)
if __name__ == "__main__":
main()
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import os
from abc import ABCMeta
from typing import Callable
from typing import Dict
from typing import final
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
from disent.dataset.util.hdf5 import hdf5_resave_file
from disent.util.inout.cache import stalefile
from disent.util.function import wrapped_partial
from disent.util.inout.files import retrieve_file
from disent.util.inout.paths import filename_from_url
from disent.util.inout.paths import modify_file_name
# ========================================================================= #
# data objects #
# ========================================================================= #
class DataFile(object, metaclass=ABCMeta):
"""
base DataFile that does nothing, if the file does
not exist or it has the incorrect hash, then that's your problem!
"""
def __init__(self, file_name: str):
self._file_name = file_name
@final
@property
def out_name(self) -> str:
return self._file_name
def prepare(self, out_dir: str) -> str:
# TODO: maybe check that the file exists or not and raise a FileNotFoundError?
pass
def __repr__(self):
return f'{self.__class__.__name__}(out_name={repr(self.out_name)})'
class DataFileHashed(DataFile, metaclass=ABCMeta):
"""
Abstract Class
- Base DataFile class that guarantees a file to exist,
if the file does not exist, or the hash of the file is
incorrect, then the file is re-generated.
"""
def __init__(
self,
file_name: str,
file_hash: Optional[Union[str, Dict[str, str]]],
hash_type: str = 'md5',
hash_mode: str = 'fast',
):
super().__init__(file_name=file_name)
self._file_hash = file_hash
self._hash_type = hash_type
self._hash_mode = hash_mode
def prepare(self, out_dir: str) -> str:
@stalefile(file=os.path.join(out_dir, self._file_name), hash=self._file_hash, hash_type=self._hash_type, hash_mode=self._hash_mode)
def wrapped(out_file):
self._prepare(out_dir=out_dir, out_file=out_file)
return wrapped()
def _prepare(self, out_dir: str, out_file: str) -> NoReturn:
# TODO: maybe raise a FileNotFoundError or a HashError instead?
raise NotImplementedError
class DataFileHashedDl(DataFileHashed):
"""
Download a file
- uri can also be a file to perform a copy instead of download,
useful for example if you want to retrieve a file from a network drive.
"""
def __init__(
self,
uri: str,
uri_hash: Optional[Union[str, Dict[str, str]]],
uri_name: Optional[str] = None,
hash_type: str = 'md5',
hash_mode: str = 'fast',
):
super().__init__(
file_name=filename_from_url(uri) if (uri_name is None) else uri_name,
file_hash=uri_hash,
hash_type=hash_type,
hash_mode=hash_mode
)
self._uri = uri
def _prepare(self, out_dir: str, out_file: str):
retrieve_file(src_uri=self._uri, dst_path=out_file, overwrite_existing=True)
def __repr__(self):
return f'{self.__class__.__name__}(uri={repr(self._uri)}, out_name={repr(self.out_name)})'
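# A minimal usage sketch (hypothetical URI; real datasets supply their own URIs and hashes):
#   datafile = DataFileHashedDl(uri='https://example.com/data.npz', uri_hash=None)
#   local_path = datafile.prepare(out_dir='/tmp/disent-data')
# prepare() returns the path of the downloaded (or copied) file inside out_dir,
# re-fetching only when the stale-file hash check fails.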
class DataFileHashedDlGen(DataFileHashed, metaclass=ABCMeta):
"""
Abstract class
- download a file and perform some processing on that file.
"""
def __init__(
self,
# download & save files
uri: str,
uri_hash: Optional[Union[str, Dict[str, str]]],
file_hash: Optional[Union[str, Dict[str, str]]],
# save paths
uri_name: Optional[str] = None,
file_name: Optional[str] = None,
# hash settings
hash_type: str = 'md5',
hash_mode: str = 'fast',
):
self._dl_obj = DataFileHashedDl(
uri=uri,
uri_hash=uri_hash,
uri_name=uri_name,
hash_type=hash_type,
hash_mode=hash_mode,
)
super().__init__(
file_name=modify_file_name(self._dl_obj.out_name, prefix='gen') if (file_name is None) else file_name,
file_hash=file_hash,
hash_type=hash_type,
hash_mode=hash_mode,
)
def _prepare(self, out_dir: str, out_file: str):
inp_file = self._dl_obj.prepare(out_dir=out_dir)
self._generate(inp_file=inp_file, out_file=out_file)
def _generate(self, inp_file: str, out_file: str):
raise NotImplementedError
def __repr__(self):
return f'{self.__class__.__name__}(uri={repr(self._dl_obj._uri)}, uri_name={repr(self._dl_obj.out_name)}, out_name={repr(self.out_name)})'
class DataFileHashedDlH5(DataFileHashedDlGen):
"""
Downloads an hdf5 file and pre-processes it into the specified chunk_size.
"""
def __init__(
self,
# download & save files
uri: str,
uri_hash: Optional[Union[str, Dict[str, str]]],
file_hash: Optional[Union[str, Dict[str, str]]],
# h5 re-save settings
hdf5_dataset_name: str,
hdf5_chunk_size: Tuple[int, ...],
hdf5_compression: Optional[str] = 'gzip',
hdf5_compression_lvl: Optional[int] = 4,
hdf5_dtype: Optional[Union[np.dtype, str]] = None,
hdf5_mutator: Optional[Callable[[np.ndarray], np.ndarray]] = None,
hdf5_obs_shape: Optional[Sequence[int]] = None,
# save paths
uri_name: Optional[str] = None,
file_name: Optional[str] = None,
# hash settings
hash_type: str = 'md5',
hash_mode: str = 'fast',
):
super().__init__(
file_name=file_name,
file_hash=file_hash,
uri=uri,
uri_hash=uri_hash,
uri_name=uri_name,
hash_type=hash_type,
hash_mode=hash_mode,
)
self._hdf5_resave_file = wrapped_partial(
hdf5_resave_file,
dataset_name=hdf5_dataset_name,
chunk_size=hdf5_chunk_size,
compression=hdf5_compression,
compression_lvl=hdf5_compression_lvl,
out_dtype=hdf5_dtype,
out_mutator=hdf5_mutator,
obs_shape=hdf5_obs_shape,
)
# save the dataset name
self._dataset_name = hdf5_dataset_name
@property
def dataset_name(self) -> str:
return self._dataset_name
def _generate(self, inp_file: str, out_file: str):
self._hdf5_resave_file(inp_path=inp_file, out_path=out_file)
# ========================================================================= #
# END #
# ========================================================================= #
|
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import json
import os
import pathlib
import platform
import subprocess
import sys
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from datasets.load_datasets import try_load_dataset
def filter_stderr(text: str) -> str:
    # remove the informational 'Intel(R) Extension for Scikit-learn* enabled' message printed to stderr
fake_error_message = ('Intel(R) Extension for Scikit-learn* enabled ' +
'(https://github.com/intel/scikit-learn-intelex)')
return ''.join(text.split(fake_error_message))
def filter_stdout(text: str) -> Tuple[str, str]:
verbosity_letters = 'EWIDT'
filtered, extra = '', ''
for line in text.split('\n'):
if line == '':
continue
to_remove = False
for letter in verbosity_letters:
if line.startswith(f'[{letter}]'):
to_remove = True
break
if to_remove:
extra += line + '\n'
else:
filtered += line + '\n'
return filtered, extra
def find_the_dataset(name: str, fullpath: str) -> bool:
return os.path.isfile(fullpath) or try_load_dataset(
dataset_name=name, output_directory=pathlib.Path(fullpath).parent)
def read_output_from_command(command: str,
                             env: Optional[Dict[str, str]] = None) -> Tuple[str, str]:
    # Avoid a mutable default argument: copy the environment per call.
    if env is None:
        env = os.environ.copy()
    if "PYTHONPATH" in env:
        env["PYTHONPATH"] += ":" + os.path.dirname(os.path.abspath(__file__))
    else:
        env["PYTHONPATH"] = os.path.dirname(os.path.abspath(__file__))
    res = subprocess.run(command.split(' '), stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, encoding='utf-8', env=env)
    return res.stdout[:-1], res.stderr[:-1]
def run_command(command: str,
                env: Optional[Dict[str, str]] = None) -> None:
    # Avoid a mutable default argument: copy the environment per call.
    if env is None:
        env = os.environ.copy()
    if "PYTHONPATH" in env:
        env["PYTHONPATH"] += ":" + os.path.dirname(os.path.abspath(__file__))
    else:
        env["PYTHONPATH"] = os.path.dirname(os.path.abspath(__file__))
    subprocess.run(command.split(' '), encoding='utf-8', env=env)
def parse_lscpu_lscl_info(command_output: str) -> Dict[str, str]:
res: Dict[str, str] = {}
for elem in command_output.strip().split('\n'):
splt = elem.split(':')
res[splt[0]] = splt[1]
return res
def get_hw_parameters() -> Dict[str, Union[Dict[str, Any], float]]:
if 'Linux' not in platform.platform():
return {}
hw_params: Dict[str, Union[Dict[str, str], float]] = {'CPU': {}}
# get CPU information
lscpu_info, _ = read_output_from_command('lscpu')
    for line in lscpu_info.split('\n'):
        if ':' not in line:
            continue
        # normalize whitespace within each line (joining across the whole output
        # would destroy the newlines that the per-line split relies on)
        k, v = (' '.join(part.split()) for part in line.split(':', 1))
        if k == 'CPU MHz':
            continue
        cast(Dict[str, str], hw_params['CPU'])[k] = v
# get RAM size
mem_info, _ = read_output_from_command('free -b')
mem_info = mem_info.split('\n')[1]
mem_info = ' '.join(mem_info.split())
hw_params['RAM size[GB]'] = int(mem_info.split(' ')[1]) / 2 ** 30
# get Intel GPU information
try:
lsgpu_info, _ = read_output_from_command(
'lscl --device-type=gpu --platform-vendor=Intel')
device_num = 0
start_idx = lsgpu_info.find('Device ')
while start_idx >= 0:
start_idx = lsgpu_info.find(':', start_idx) + 1
end_idx = lsgpu_info.find('Device ', start_idx)
hw_params[f'GPU Intel #{device_num + 1}'] = parse_lscpu_lscl_info(
lsgpu_info[start_idx: end_idx])
device_num += 1
start_idx = end_idx
except (FileNotFoundError, json.JSONDecodeError):
pass
# get Nvidia GPU information
# try:
# gpu_info, _ = read_output_from_command(
# 'nvidia-smi --query-gpu=name,memory.total,driver_version,pstate '
# '--format=csv,noheader')
# gpu_info_arr = gpu_info.split(', ')
# hw_params['GPU Nvidia'] = {
# 'Name': gpu_info_arr[0],
# 'Memory size': gpu_info_arr[1],
# 'Performance mode': gpu_info_arr[3]
# }
# except (FileNotFoundError, json.JSONDecodeError):
# pass
return hw_params
def get_sw_parameters() -> Dict[str, Dict[str, Any]]:
sw_params = {}
# try:
# gpu_info, _ = read_output_from_command(
# 'nvidia-smi --query-gpu=name,memory.total,driver_version,pstate '
# '--format=csv,noheader')
# info_arr = gpu_info.split(', ')
# sw_params['GPU_driver'] = {'version': info_arr[2]}
# # alert if GPU is already running any processes
# gpu_processes, _ = read_output_from_command(
# 'nvidia-smi --query-compute-apps=name,pid,used_memory '
# '--format=csv,noheader')
# if gpu_processes != '':
# print(f'There are running processes on GPU:\n{gpu_processes}',
# file=sys.stderr)
# except (FileNotFoundError, json.JSONDecodeError):
# pass
# get python packages info from conda
try:
conda_list, _ = read_output_from_command('conda list --json')
needed_columns = ['version', 'build_string', 'channel']
conda_list_json: List[Dict[str, str]] = json.loads(conda_list)
for pkg in conda_list_json:
pkg_info = {}
for col in needed_columns:
if col in pkg:
pkg_info[col] = pkg[col]
sw_params[pkg['name']] = pkg_info
except (FileNotFoundError, json.JSONDecodeError):
pass
return sw_params
def generate_cases(params: Dict[str, Union[List[Any], Any]]) -> List[str]:
'''
Generate cases for benchmarking by iterating the parameter values
'''
commands = ['']
for param, values in params.items():
if isinstance(values, list):
prev_len = len(commands)
commands *= len(values)
dashes = '-' if len(param) == 1 else '--'
for command_num in range(prev_len):
for value_num in range(len(values)):
commands[prev_len * value_num + command_num] += ' ' + \
dashes + param + ' ' + str(values[value_num])
else:
dashes = '-' if len(param) == 1 else '--'
for command_num in range(len(commands)):
commands[command_num] += ' ' + dashes + param + ' ' + str(values)
return commands
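# Example (hypothetical parameters):
#   generate_cases({'data': ['a.csv', 'b.csv'], 'n': 10})
#   -> [' --data a.csv --n 10', ' --data b.csv --n 10']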
|
from django.shortcuts import render
def main_view(request):
return render(request, 'main/main.html')
|
# Generated by Django 2.1.7 on 2019-02-16 10:25
import django.contrib.auth.models
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Marketer',
fields=[
('user_ptr',
models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,
primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('business_registration_number', models.CharField(max_length=128, unique=True)),
('company_name', models.CharField(max_length=128, null=True)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='customer',
name='nickname',
field=models.CharField(help_text='회원 닉네임', max_length=50),
),
]
|
#!/usr/local/bin/python3.4
# encoding: utf-8
'''
@author: Itay Moav
@copyright: 2014 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
import traceback
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/..')
from app import parser as parser
import app.upgrade
def main(parser):
'''Command line options.'''
try:
# Setup argument parser
parser.add_argument("--limit", dest="limit_files", action="store",nargs='?', help="Number of files to process")
parser.add_argument("--archive", dest="archive_files", action="store_true", help="Archive all successfully processed files")
parser.add_argument("--force_test", dest="test_upgrade", action="store_true", help="Test the upgrade on a test DB before actual run. " + \
"NOTICE! To run just tests, do not use the --all or --limit args")
        parser.add_argument("--with_schema", dest="with_schema_checker", \
                            action="store_true", help="Runs the full schema checker. If you have a test server, " + \
                            "it will run there first, right after running the tests. " + \
                            "Otherwise, it will run only on the real server.")
parser.add_argument("--unblock", dest="file_name_to_unblock", \
action="store",nargs='?', help="DANGEROUS! Takes an upgrade file name as arg. " + \
"If it is not completed, it will remove it from the tracking DB")
parser.add_argument("--mark_completed", dest="file_name_to_mark_complete", \
action="store",nargs='?', help="DANGEROUS! Takes an upgrade file name as arg. " + \
"If it is in the file system, it will mark it completed in the tracking DB")
parser.add_argument("--mark_complete", dest="file_name_to_mark_complete", \
action="store",nargs='?', help="DANGEROUS! Takes an upgrade file name as arg. " + \
"If it is in the file system, it will mark it completed in the tracking DB")
args = app.init(parser)
app.upgrade.run(args)
except Exception:
traceback.print_exc()
return 1
#++++++++++++++++++++++++++++++++++++ MAIN ENTRY POINT ++++++++++++++++++++++++++++++++++
sys.exit(main(parser))
|
import sys
sys.path.append("..")
from common.data_models import Taxon
print(Taxon())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import datetime
from collections import namedtuple
import git
from github.AuthenticatedUser import AuthenticatedUser
from github.Repository import Repository
from bincrafters_conventions.bincrafters_conventions import Command as BincraftersConventionsCommand
from conan_readme_generator.main import run as conan_readme_generator_run
from conan_repo_actions import NAME
from conan_repo_actions.base import ActionBase, ActionInterrupted
from conan_repo_actions.default_branch import WhichBranch
from conan_repo_actions.util import Configuration, chargv, chdir, GithubUser, input_ask_question_yn
from conan_repo_actions.fork_create import fork_create, ForkCreateAction
from conan_repo_actions.default_branch import ConanRepo
from pathlib import Path
import shutil
import typing
def main():
parser = argparse.ArgumentParser(description='Apply bincrafters conventions, update readme and push to a remote')
parser.add_argument('--owner_login', type=str, required=True, help='owner of the repo to clone')
parser.add_argument('--keep_clone', action='store_true', help='do not remove already checked out repos')
parser.add_argument('--git_wd', type=Path, default=None, help='path where to clone the repos to')
parser.add_argument('--interactive', action='store_true', help='interactive')
parser.add_argument('--channel_suffix', default=generate_default_channel_suffix(),
help='suffix to append to the channel')
argparse_add_which_branch_option(parser)
argparse_add_what_conventions(parser)
parser.add_argument('repo_name', type=str, help='name of the repo+branch. Format: REPO[:BRANCH]')
args = parser.parse_args()
c = Configuration()
g = c.get_github()
user_from = g.get_user(args.owner_login)
user_to = g.get_user()
repobranch_from = calculate_repo_branch(user=user_from, repo_branch_name=args.repo_name)
push_data = apply_scripts_and_push2(repobranch_from=repobranch_from,
user_to=user_to,
git_wd=c.git_wd, channel_suffix=args.channel_suffix,
run_conventions=args.apply_conventions, run_readme=args.apply_readme,
keep_clone=args.keep_clone, interactive=args.interactive)
if push_data is not None:
print('Pushed changes to branch "{}" of "{}"'.format(push_data.branch_to, push_data.repo_to.full_name))
else:
print('Scripts did not change anything')
def argparse_add_what_conventions(parser: argparse.ArgumentParser):
group = parser.add_argument_group()
group.add_argument('--do-not-apply-readme', dest='apply_readme', action='store_false',
help='do not run readme generation script')
group.add_argument('--do-not-apply-conventions', dest='apply_conventions', action='store_false',
help='do not run conventions script')
def argparse_add_which_branch_option(parser: argparse.ArgumentParser):
group = parser.add_argument_group('Branch to use when none is specified')
branch_group = group.add_mutually_exclusive_group()
branch_group.add_argument('--default_branch', dest='branch_dest', action='store_const',
const=WhichBranch.DEFAULT, help='use default branch')
branch_group.add_argument('--latest', dest='branch_dest', action='store_const',
const=WhichBranch.LATEST, help='use branch with highest version')
branch_group.add_argument('--latest_stable', dest='branch_dest', action='store_const',
const=WhichBranch.LATEST_STABLE, help='use branch of stable channel with highest version')
branch_group.add_argument('--latest_testing', dest='branch_dest', action='store_const',
const=WhichBranch.LATEST_TESTING,
help='use branch of testing channel with highest version')
branch_group.add_argument('--branch', dest='branch_dest', help='use specified branch')
parser.set_defaults(branch_dest=WhichBranch.DEFAULT)
def calculate_repo_branch(user: GithubUser, repo_branch_name: str) -> 'GithubRepoBranch':
list_repo_branch = repo_branch_name.split(':', 1)
if len(list_repo_branch) == 1:
repo_str, branch = list_repo_branch[0], None
else:
repo_str, branch = list_repo_branch[0], list_repo_branch[1]
repo = user.get_repo(repo_str)
return GithubRepoBranch(repo, branch)
def calculate_branch(repo: Repository, branch_dest: typing.Union[WhichBranch, str]) -> typing.Optional[str]:
if branch_dest == WhichBranch.DEFAULT:
return repo.default_branch
elif branch_dest == WhichBranch.LATEST:
conan_repo = ConanRepo.from_repo(repo)
most_recent_version = conan_repo.most_recent_version()
if most_recent_version is None:
return
return next(conan_repo.get_branches_by_version(most_recent_version)).name
elif branch_dest == WhichBranch.LATEST_STABLE:
conan_repo = ConanRepo.from_repo(repo)
most_recent_branch = conan_repo.most_recent_branch_by_channel('stable')
if most_recent_branch is None:
return
return most_recent_branch.name
elif branch_dest == WhichBranch.LATEST_TESTING:
conan_repo = ConanRepo.from_repo(repo)
most_recent_branch = conan_repo.most_recent_branch_by_channel('testing')
if most_recent_branch is None:
return
return most_recent_branch.name
else:
return branch_dest
def generate_default_channel_suffix():
return datetime.datetime.now().isoformat(timespec='seconds').translate(str.maketrans(':-', '__'))
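# e.g. a timestamp like 2021-05-01T12:34:56 becomes the suffix '2021_05_01T12_34_56' (hypothetical date)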
ConventionsApplyResult = namedtuple('ConventionsApplyResult', ('repo_from', 'branch_from', 'repo_to', 'branch_to', ))
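# NOTE: apply_scripts_and_push() below appears to be the older code path; main() calls
# apply_scripts_and_push2() instead, and the clone_repo() helper used here corresponds to
# the commented-out function at the bottom of this file.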
def apply_scripts_and_push(repo_name: str, from_branch: str,
from_user: GithubUser, to_user: AuthenticatedUser, git_wd: Path, channel_suffix: str,
run_conventions: bool=True, run_readme: bool=True,
keep_clone: bool=False) -> typing.Optional[ConventionsApplyResult]:
from_repo, to_repo = fork_create(repo_name, from_user, to_user)
remote_origin = 'origin'
remote_user = 'user'
git_repo_wd = git_wd / repo_name
clone_repo(repo_name, keep_clone, git_wd, from_repo, to_repo, remote_origin, remote_user, from_branch)
repo = git.Repo(git_repo_wd)
updated = False
def commit_changes(repo, message):
nonlocal updated
repo.git.add(all=True)
if repo.is_dirty():
repo.index.commit(message=message)
updated = True
if run_conventions:
print('Running bincrafters-conventions...')
with chdir(git_repo_wd):
cmd = BincraftersConventionsCommand()
cmd.run(['--local', ])
commit_changes(repo, 'Run bincrafters-conventions\n\ncommit by {}'.format(NAME))
if run_readme:
print('Running conan-readme-generator...')
with chdir(git_repo_wd):
with chargv(['']):
conan_readme_generator_run()
commit_changes(repo, 'Run conan-readme-generator\n\ncommit by {}'.format(NAME))
def remote_branch_from_local(local):
try:
channel, version = local.split('/', 1)
return '{}_{}/{}'.format(channel, channel_suffix, version)
except ValueError:
return '{}_{}'.format(local, channel_suffix)
if updated:
remote_branch_name = remote_branch_from_local(repo.active_branch.name)
repo.remote(remote_user).push('{}:{}'.format(repo.active_branch.name, remote_branch_name))
        return ConventionsApplyResult(repo_from=from_repo, branch_from=from_branch, repo_to=to_repo, branch_to=remote_branch_name)
else:
return None
class GithubRepoBranch(object):
def __init__(self, repo: typing.Optional[Repository]=None, branch: typing.Optional[str]=None):
self.repo = repo
self.branch = branch
def apply_scripts_and_push2(repobranch_from: GithubRepoBranch, user_to: AuthenticatedUser,
git_wd: Path, channel_suffix: str,
run_conventions: bool=True, run_readme: bool=True,
keep_clone: bool=False, interactive: bool=False) -> typing.Optional[ConventionsApplyResult]:
apply_action = ConventionsApplyAction(repobranch_from=repobranch_from, user_to=user_to,
wd=git_wd, channel_suffix=channel_suffix,
run_conventions=run_conventions, run_readme=run_readme,
keep_clone=keep_clone, interactive=interactive)
apply_action.check()
print(apply_action.description())
apply_action.action()
if apply_action.work_done:
return ConventionsApplyResult(repo_from=repobranch_from.repo, branch_from=repobranch_from.branch,
repo_to=apply_action.repo_to, branch_to=apply_action.branch_to,)
class ConventionsApplyAction(ActionBase):
def __init__(self, repobranch_from: GithubRepoBranch, user_to: AuthenticatedUser,
wd: Path, channel_suffix: str=None, run_conventions: bool=True, run_readme: bool=True,
which_branch: typing.Union[WhichBranch, str]=WhichBranch.DEFAULT, keep_clone: bool=False, interactive: bool=False):
super().__init__()
self._repo_branch_from = repobranch_from
self._repo_to = None
self._branch_to = None
self._user_to = user_to
self._wd = wd
        self._channel_suffix = channel_suffix if channel_suffix is not None else generate_default_channel_suffix()
self._which_branch = which_branch
self._keep_clone = keep_clone
self._interactive = interactive
self._work_done = None
self._run_conventions = run_conventions
self._run_readme = run_readme
def run_check(self):
if self._repo_branch_from.repo is None:
raise ActionInterrupted()
if self._repo_branch_from.branch is None:
self._repo_branch_from.branch = calculate_branch(self._repo_branch_from.repo, self._which_branch)
if self._repo_branch_from.branch is None:
raise ActionInterrupted('Unknown branch')
if not any((self._run_conventions, self._run_readme, )):
raise ActionInterrupted('Nothing to do...')
def run_action(self):
fork_action = ForkCreateAction(repo_from=self._repo_branch_from.repo, user_to=self._user_to, interactive=self._interactive)
fork_action.action()
self._repo_to = fork_action.repo_to
clone_action = RepoCloneAction(repo_from=self._repo_branch_from.repo, repo_to=self._repo_to,
wd=self._wd, keep_clone=self._keep_clone, branch=self._repo_branch_from.branch)
clone_action.action()
repo = git.Repo(clone_action.repo_wd)
updated = False
def commit_changes(repo, message):
nonlocal updated
repo.git.add(all=True)
if repo.is_dirty():
repo.index.commit(message=message)
updated = True
if self._run_conventions:
if self._interactive:
if not input_ask_question_yn('Run bincrafters-conventions script?', default=True):
raise ActionInterrupted()
print('Running bincrafters-conventions...')
with chdir(clone_action.repo_wd):
cmd = BincraftersConventionsCommand()
cmd.run(['--local', ])
commit_changes(repo, 'Run bincrafters-conventions\n\ncommit by {}'.format(NAME))
if self._run_readme:
if self._interactive:
if not input_ask_question_yn('Run conan-readme-generator script?', default=True):
raise ActionInterrupted()
print('Running conan-readme-generator...')
with chdir(clone_action.repo_wd):
with chargv(['']):
conan_readme_generator_run()
commit_changes(repo, 'Run conan-readme-generator\n\ncommit by {}'.format(NAME))
def remote_branch_from_local(local):
try:
channel, version = local.split('/', 1)
return '{}_{}/{}'.format(channel, self._channel_suffix, version)
except ValueError:
return '{}_{}'.format(local, self._channel_suffix)
self._work_done = False
if updated:
branch_to = remote_branch_from_local(repo.active_branch.name)
if self._interactive:
from .util import editor_interactive_remove_comments
branch_to = editor_interactive_remove_comments(
'{branch}\n\n# Enter the name of the remote branch (repo={repo})'.format(
branch=branch_to, repo=self._repo_to.full_name)).strip()
if not branch_to or not input_ask_question_yn(
'Push changes to remote branch (user={user}) "{branch}"?'.format(
user=self._user_to.login, branch=branch_to), default=True):
raise ActionInterrupted()
repo.remote(clone_action.repo_to_name).push('{}:{}'.format(repo.active_branch.name, branch_to))
self._branch_to = branch_to
self._work_done = True
def run_description(self) -> str:
return 'Fork, clone and run conventions on "{repo_from_name}"'.format(
            repo_from_name=self._repo_branch_from.repo.full_name,
)
@property
def repo_from(self) -> Repository:
return self._repo_branch_from.repo
@property
def branch_from(self) -> typing.Optional[str]:
return self._repo_branch_from.branch
@property
def repo_to(self) -> typing.Optional[Repository]:
return self._repo_to
@property
def branch_to(self) -> typing.Optional[str]:
return self._branch_to
@property
def work_done(self) -> typing.Optional[bool]:
return self._work_done
class RepoCloneAction(ActionBase):
def __init__(self, repo_from: Repository, repo_to: Repository, wd: Path, keep_clone: bool=False,
name_from: str='origin', name_to: str='user', branch: typing.Union[str, WhichBranch]=WhichBranch.DEFAULT):
super().__init__()
self._repo_from = repo_from
self._repo_to = repo_to
self._wd = wd
self._repo_wd = wd / repo_from.name
self._keep_clone = keep_clone
self._name_from = name_from
self._name_to = name_to
if isinstance(branch, str):
self._branch = branch
else:
self._branch = ConanRepo.from_repo(self._repo_from).select_branch(branch).name
def run_check(self):
assert self._wd.is_dir()
def run_action(self):
if self._repo_wd.exists():
if not self._keep_clone:
shutil.rmtree(self._repo_wd)
if not self._repo_wd.exists():
r = git.Repo.clone_from(url=self._repo_from.clone_url, to_path=self._repo_wd)
r.remote('origin').rename(self._name_from)
r.git.remote(['add', self._name_to, self._repo_to.ssh_url])
r.remote(self._name_to).update()
r = git.Repo(self._repo_wd)
r.git.checkout('{}/{}'.format(self._name_from, self._branch), B=self._branch, force=True, track=True)
def run_description(self):
return 'Clone remote repository "{name_remote}" ({url_remote}) to local directory "{path_local}". ' \
'Checkout "{branch_local}".'.format(
name_remote=self._repo_from.full_name,
url_remote=self._repo_from.clone_url,
path_local=self._repo_wd,
branch_local=self._branch,
)
@property
def repo_wd(self) -> Path:
return self._repo_wd
@property
def repo_from_name(self) -> str:
return self._name_from
@property
def repo_to_name(self) -> str:
return self._name_to
# def clone_repo(repo_name, keep_clone, wd, from_repo, to_repo, remote_origin, remote_user, which_branch):
# git_repo_wd = wd / repo_name
#
# if git_repo_wd.exists():
# if not keep_clone:
# shutil.rmtree(git_repo_wd)
#
# if not git_repo_wd.exists():
# r = git.Repo.clone_from(url=from_repo.clone_url, to_path=git_repo_wd)
# r.remote('origin').rename(remote_origin)
# r.git.remote(['add', remote_user, to_repo.ssh_url])
# r.remote(remote_user).update()
#
# remote = 'origin'
#
# r = git.Repo(git_repo_wd)
# r.git.checkout('{}/{}'.format(remote, which_branch), B=which_branch, force=True, track=True)
if __name__ == '__main__':
main()
|
#--- Exercise 5 - Variables and printing with string interpolation
#--- Print the data for 5 stocks quoted on the Sao Paulo stock exchange
#--- The stock data must be stored in variables
#--- Stock: Name, Type, Current Quote, and the day's Min and Max values
#--- The screen must include a header and a footer
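# A minimal sketch of one possible solution (hypothetical ticker names and prices):
papel_nome = 'PETR4'
papel_tipo = 'ON'
cotacao_atual = 28.50
valor_min = 27.90
valor_max = 29.10

print('=' * 50)
print('Sao Paulo Stock Exchange - Daily Quotes')
print('=' * 50)
print(f'Stock: {papel_nome} ({papel_tipo})')
print(f'Current: {cotacao_atual:.2f}  Min: {valor_min:.2f}  Max: {valor_max:.2f}')
print('=' * 50)
# ...repeat the block above for the other 4 stocks, each with its own variables...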
|
#!/usr/bin/env python3
import os
from textwrap import dedent
def wait_for_enter():
input("\nPress enter to continue...")
os.system('clear')
def add_wifi_network(context):
msg = """
Add Wifi Network
================
Add wifi network for the 4G router (if using one), or known wifi address.
Optionally, add your phone hotspot for easier debugging.
Run:
sudo nano /etc/wpa_supplicant/wpa_supplicant.conf
And add the following network config:
    network={
ssid=\"StrathBeacon\"
scan_ssid=1
psk=\"gondor_strath\"
priority=8
    }"""
print(dedent(msg).strip())
wait_for_enter()
def update_network_interface(context):
msg = """
Update network interface
========================
Run:
sudo nano /etc/network/interfaces
In the above file, remove the '#' from the start of the following lines:
#allow-hotplug wlan0
#iface wlan0 inet dhcp
# wpa-conf /etc/wpa_supplicant/wpa_supplicant.conf
"""
print(dedent(msg).strip())
wait_for_enter()
def connect_to_beacon(context):
print("Connect to the beacon through putty, ssh, or a wired keyboard and monitor.")
wait_for_enter()
def refresh_wifi_module(context):
msg = """
Ensure the Pi detects the network config
========================================
Run the following commands:
sudo wpa_supplicant -B -i wlan0 -c /etc/wpa_supplicant/wpa_supplicant.conf
sudo wpa_cli -i wlan0 reconfigure
You MIGHT get kicked off wifi and need to reconnect.
"""
print(dedent(msg).strip())
wait_for_enter()
def update_data_sync_frequency(context):
msg = """
Reduce data sync frequency
==========================
Edit the crontab. Run:
crontab -e
Change each rsync line to look like:
15 */4 * * * rsync ...long command for data...
30 */4 * * * rsync ...long command for logs...
"""
print(dedent(msg).strip())
wait_for_enter()
def update_vpn_connection_schedule(context):
msg = """
Reduce frequency of VPN checks
==============================
Edit the root crontab. Run:
sudo crontab -e
And change the line with 'vpn_check' to:
1 */4 * * * /usr/local/bin/vpn_check
"""
print(dedent(msg).strip())
wait_for_enter()
def update_hardware_clock(context):
msg = """
Update hardware clocks
======================
Connect to the pi through something with an outbound internet connection.
Run the commands:
sudo date -s \"$(curl -s --head https://google.com | grep -i ^Date: | sed -e 's/[Dd]ate: //g')\"
sudo hwclock -w
timedatectl
"""
print(dedent(msg).strip())
wait_for_enter()
def reboot_pi(context):
msg = """
Reboot the Pi
=============
Run:
sudo reboot now
"""
print(dedent(msg).strip())
wait_for_enter()
if __name__ == "__main__":
context = {'password': 'CO2Network2012'}
steps = [
connect_to_beacon,
add_wifi_network,
update_network_interface,
refresh_wifi_module,
update_data_sync_frequency,
update_vpn_connection_schedule,
update_hardware_clock,
reboot_pi
]
os.system('clear')
for step in steps:
step(context)
print('Done.')
|
# -*- coding: utf-8 -*-
# imports
import cv2, helper, imutils;
import numpy as np;
# This filter does the average filtering on the image
# using the cv2.blur(frame, kernel_size = (5,5)).
def avgFilter(frame, kernel_size = (5,5)):
return cv2.blur(frame, kernel_size);
#end
# This function does the Gaussian Blurring of the image using
# cv2.GaussianBlur(frame, kernel_size = (5,5), sigmaX = 0, sigmaY = 0).
def gaussianBlur(frame, kernel_size = (5,5), sigmaX = 0, sigmaY = 0):
if sigmaX == 0:
return cv2.GaussianBlur(frame, kernel_size, sigmaX);
else:
return cv2.GaussianBlur(frame, kernel_size, sigmaX, sigmaY);
#end
# This function does the median blurring via the
# cv2.medianBlur(frame, kernel_size = 5);
# ------ Information ------
# Here, the function cv2.medianBlur() takes median of all the pixels under kernel area and central element is
# replaced with this median value. This is highly effective against SALT AND PEPPER NOISE in the images.
# Interesting thing is that, in the above filters, central element is a newly calculated value which may be a pixel
# value in the image or a new value. But in median blurring, central element is always replaced by some pixel value in the image.
# It reduces the noise effectively. Its kernel size should be a positive odd integer.
def medianBlur(frame, kernel_size = 5):
if kernel_size == 1 or kernel_size % 2 == 0:
        helper.throwException("Error : Invalid kernel_size for median blur. Must be an odd number greater than 1");
else:
return cv2.medianBlur(frame, kernel_size);
#end
# This function does the bilateral Filtering on the image via the
# cv2.bilateralFilter(frame, kernel_size = 9, sigmaColor = 75, signalSpace = 75)
# ------ Information ------
# cv2.bilateralFilter() is highly effective in noise removal while keeping edges sharp.
# But the operation is slower compared to other filters. We already saw that gaussian filter takes
# the a neighbourhood around the pixel and find its gaussian weighted average. This gaussian filter
# is a function of space alone, that is, nearby pixels are considered while filtering. It doesn't
# consider whether pixels have almost same intensity. It doesn't consider whether pixel is an edge
# pixel or not. So it blurs the edges also, which we don't want to do.
# Bilateral filter also takes a gaussian filter in space, but one more gaussian filter which is
# a function of pixel difference. The gaussian function of space makes sure only nearby pixels are
# considered for blurring, while the gaussian function of intensity difference makes sure only those
# pixels with similar intensity to the central pixel are considered for blurring. So it preserves
# the edges, since pixels at edges will have large intensity variation.
def bilateralFilter(frame, kernel_size = 9, sigmaColor = 75, signalSpace = 75):
return cv2.bilateralFilter(frame, kernel_size, sigmaColor, signalSpace);
#end
# Apply sobel operator (derivative wrt y-axis) on image.
def sobelY(frame, kernel_size = 5):
return cv2.Sobel(frame,cv2.CV_64F,0,1,ksize=kernel_size);
# Apply sobel operator (derivative wrt x-axis) on image.
def sobelX(frame, kernel_size = 5):
return cv2.Sobel(frame,cv2.CV_64F,1,0,ksize=kernel_size);
# Laplacian can be used directly by:
# gaussianframe = cv2.Laplacian(filters.gaussianBlur(greyframe), cv2.CV_32F, 5, 5);
def laplacian(frame):
return cv2.Laplacian(frame, cv2.CV_32F, 5,5);
#end
# This function apply the primary transforms and
# filtering on the captured frames
def applyPrimaryFilteringOnImage(frame, resize_width = 400, canny_min = 90, canny_max = 200, last_thresh_min = 68, last_thresh_max = 255):
resizedFrameWidth = resize_width;
cannyEdgeDetector_minVal = canny_min;
cannyEdgeDetector_maxVal = canny_max;
orignal = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY);
r_orignal = imutils.resize(orignal, width=resizedFrameWidth);
resize_ratio = orignal.shape[0]/float(r_orignal.shape[0]);
orignal = r_orignal;
orignal = gaussianBlur(orignal);
orignal = cv2.adaptiveThreshold(orignal,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    processedframe = cv2.Canny(gaussianBlur(orignal),180,200); # NOTE: thresholds are hardcoded here; the canny_min/canny_max parameters are currently unused
orignal = processedframe1 = avgFilter(processedframe, (29,29));
ret,processedframe1 = cv2.threshold(processedframe1,last_thresh_min,last_thresh_max,cv2.THRESH_BINARY)
orignal = processedframe1;
return resize_ratio,orignal;
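# A minimal usage sketch (hypothetical image file name):
# frame = cv2.imread('sample.jpg');
# smoothed = gaussianBlur(frame, (5,5));
# ratio, edges = applyPrimaryFilteringOnImage(frame);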
|
from PIL import Image, ImageDraw
from os import listdir, chdir
from sys import exit
from time import sleep
from CONFIG import *
from textwrap import wrap
from FUNC import *
# Read the text for the demotivator
with open('text.txt', encoding='utf-8') as file:
try:
N = int(file.readline())
text = file.readlines()
except ValueError:
print('Проверьте заполнение text.txt согласно образцу')
input()
exit()
ARR = []
for i in range(0,2*N,2):
ARR.append([text[i][:-1:],text[i+1][:-1:]])
# Load the source image
chdir('pic')
if len(listdir()) != 1:
print('Папка pic должна содержать только 1 изображение')
sleep(2)
exit()
pic = listdir()[0]
im = Image.open(pic)
# Put the main picture into a frame, then place the frame+picture onto the demotivator with the caption
for i in range(N):
height = im.height
width = im.width
HEAD_TEXT = ARR[i][0]
PLAIN_TEXT = ARR[i][1]
background = Image.new('RGB', (width + BORDER, height + BORDER), (0, 0, 0))
HEAD_LIMIT = (background.width - IMAGE_OFFSET_X) // 24
PLAIN_LIMIT = (background.width - IMAGE_OFFSET_X) // 17
    # Draw the frame around the picture
draw = ImageDraw.Draw(background)
draw.rectangle((0, 0, width + BORDER, height + BORDER), None, WHITE, LINE_SIZE)
background.paste(im, rround(0.5 * BORDER, BORDER * 0.5))
    # Extend the canvas to make room for the text
TEXT_OFFSET = int(
len(wrap(HEAD_TEXT, HEAD_LIMIT)) * HEAD_HEIGHT * 1.5 + len(wrap(PLAIN_TEXT, PLAIN_LIMIT)) * PLAIN_HEIGHT * 1.5)
result_image = Image.new('RGB', (
background.width + 2 * IMAGE_OFFSET_X, background.height + 2 * IMAGE_OFFSET_Y + TEXT_OFFSET + INDENTS_OFFSET * 2))
result_image.paste(background, (IMAGE_OFFSET_X, IMAGE_OFFSET_Y))
    # Add the text in the classic style: HEADING then plain text; the text area grows downward, width is constant
    # Centering is calculated separately
draw = ImageDraw.Draw(result_image)
textprint(HEAD_TEXT, background.height + IMAGE_OFFSET_Y + INDENTS_OFFSET, HEAD_FONT, result_image, HEAD_LIMIT)
Y_PLAIN = background.height + IMAGE_OFFSET_Y + INDENTS_OFFSET + len(wrap(HEAD_TEXT, HEAD_LIMIT))*HEAD_HEIGHT*1.5
textprint(PLAIN_TEXT, Y_PLAIN + INDENTS_OFFSET, PLAIN_FONT, result_image, PLAIN_LIMIT)
result_image.thumbnail(im.size)
im = result_image
im = Image.open(pic)
    result_image.thumbnail(im.size) # Shrink back to the original size
|
#!/usr/bin/env python
"""The ReadLowLevel client action."""
import hashlib
import io
from typing import AnyStr, Optional
import zlib
from grr_response_client import actions
from grr_response_client import comms
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import read_low_level as rdf_read_low_level
# We'll read at most 10 GiB in this flow. If the requested length is greater
# than that, we throw an error.
_READ_BYTES_LIMIT = 10 * 1024 * 1024 * 1024 # 10 GiB
# This will be used for sector alignment (align the offset before reading).
# All Windows versions support hard disk drives with 512-byte sectors.
# We use this value by default, but if the device has a different block size,
# the user can set it in the args.
_DEFAULT_SECTOR_BLOCK_SIZE = 512
# We'll read chunks of `DEFAULT_BLOB_SIZE` at a time when possible, and hash
# and store them in BlobStore. The BlobIDs will be sent back to the flow.
# 4 MiB is a good size for chunks to be sent to BlobStore efficiently.
_DEFAULT_BLOB_SIZE = 4 * 1024 * 1024 # 4 MiB
class ReadLowLevel(actions.ActionPlugin):
"""Reads `length` bytes from `path` starting at `offset` and returns it."""
in_rdfvalue = rdf_read_low_level.ReadLowLevelRequest
out_rdfvalues = [rdf_read_low_level.ReadLowLevelResult]
def __init__(self, grr_worker: Optional[comms.GRRClientWorker] = None):
super().__init__(grr_worker)
# Extra amount of bytes to be read in case the `offset` is misaligned, or
# the `length` to be read is not aligned with the block size. This will be
# used for updating the `offset` before the read (client action args). It
# will also be later discarded from the `data` read.
self._pre_padding = 0
# Stores a partial file hash for all data read so far.
self._partial_file_hash = hashlib.sha256()
def Run(self, args: rdf_read_low_level.ReadLowLevelRequest) -> None:
"""Reads a buffer, stores it and sends it back to the server."""
# Make sure we limit the size of our output.
if args.length > _READ_BYTES_LIMIT:
raise RuntimeError(f"Can not read buffers this large "
f"({args.length} > {_READ_BYTES_LIMIT} bytes).")
# TODO: Update `blob_size` when `sector_block_size` is set.
# `blob_size` must be a multiple of `sector_block_size` so that reads start
# and _continue_ to be aligned.
# An alternative is to _always_ align (each blob read).
blob_size = args.blob_size or _DEFAULT_BLOB_SIZE
aligned_args = self._AlignArgsAndAdjustPadding(args)
bytes_left_to_read = aligned_args.length
is_first_chunk = True
current_offset = aligned_args.offset
with open(args.path, "rb") as fd:
fd.seek(current_offset, io.SEEK_SET) # absolute file positioning
while bytes_left_to_read > 0:
read_size = min(blob_size, bytes_left_to_read)
data = fd.read(read_size)
# Discard data that we read unnecessarily due to alignment.
# Refer to `_AlignArgs` documentation for more details.
if is_first_chunk:
data = data[self._pre_padding:]
is_first_chunk = False
# Upload the blobs to blobstore using `TransferStore`. Save the buffer
# references so we can report it back in the end.
if data:
# We need to update the offset as-if it had started from 0 all along,
# in order to avoid `InvalidBlobOffsetError` when storing the blobs as
# a file in `file_store`.
reference_offset = (
current_offset - self._pre_padding if current_offset else 0)
self._StoreDataAndHash(data, reference_offset)
current_offset = current_offset + read_size
bytes_left_to_read -= read_size
self.Progress()
# TODO: Refactor to return structured results.
def _AlignArgsAndAdjustPadding(
self, args: rdf_read_low_level.ReadLowLevelRequest
) -> rdf_read_low_level.ReadLowLevelRequest:
"""Aligns the reading offset, updates the reading length and `pre_padding`.
It returns a copy of the flow arguments with the aligned offset value,
updated length. It also updates the class `pre_padding`, used later on to
discard extra data.
The alignment means more data than requested can be read.
    From a software architecture point of view, this logic should be
    platform-specific rather than part of a shared client action: clients on
    platforms that require alignment would have it, and others would not.
    However, for simplicity we use the same implementation on all platforms.
- Linux does not require sector alignment for reads.
- Windows requires sector alignment for raw device access.
- Mac raw disk devices are not seekable to the end and have no size, so the
alignment logic helps.
Args:
args: Original ReadLowLevelRequest sent to this ClientAction.
Returns:
A copy of the flow args with the aligned offset.
"""
block_size = args.sector_block_size or _DEFAULT_SECTOR_BLOCK_SIZE
original_offset = args.offset
self._pre_padding = original_offset % block_size
# Due to alignment we will read some more data than we need to.
aligned_params = args.Copy()
aligned_params.offset = original_offset - self._pre_padding
aligned_params.length = args.length + self._pre_padding
return aligned_params
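  # Worked example (hypothetical numbers): with offset=700, length=1000 and the
  # default 512-byte sector size, pre_padding = 700 % 512 = 188, so the aligned
  # request reads from offset 512 with length 1188, and Run() discards the first
  # 188 bytes of the first chunk.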
def _StoreDataAndHash(self, data: AnyStr, offset: int) -> None:
"""Uploads data as blob and replies hash to flow.
Args:
data: Bytes to be stored as a blob.
offset: Offset where the data was read from.
"""
data_blob = rdf_protodict.DataBlob(
data=zlib.compress(data),
compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)
# Ensure that the buffer is counted against this response. Check network
# send limit.
self.ChargeBytesToSession(len(data))
# Now return the data to the server into the special TransferStore well
# known flow.
self.grr_worker.SendReply(
data_blob, session_id=rdfvalue.SessionID(flow_name="TransferStore"))
# Now report the hash of this blob to our flow as well as the offset and
# length.
digest = hashlib.sha256(data).digest()
buffer_reference = rdf_client.BufferReference(
offset=offset, length=len(data), data=digest)
self._partial_file_hash.update(data)
partial_file_hash = self._partial_file_hash.digest()
self.SendReply(
rdf_read_low_level.ReadLowLevelResult(
blob=buffer_reference, accumulated_hash=partial_file_hash))
|
import os
import json
import math
import torch
import torch.distributions as distributions
from daphne import daphne
from tests import is_tol, run_prob_test,load_truth
import matplotlib.pyplot as plt
# Useful functions
from utils import _hashmap, _vector, _totensor
from utils import _put, _remove, _append, _get
from utils import _squareroot, _mat_repmat, _mat_transpose
# OPS
basic_ops = {'+':torch.add,
'-':torch.sub,
'*':torch.mul,
'/':torch.div
}
math_ops = {'sqrt': lambda x: _squareroot(x)
}
data_struct_ops = {'vector': lambda x: _vector(x),
'hash-map': lambda x: _hashmap(x)
}
data_interact_ops = {'first': lambda x: x[0], # retrieves the first element of a list or vector e
'second': lambda x: x[1], # retrieves the second element of a list or vector e
'last': lambda x: x[-1], # retrieves the last element of a list or vector e
                     'rest': lambda x: x[1:], # retrieves the rest of the elements of a list or vector e, i.e. everything except the first one
'get': lambda x, idx: _get(x, idx), # retrieves an element at index e2 from a list or vector e1, or the element at key e2 from a hash map e1.
'append': lambda x, y: _append(x, y), # (append e1 e2) appends e2 to the end of a list or vector e1
                     'remove': lambda x, idx: _remove(x, idx), # (remove e1 e2) removes the element at index/key e2 from a vector or hash-map e1.
'put': lambda x, idx, value: _put(x, idx, value) # (put e1 e2 e3) replaces the element at index/key e2 with the value e3 in a vector or hash-map e1.
}
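# Minimal usage sketch of the data-structure primitives above (assumes the
# helpers in `utils` behave like their Clojure counterparts):
#   v = data_struct_ops['vector']([2, 3, 4, 5])   # build a vector
#   data_interact_ops['first'](v)                 # -> 2
#   data_interact_ops['get'](v, 1)                # -> 3
#   data_interact_ops['put'](v, 1, 7)             # -> vector with 7 at index 1
#   data_interact_ops['append'](v, 6)             # -> vector with 6 appended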
dist_ops = {"normal":lambda mu, sig: distributions.normal.Normal(loc=mu, scale=sig),
"beta":lambda a, b: distributions.beta.Beta(concentration1=a, concentration0=b),
"gamma": lambda concentration, rate: distributions.gamma.Gamma(concentration=concentration, rate=rate),
"uniform": lambda low, high: distributions.uniform.Uniform(low=low, high=high),
"exponential":lambda rate: distributions.exponential.Exponential(rate=rate),
"discrete": lambda probs: distributions.categorical.Categorical(probs=probs),
"dirichlet": lambda concentration: distributions.dirichlet.Dirichlet(concentration=concentration),
"bernoulli": lambda probs: distributions.bernoulli.Bernoulli(probs=probs),
"flip": lambda probs: distributions.bernoulli.Bernoulli(probs=probs)
}
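# Each entry in `dist_ops` returns a torch.distributions object, so (assuming
# tensor-valued parameters) a minimal sketch looks like:
#   d = dist_ops['normal'](torch.tensor(0.0), torch.tensor(1.0))
#   x = d.sample()        # draw a sample
#   lp = d.log_prob(x)    # log-density, used below to accumulate sig["logW"]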
cond_ops={"<": lambda a, b: a < b,
">": lambda a, b: a > b,
"=": lambda a, b: a == b,
">=": lambda a, b: a >= b,
"<=": lambda a, b: a <= b,
"or": lambda a, b: a or b,
"and": lambda a, b: a and b
}
nn_ops={"mat-tanh": lambda a: torch.tanh(a),
"mat-add": lambda a, b: torch.add(a, b),
"mat-mul": lambda a, b: torch.matmul(a, b),
"mat-repmat": lambda a, b, c: _mat_repmat(a, b, c),
"mat-transpose": lambda a: _mat_transpose(a)
}
# Global vars
rho = {}  # global store for user-defined (defn) functions
DEBUG = False # Set to true to see intermediate outputs for debugging purposes
#----------------------------Evaluation Functions -----------------------------#
def evaluate_program(ast, sig=None, l=None):
"""
Evaluate a program as desugared by daphne, generate a sample from the prior
Args:
ast: json FOPPL program
Returns: sample from the prior of ast
"""
    # Avoid the shared-mutable-default-argument pitfall: use fresh dicts
    sig = {} if sig is None else sig
    l = {} if l is None else l
    # Empty list
    if not ast:
        return [False, sig]
if DEBUG:
print('Current AST: ', ast)
# import pdb; pdb.set_trace()
if len(ast) == 1:
# Check if a single string ast [['mu']]
single_val = False
if isinstance(ast[0], str):
root = ast[0]
tail = []
single_val = True
else:
ast = ast[0]
if DEBUG:
print('Current program: ', ast)
try:
# Check if a single string such as ast = ['mu']
if not single_val:
if len(ast) == 1:
if isinstance(ast[0], str):
root = ast[0]
tail = []
else:
root, *tail = ast
if DEBUG:
print('Current OP: ', root)
print('Current TAIL: ', tail)
# Basic primitives
if root in basic_ops.keys():
op_func = basic_ops[root]
eval_1, sig = evaluate_program([tail[0]], sig=sig, l=l)
# Make sure in floating point
if torch.is_tensor(eval_1):
eval_1 = eval_1.type(torch.float32)
elif isinstance(eval_1, int):
eval_1 = float(eval_1)
                # Evaluate tail
eval_2 = evaluate_program(tail[1:], sig=sig, l=l)[0]
                # Unwrap single-element lists returned by nested evaluations
if isinstance(eval_1, list):
eval_1 = eval_1[0]
if isinstance(eval_2, list):
eval_2 = eval_2[0]
if DEBUG:
print('Basic OP eval-1: ', eval_1)
print('Basic OP eval-2: ', eval_2)
op_eval = op_func(eval_1, eval_2)
return [op_eval, sig]
# Math ops
elif root in math_ops.keys():
op_func = math_ops[root]
return [op_func(tail), sig]
# NN ops
elif root in nn_ops.keys():
# import pdb; pdb.set_trace()
op_func = nn_ops[root]
if root == "mat-add" or root == "mat-mul":
e1, e2 = tail
# Operand-1
if isinstance(e1, list) and len(e1) == 1:
a, sig = evaluate_program(e1, sig=sig, l=l)
elif isinstance(e1, list):
a, sig = evaluate_program([e1], sig=sig, l=l)
else:
                        # Most likely a pre-defined variable in l
a = l[e1]
# Operand-2
if isinstance(e2, list) and len(e2) == 1:
b, sig = evaluate_program(e2, sig=sig, l=l)
elif isinstance(e2, list):
b, sig = evaluate_program([e2], sig=sig, l=l)
else:
b = l[e2] # Most likely a pre-defined varibale in l
if DEBUG:
print('Evaluated MatMul-1: ', a)
print('Evaluated MatMul-2: ', b)
# OP
return [op_func(a, b), sig]
# ["mat-repmat", "b_0", 1, 5]
elif root == "mat-repmat":
e1, e2, e3 = tail
# Initial MAT
if isinstance(e1, list) and len(e1) == 1:
a, sig = evaluate_program(e1, sig=sig, l=l)
elif isinstance(e1, list):
a, sig = evaluate_program([e1], sig=sig, l=l)
else:
a = l[e1] # Most likely a pre-defined varibale in l
# Repeat axis 1
if isinstance(e2, list) and len(e2) == 1:
b, sig = evaluate_program(e2, sig=sig, l=l)
elif isinstance(e2, list):
b, sig = evaluate_program([e2], sig=sig, l=l)
elif isinstance(e2, float) or isinstance(e2, int):
b = int(e2)
else:
b = l[e2] # Most likely a pre-defined varibale in l
# Repeat axis 2
if isinstance(e3, list) and len(e3) == 1:
c, sig = evaluate_program(e3, sig=sig, l=l)
elif isinstance(e3, list):
c, sig = evaluate_program([e3], sig=sig, l=l)
elif isinstance(e3, float) or isinstance(e3, int):
c = int(e3)
else:
c = l[e3] # Most likely a pre-defined varibale in l
# OP
return [op_func(a, b, c), sig]
else:
e1 = tail
if isinstance(e1, list) and len(e1) == 1:
a, sig = evaluate_program(e1, sig=sig, l=l)
elif isinstance(e1, list):
a, sig = evaluate_program([e1], sig=sig, l=l)
else:
a = l[e1] # Most likely a pre-defined varibale in l
if DEBUG:
print('Evaluated Matrix: ', a)
# OP
return [op_func(a), sig]
# Data structures-- Vector
elif root == "vector":
# import pdb; pdb.set_trace()
op_func = data_struct_ops[root]
if DEBUG:
print('Data Structure data: ', tail)
# Eval tails:
tail_data = torch.zeros(0, dtype=torch.float32)
for T in range(len(tail)):
# Check for single referenced string
if isinstance(tail[T], str):
VT = [tail[T]]
else:
VT = tail[T]
if DEBUG:
print('Pre-Evaluated Data Structure data: ', VT)
eval_T = evaluate_program([VT], sig, l=l)
if DEBUG:
print('Evaluated Data Structure data: ', eval_T)
try:
eval_T = eval_T[0]
except:
# In case of functions returning only a single value & not sigma
pass
# IF sample object then take a sample
try:
eval_T = eval_T.sample()
except:
pass
# Check if not torch tensor
if not torch.is_tensor(eval_T):
if isinstance(eval_T, list):
eval_T = torch.tensor(eval_T, dtype=torch.float32)
else:
eval_T = torch.tensor([eval_T], dtype=torch.float32)
# Check for 0 dimensional tensor
elif eval_T.shape == torch.Size([]):
eval_T = torch.tensor([eval_T.item()], dtype=torch.float32)
# Concat
try:
tail_data = torch.cat((tail_data, eval_T))
except:
raise AssertionError('Cannot append the torch tensors')
if DEBUG:
print('Eval Data Structure data: ', tail_data)
return [tail_data, sig]
# Data structures-- hash-map
elif root == "hash-map":
op_func = data_struct_ops[root]
return [op_func(tail), sig]
# Data structures interaction
elif root in data_interact_ops.keys():
op_func = data_interact_ops[root]
# ['put', ['vector', 2, 3, 4, 5], 2, 3]
if root == 'put':
e1, e2, e3 = tail
if isinstance(e1, list):
get_data_struct, sig = evaluate_program([e1], sig=sig, l=l)
else:
                        # Most likely a pre-defined variable in l
get_data_struct = l[e1]
# Get index
if isinstance(e2, list):
e2_idx, sig = evaluate_program([e2], sig=sig, l=l)
elif isinstance(e2, float) or isinstance(e2, int):
e2_idx = int(e2)
else:
                        # Most likely a pre-defined variable in l
e2_idx = l[e2]
# Get Value
if isinstance(e3, list):
e3_val, sig = evaluate_program([e3], sig=sig, l=l)
elif isinstance(e3, float) or isinstance(e3, int):
e3_val = e3
else:
                        # Most likely a pre-defined variable in l
e3_val = l[e3]
if DEBUG:
print('Data : ', get_data_struct)
print('Index: ', e2_idx)
print('Value: ', e3_val)
return [op_func(get_data_struct, e2_idx, e3_val), sig]
# ['remove'/'get', ['vector', 2, 3, 4, 5], 2]
elif root == 'remove' or root == 'get':
# import pdb; pdb.set_trace()
e1, e2 = tail
if DEBUG:
print('e1: ', e1)
print('e2: ', e2)
if isinstance(e1, list):
get_data_struct, sig = evaluate_program([e1], sig=sig, l=l)
else:
                        # Most likely a pre-defined variable in l
get_data_struct = l[e1]
if isinstance(e2, list):
e2_idx, sig = evaluate_program([e2], sig=sig, l=l)
elif isinstance(e2, float) or isinstance(e2, int):
e2_idx = e2
else:
                        # Otherwise most likely a pre-defined variable in l
e2_idx = l[e2]
if isinstance(e2_idx, list):
e2_idx = e2_idx[0]
if DEBUG:
print('Data : ', get_data_struct)
print('Index/Value: ', e2_idx)
# Convert index to type-int
if torch.is_tensor(e2_idx):
e2_idx = e2_idx.long()
else:
e2_idx = int(e2_idx)
return [op_func(get_data_struct, e2_idx), sig]
# ['append', ['vector', 2, 3, 4, 5], 2]
elif root == 'append':
# import pdb; pdb.set_trace()
get_list1, get_list2 = tail
                    # Evaluate exp1
if isinstance(get_list1, list):
get_data_eval_1, sig = evaluate_program([get_list1], sig=sig, l=l)
elif isinstance(get_list1, float) or isinstance(get_list1, int):
get_data_eval_1 = get_list1
else:
get_data_eval_1 = l[get_list1] # Most likely a pre-defined varibale in l
if DEBUG:
print('Op Eval-1: ', get_data_eval_1)
                    # Evaluate exp2
if isinstance(get_list2, list):
get_data_eval_2, sig = evaluate_program([get_list2], sig=sig, l=l)
elif isinstance(get_list2, float) or isinstance(get_list2, int):
get_data_eval_2 = get_list2
else:
get_data_eval_2 = l[get_list2] # Most likely a pre-defined varibale in l
if DEBUG:
print('Op Eval-2: ', get_data_eval_2)
# Check if not torch tensor
if not torch.is_tensor(get_data_eval_1):
if isinstance(get_data_eval_1, list):
get_data_eval_1 = torch.tensor(get_data_eval_1, dtype=torch.float32)
else:
get_data_eval_1 = torch.tensor([get_data_eval_1], dtype=torch.float32)
# Check for 0 dimensional tensor
elif get_data_eval_1.shape == torch.Size([]):
get_data_eval_1 = torch.tensor([get_data_eval_1.item()], dtype=torch.float32)
# Check if not torch tensor
if not torch.is_tensor(get_data_eval_2):
if isinstance(get_data_eval_2, list):
get_data_eval_2 = torch.tensor(get_data_eval_2, dtype=torch.float32)
else:
get_data_eval_2 = torch.tensor([get_data_eval_2], dtype=torch.float32)
# Check for 0 dimensional tensor
elif get_data_eval_2.shape == torch.Size([]):
get_data_eval_2 = torch.tensor([get_data_eval_2.item()], dtype=torch.float32)
# Append
try:
all_data_eval = torch.cat((get_data_eval_1, get_data_eval_2))
except:
raise AssertionError('Cannot append the torch tensors')
if DEBUG:
print('Appended Data : ', all_data_eval)
return [all_data_eval, sig]
else:
# ['First'/'last'/'rest', ['vector', 2, 3, 4, 5]]
e1 = tail
if isinstance(e1, list):
get_data_struct, sig = evaluate_program(e1, sig=sig, l=l)
else:
                        # Most likely a pre-defined variable in l
get_data_struct = l[e1]
if DEBUG:
print('Data : ', get_data_struct)
return [op_func(get_data_struct), sig]
# Assign
elif root == 'let':
# (let [params] body)
let_param_name = tail[0][0]
let_param_value = tail[0][1]
let_body = tail[1]
if DEBUG:
print('Let param name: ', let_param_name)
print('Let params value: ', let_param_value)
print('Let body: ', let_body)
# Evaluate params
let_param_value_eval, sig = evaluate_program([let_param_value], sig=sig, l=l)
# Add to local variables
l[let_param_name] = let_param_value_eval
# Check for single instance string
if isinstance(let_body, str):
let_body = [let_body]
if DEBUG:
print('Local Params : ', l)
print('Recursive Body: ', let_body, "\n")
# Evaluate body
return evaluate_program([let_body], sig=sig, l=l)
            # Conditional
elif root == "if":
# (if e1 e2 e3)
if DEBUG:
print('Conditonal Expr1 : ', tail[0])
print('Conditonal Expr2 : ', tail[1])
print('Conditonal Expr3 : ', tail[2])
e1_, sig = evaluate_program([tail[0]], sig, l=l)
if DEBUG:
print('Conditonal eval : ', e1_)
if e1_:
return evaluate_program([tail[1]], sig, l=l)
else:
return evaluate_program([tail[2]], sig, l=l)
# Conditional Evaluation
elif root in cond_ops.keys():
# (< a b)
op_func = cond_ops[root]
if DEBUG:
print('Conditional param-1: ', tail[0])
print('Conditional param-2: ', tail[1])
a = evaluate_program([tail[0]], sig, l=l)
b = evaluate_program([tail[1]], sig, l=l)
# In case of functions returning only a single value & not sigma
try:
a = a[0]
except:
pass
try:
b = b[0]
except:
pass
# If torch tensors convert to python data types for comparison
if torch.is_tensor(a):
a = a.tolist()
if isinstance(a, list):
a = a[0]
if torch.is_tensor(b):
b = b.tolist()
if isinstance(b, list):
b = b[0]
if DEBUG:
print('Eval Conditional param-1: ', a)
print('Eval Conditional param-2: ', b)
return [op_func(a, b), sig]
# Functions
elif root == "defn":
# (defn name[param] body, )
if DEBUG:
print('Defn Tail: ', tail)
try:
fnname = tail[0]
fnparams = tail[1]
fnbody = tail[2]
except:
raise AssertionError('Failed to define function!')
if DEBUG:
print('Function Name : ', fnname)
print('Function Param: ', fnparams)
print('Function Body : ', fnbody)
# Check if already present
if fnname in rho.keys():
return [fnname, sig]
else:
# Define functions
rho[fnname] = [fnparams, fnbody]
if DEBUG:
print('Local Params : ', l)
print('Global Funcs : ', rho, "\n")
return [fnname, sig]
# Get distribution
elif root in dist_ops.keys():
# import pdb; pdb.set_trace()
op_func = dist_ops[root]
if len(tail) == 2:
# Check for single referenced string
if isinstance(tail[0], str):
param1 = [tail[0]]
else:
param1 = tail[0]
if isinstance(tail[1], str):
param2 = [tail[1]]
else:
param2 = tail[1]
if DEBUG:
print('Sampler Parameter-1: ', param1)
print('Sampler Parameter-2: ', param2)
# Eval params
para1, sig = evaluate_program([param1], sig=sig, l=l)
para2, sig = evaluate_program([param2], sig=sig, l=l)
# Make sure to have it in torch tensor
try:
para1 = _totensor(x=para1)
except:
# Most likely a tensor inside a list
if isinstance(para1, list):
para1 = para1[0]
para1 = _totensor(x=para1)
try:
para2 = _totensor(x=para2)
except:
# Most likely a tensor inside a list
if isinstance(para2, list):
para2 = para2[0]
para2 = _totensor(x=para2)
if DEBUG:
print('Eval Sampler Parameter-1: ', para1)
print('Eval Sampler Parameter-2: ', para2, "\n")
return [op_func(para1, para2), sig]
else:
# Exponential has only one parameter
# Check for single referenced string
if isinstance(tail[0], str):
param1 = [tail[0]]
else:
param1 = tail[0]
if DEBUG:
print('Sampler Parameter-1: ', param1)
para1, sig = evaluate_program([param1], sig=sig, l=l)
if DEBUG:
print('Eval Sampler Parameter-1: ', para1)
# Make sure to have it in torch tensor
try:
para1 = _totensor(x=para1)
except:
# Most likely a tensor inside a list
if isinstance(para1, list):
para1 = para1[0]
para1 = _totensor(x=para1)
if DEBUG:
print('Tensor Sampler Parameter-1: ', para1, "\n")
return [op_func(para1), sig]
# Sample
elif root == 'sample':
if DEBUG:
print('Sampler program: ', tail)
sampler, sig = evaluate_program(tail, sig=sig, l=l)
if DEBUG:
print('Sampler: ', sampler)
try:
sample_ = sampler.sample()
except:
# For some reason it is not a sampler object
sample_ = sampler
# Obtain likelihood-- cases where it can be obtained
try:
if "logW" in sig.keys():
sig["logW"] += sampler.log_prob(sample_)
else:
sig["logW"] = sampler.log_prob(sample_)
except:
if "logW" in sig.keys():
sig["logW"] += 0.0
else:
sig["logW"] = 0.0
return [sample_, sig]
# Observe
elif root == 'observe':
# import pdb; pdb.set_trace()
if DEBUG:
print('Observe tail: ', tail)
print('Observe tail: ', len(tail))
if len(tail) == 2:
# Check for single referenced string
if isinstance(tail[0], str):
ob_pm1 = [tail[0]]
else:
ob_pm1 = tail[0]
if isinstance(tail[1], str):
ob_pm2 = [tail[1]]
else:
ob_pm2 = tail[1]
else:
raise AssertionError('Unknown list of observe params!')
if DEBUG:
print('Observe Param-1: ', ob_pm1)
print('Observe Param-2: ', ob_pm2)
# Evaluate observe params
distn, sig = evaluate_program([ob_pm1], sig=sig, l=l)
value, sig = evaluate_program([ob_pm2], sig=sig, l=l)
value = _totensor(x=value)
if DEBUG:
print('Observe distribution: ', distn)
print('Observe Value: ', value, "\n")
# Obtain likelihood-- cases where it can be obtained
try:
if "logW" in sig.keys():
sig["logW"] += distn.log_prob(value)
else:
sig["logW"] = distn.log_prob(value)
except:
if "logW" in sig.keys():
sig["logW"] += 0.0
else:
sig["logW"] = 0.0
return [value, sig]
# Most likely a single element list or function name
else:
if DEBUG:
print('End case Root Value: ', root)
print('End case Tail Value: ', tail)
# Check in local vars
if root in l.keys():
return [l[root], sig]
# Check in Functions vars
elif root in rho.keys():
# import pdb; pdb.set_trace()
fnparams_ = {**l}
fnparams, fnbody =rho[root]
if len(tail) != len(fnparams):
raise AssertionError('Function params mis-match!')
else:
for k in range(len(tail)):
fnparams_[fnparams[k]] = evaluate_program([tail[k]], sig=sig, l=l)[0]
if DEBUG:
print('Function Params :', fnparams_)
print('Function Body :', fnbody)
                    # Evaluate function body
eval_output, sig = evaluate_program([fnbody], sig=sig, l=fnparams_)
if DEBUG:
print('Function evaluation output: ', eval_output)
return [eval_output, sig]
else:
return [root, sig]
except:
# Just a single element
return [ast, sig]
else:
# Parse functions
for func in range(len(ast)-1):
if DEBUG:
print('Function: ', ast[func])
fname, _ = evaluate_program([ast[func]], sig=sig, l=l)
if DEBUG:
print('Parsed function: ', fname)
print("\n")
# Evaluate Expression
try:
outputs_, sig = evaluate_program([ast[-1]], sig=sig, l=l)
except:
raise AssertionError('Failed to evaluate expression!')
if DEBUG:
print('Final output: ', outputs_)
# Return
return [outputs_, sig]
return [None, sig]
def get_stream(ast):
"""
Return a stream of prior samples
"""
while True:
yield evaluate_program(ast)[0]
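# Usage sketch (assuming `ast` was loaded from one of the daphne JSON files):
#   stream = get_stream(ast)
#   prior_samples = [next(stream) for _ in range(1000)]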
def likelihood_weighting_IS(ast, L):
samples = []
for i in range(L):
sig = {}
sig["logW"] = 0.0
r_l, sig_l = evaluate_program(ast, sig=sig, l={})
s_l = sig_l["logW"]
samples.append([r_l, s_l])
return samples
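# Illustrative helper (a minimal sketch, not used by the tests below): computes
# the self-normalized importance-sampling estimate of E[r] from the
# [value, log-weight] pairs returned by likelihood_weighting_IS. It assumes the
# returned values support scalar/tensor arithmetic, and mirrors the per-task
# loops in the __main__ block.
def posterior_mean_IS(samples):
    weights = [math.exp(logW) for _, logW in samples]
    total_weight = sum(weights)
    return sum((w / total_weight) * r for (r, _), w in zip(samples, weights))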
def independent_MH(ast, S):
sig = {}
sig["logW"] = 0.0
r = evaluate_program(ast, sig=sig, l={})[0]
logW = 0.0
all_r = []
uniform_dist = distributions.uniform.Uniform(low=0.0, high=1.0)
for i in range(S):
sig = {}
sig["logW"] = 0.0
r_l, sig_l = evaluate_program(ast, sig=sig, l={})
s_l = sig_l["logW"]
alpha = math.exp(s_l)/math.exp(logW)
u = (uniform_dist.sample()).item()
if u < alpha:
r = r_l
logW = s_l
all_r.append([r])
return all_r
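# Note (illustrative): the acceptance ratio above exponentiates the two
# log-weights separately, which can overflow or underflow. A numerically safer
# equivalent computes the acceptance probability in log space, e.g.:
#   alpha = math.exp(min(0.0, s_l - logW))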
#------------------------------Test Functions --------------------------------#
def run_deterministic_tests():
for i in range(1,14):
ast_path = f'./jsons/eval/deterministic/test_{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
ret, sig = evaluate_program(ast)
print('Running evaluation-based-sampling for deterministic test number {}:'.format(str(i)))
truth = load_truth('./programs/tests/deterministic/test_{}.truth'.format(i))
print("Evaluation Output: ", ret)
print("Ground Truth: ", truth)
try:
assert(is_tol(ret, truth))
except AssertionError:
raise AssertionError('return value {} is not equal to truth {} for exp {}'.format(ret,truth,ast))
print('Test passed \n')
print('All deterministic tests passed.')
def run_probabilistic_tests():
    global rho  # so that `rho = {}` below actually clears the module-level function store
    num_samples=1e4
#num_samples=10
max_p_value =1e-4
for i in range(1,7):
ast_path = f'./jsons/eval/probabilistic/test_{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
stream = get_stream(ast)
# samples = []
# for k in range(1):
# samples.append(next(stream))
# print(samples)
print('Running evaluation-based-sampling for probabilistic test number {}:'.format(str(i)))
truth = load_truth('./programs/tests/probabilistic/test_{}.truth'.format(i))
p_val = run_prob_test(stream, truth, num_samples)
# Empty globals funcs
rho = {}
assert(p_val > max_p_value)
print('P-Value: ', p_val)
print('Test passed \n')
print('All probabilistic tests passed.')
def run_hw2_tests():
    global rho  # so that `rho = {}` below actually clears the module-level function store
    #for i in range(1,5):
for i in range(4,5):
if i == 1:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i+1)))
ast_path = f'./jsons/eval/final/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
print("Single Run Evaluation: ")
ret, sig = evaluate_program(ast)
print("Evaluation Output: ", ret)
print("\n")
print("Expectation: ")
stream = get_stream(ast)
samples = []
for k in range(1000):
samples.append(next(stream))
# print(samples)
all_samples = torch.tensor(samples)
# print("Evaluation Output: ", all_samples)
print("Mean of 1000 samples: ", torch.mean(all_samples))
print("\n")
# Empty globals funcs
rho = {}
elif i == 2:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i+1)))
ast_path = f'./jsons/eval/final/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print(len(ast))
print("Single Run Evaluation: ")
ret, sig = evaluate_program(ast)
print("Evaluation Output: ", ret)
print("\n")
print("Expectation: ")
stream = get_stream(ast)
samples = []
for k in range(1000):
if k == 0:
samples = next(stream)
samples = samples.unsqueeze(0)
print(samples.shape)
else:
sample = next(stream)
sample = sample.unsqueeze(0)
samples = torch.cat((samples, sample), dim=0)
print("Evaluation Output: ", samples.shape)
print("Mean of 1000 samples: ", torch.mean(samples, dim=0))
print("\n")
# print(samples)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.hist([a[0] for a in samples])
ax2.hist([a[1] for a in samples])
plt.savefig(f'plots/2.png')
# Empty globals funcs
rho = {}
elif i == 3:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i+1)))
ast_path = f'./jsons/eval/final/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret)
# print("\n")
print("Expectation: ")
stream = get_stream(ast)
samples = []
for k in range(1000):
if k == 0:
samples = next(stream)
samples = samples.unsqueeze(0)
# print(samples.shape)
else:
sample = next(stream)
sample = sample.unsqueeze(0)
samples = torch.cat((samples, sample), dim=0)
# print(samples)
print("Evaluation Output: ", samples.shape)
print("Mean of 1000 samples for each HMM step: \n", torch.mean(samples, dim=0))
print("\n")
fig, axs = plt.subplots(3,6)
png = [axs[i//6,i%6].hist([a[i] for a in samples]) for i in range(17)]
plt.tight_layout()
plt.savefig(f'plots/p3.png')
# Empty globals funcs
rho = {}
elif i == 4:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i+1)))
ast_path = f'./jsons/eval/final/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret.shape)
# print("\n")
print("Expectation: ")
stream = get_stream(ast)
samples = []
for k in range(1000):
if k == 0:
samples = next(stream)
samples = samples.unsqueeze(0)
# print(samples.shape)
else:
sample = next(stream)
sample = sample.unsqueeze(0)
samples = torch.cat((samples, sample), dim=0)
# print(samples)
print("Evaluation Output: ", samples.shape)
W_0 = samples[:, 0:10]
b_0 = samples[:, 10:20]
W_1 = samples[:, 20:120]
b_1 = samples[:, 120:]
print("W_0: ", W_0.shape)
print("b_0: ", b_0.shape)
print("W_1: ", W_1.shape)
print("b_1: ", b_1.shape)
print("Mean of 1000 samples for W_0: \n", torch.mean(W_0, dim=0))
print("Mean of 1000 samples for b_0: \n", torch.mean(b_0, dim=0))
print("Mean of 1000 samples for W_1: \n", torch.mean(W_1, dim=0))
print("Mean of 1000 samples for b_1: \n", torch.mean(b_1, dim=0))
print("\n")
# Empty globals funcs
rho = {}
def my_tests():
## Test-1
l = {'observe3': 2.1, 'observe4': 3.9, 'observe5': 5.3, 'observe6': 7.7,\
'observe7': 10.2, 'observe8': 12.9, 'sample2': torch.tensor([-15.6374]),\
'sample1': [torch.tensor([-2.3942])]}
ast = [['normal', ['+', ['*', 'sample1', 1.0], 'sample2'], 1.0]]
ret, sig = evaluate_program(ast, l=l)
print('Running evaluation-based-sampling for my test')
print("Evaluation Output: ", ret)
l = {'observe8': 1.1, 'observe10': 2.1, 'observe12': 2.0, 'observe14': 1.9,\
'observe16': 0.0, 'observe18': -0.1, 'observe20': -0.05,\
'sample6': torch.tensor([0.4358, 0.0157, 0.5484]),\
'sample13': [torch.tensor(2)], 'sample19': [torch.tensor(1)],\
'sample1': [torch.tensor([0.0391])],\
'sample7': [torch.tensor(2)],\
'sample4': [torch.tensor([3.7046])], 'sample3': [torch.tensor([1.2139])],\
'sample2': [torch.tensor([12.3055])], 'sample5': [torch.tensor([0.9708])],\
'sample17': [torch.tensor(0)], 'sample0': [torch.tensor([-9.7500])],\
'sample9': [torch.tensor(2)], 'sample15': [torch.tensor(2)],
'sample11': [torch.tensor(2)]}
ast = [['observe', ['get', ['vector', ['normal', 'sample0', 'sample1'], ['normal', 'sample2', 'sample3'], ['normal', 'sample4', 'sample5']], 'sample7'], 1.1]]
ret, sig = evaluate_program(ast, l=l)
print('Running evaluation-based-sampling for my test')
print("Evaluation Output: ", ret)
## Test-2
l={'sample2': [torch.tensor([16.1031])], 'sample9': [torch.tensor(1)],\
'sample17': [torch.tensor(0)], 'sample6': [torch.tensor([0.3119, 0.1449, 0.5432])],\
'sample5': [torch.tensor([0.1669])], 'sample3': [torch.tensor([0.4494])],\
'sample15': [torch.tensor(0)], 'sample19': [torch.tensor(2)], 'sample1': [torch.tensor([0.0649])],\
'sample13': [torch.tensor(2)], 'sample0': [torch.tensor([-6.3466])], 'sample7': [torch.tensor(0)],\
'sample11': [torch.tensor(2)], 'sample4': [torch.tensor([3.3935])]
}
ast = [['discrete', 'sample6']]
ret, sig = evaluate_program(ast, l=l)
print('Running evaluation-based-sampling for my test')
print("Evaluation Output: ", ret)
#------------------------------MAIN--------------------------------------------
if __name__ == '__main__':
# Change the path
program_path = '/Users/tony/Documents/prog-prob/CS539-HW-3'
# # Uncomment the appropriate tests to run
# # Deterministic Test
# run_deterministic_tests()
#
# # Probabilistic Test
# run_probabilistic_tests()
# # Run HW-2 Tests
# run_hw2_tests()
for i in range(1,5):
# Note: this path should be with respect to the daphne path!
# ast = daphne(['desugar', '-i', f'{program_path}/src/programs/{i}.daphne'])
# ast_path = f'./jsons/HW3/eval/{i}.json'
# with open(ast_path, 'w') as fout:
# json.dump(ast, fout, indent=2)
# print('\n\n\nSample of posterior of program {}:'.format(i))
if i == 1:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i)))
ast_path = f'./jsons/HW3/eval/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret)
# print("Evaluation Sigma: ", sig)
# print("\n")
print("--------------------------------")
print("Importance sampling Evaluation: ")
num_samples = 100000
all_output = likelihood_weighting_IS(ast=ast, L=num_samples)
W_k = 0.0
for k in range(num_samples):
r_l, W_l = all_output[k]
W_k += math.exp(W_l)
EX = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX += ((W_l/W_k) * r_l)
print("Posterior Mean: ", EX)
print("--------------------------------")
print("\n")
EX2 = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX2 += ((W_l/W_k) * torch.pow(r_l, 2))
var = EX2 - torch.pow(EX, 2)
print("Posterior Variance:", var)
print("--------------------------------")
print("\n")
# Empty globals funcs
rho = {}
elif i == 2:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i)))
ast_path = f'./jsons/HW3/eval/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print(len(ast))
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret)
# print("\n")
print("--------------------------------")
print("Importance sampling Evaluation: ")
num_samples = 100000
all_output = likelihood_weighting_IS(ast=ast, L=num_samples)
W_k = 0.0
for k in range(num_samples):
r_l, W_l = all_output[k]
W_k += math.exp(W_l)
EX_slope = 0.0
EX_bias = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX_slope += ((W_l/W_k) * r_l[0])
EX_bias += ((W_l/W_k) * r_l[1])
print("Posterior Bias Mean: ", EX_bias)
print("Posterior Slope Mean: ", EX_slope)
print("--------------------------------")
print("\n")
EX2_ = []
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX2_.extend([(W_l/W_k) * r_l[0] * r_l[1]])
covar = sum(EX2_) - (EX_slope * EX_bias)
print("Posterior Covariance : ", covar)
print("---------------------------------")
print("\n")
# Empty globals funcs
rho = {}
elif i == 3:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i)))
ast_path = f'./jsons/HW3/eval/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret, sig)
# print("\n")
print("--------------------------------")
print("Importance sampling Evaluation: ")
num_samples = 10000
all_output = likelihood_weighting_IS(ast=ast, L=num_samples)
W_k = 0.0
for k in range(num_samples):
r_l, W_l = all_output[k]
W_k += math.exp(W_l)
EX = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX += ((W_l/W_k) * float(r_l))
print("Posterior Mean: ", EX)
print("--------------------------------")
print("\n")
EX2 = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX2 += ((W_l/W_k) * (float(r_l)**2))
var = EX2 - (EX**2)
print("Posterior Variance:", var)
print("--------------------------------")
print("\n")
# Empty globals funcs
rho = {}
elif i == 4:
print('Running evaluation-based-sampling for Task number {}:'.format(str(i)))
ast_path = f'./jsons/HW3/eval/{i}.json'
with open(ast_path) as json_file:
ast = json.load(json_file)
# print(ast)
# print("Single Run Evaluation: ")
# ret, sig = evaluate_program(ast)
# print("Evaluation Output: ", ret, sig)
# print("\n")
print("--------------------------------")
print("Importance sampling Evaluation: ")
num_samples = 100000
all_output = likelihood_weighting_IS(ast=ast, L=num_samples)
W_k = 0.0
for k in range(num_samples):
r_l, W_l = all_output[k]
W_k += math.exp(W_l)
EX = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX += ((W_l/W_k) * r_l)
print("Posterior Mean: ", EX)
print("--------------------------------")
print("\n")
EX2 = 0.0
for l in range(num_samples):
r_l, W_l = all_output[l]
W_l = math.exp(W_l)
EX2 += ((W_l/W_k) * torch.pow(r_l, 2))
var = EX2 - (EX)**2
print("Posterior Variance:", var)
print("--------------------------------")
print("\n")
# Empty globals funcs
rho = {}
#-------------------------------------------------------------------------------
|
import pathlib
import sys
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from mirumon.settings.config import get_app_settings
project_root_dir = pathlib.Path(__file__).resolve().parents[3]
sys.path.append(str(project_root_dir))
config = context.config
fileConfig(config.config_file_name) # type: ignore
target_metadata = None # type: ignore
dsn = str(get_app_settings().postgres_dsn)
config.set_main_option("sqlalchemy.url", dsn)
def run_migrations_online() -> None:
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
run_migrations_online()
|
# TODO Find what RPi returns and change the condition to work on all systems other than RPi
import platform
from gpiozero.pins.mock import MockFactory
from gpiozero import Device
def checkSimulate():
if platform.system() == "Darwin" or platform.system() == "Windows":
Device.pin_factory = MockFactory()
# Check if current time is between startTime and endTime
from datetime import datetime
def isTimeBetween(startTime, endTime):
timeNow = datetime.now().time()
return (timeNow >= datetime.strptime(startTime, '%H:%M:%S').time() and timeNow <= datetime.strptime(endTime, '%H:%M:%S').time())
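# Usage sketch: isTimeBetween('08:00:00', '17:00:00') -> True during working
# hours. Note that this simple comparison assumes the window does not cross
# midnight (e.g. '22:00:00' to '06:00:00' would always return False).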
# Check internet connection
from urllib.request import urlopen
from urllib.error import URLError
def hasInternetConnection():
try:
urlopen('http://www.google.com', timeout=10)
return True
except URLError as err:
return False
# Restarts the current program
import os, sys, psutil, logging
def restartProgram():
try:
p = psutil.Process(os.getpid())
for handler in p.open_files() + p.connections():
os.close(handler.fd)
except Exception as e:
logging.error(e)
python = sys.executable
os.execl(python, python, *sys.argv)
# Check & pull latest code from git
import subprocess
def checkUpdates():
print("Checking for updated code in git")
if hasInternetConnection():
try:
            # check_output returns bytes, so decode before comparing to strings
            output = subprocess.check_output(["git", "pull"]).decode("utf-8")
            # print("git output: " + output)
            output = output.replace("-", " ")
            if output != "Already up to date.\n":
print("Restarting the application...\n")
restartProgram()
print("Completing code update")
except:
pass
else:
print("No internet connection")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `dwglasso` package."""
import unittest
import numpy as np
from matplotlib import pyplot as plt
import seaborn
from .context import dwglasso
from dwglasso import var
from .random_var_generators import random_var, iid_gaussian_var, iid_ber_graph
class TestVAR_plots(unittest.TestCase):
'''Creates some plots from a VAR system'''
@classmethod
def setUpClass(cls):
cls.B0 = np.array([[0.8, 0.1],
[0., 0.8]])
cls.B1 = np.array([[0.1, 0.0],
[0., 0.1]])
cls.B = [cls.B0, cls.B1]
np.random.seed(2718)
return
def test_000_plot(self):
'''Drive a stable system with 0 mean noise'''
T = 100
U = np.random.multivariate_normal(np.zeros(2),
0.1 * np.eye(2),
T)
t = range(T)
system = var.VAR(self.B)
Y = system.drive(U)
for i in range(2):
plt.plot(t, U[:, i], linestyle='--', alpha=0.5,
label='$u_%d(t)$' % i)
plt.plot(t, Y[:, i], linewidth=2, label='$x_%d(t)$' % i)
plt.legend()
plt.xlabel('$t$')
plt.ylabel('Output')
plt.title('Driven Stable VAR(2) System')
plt.show()
return
def test_001_plot(self):
'''Drive an unstable system with 0 mean noise'''
T = 10
U = np.random.multivariate_normal(np.zeros(2),
0.1 * np.eye(2),
T)
t = range(T)
system = var.VAR([2 * B_tau for B_tau in self.B])
Y = system.drive(U)
for i in range(2):
plt.plot(t, U[:, i], linestyle='--', alpha=0.5,
label='$u_%d(t)$' % i)
plt.plot(t, Y[:, i], linewidth=2, label='$x_%d(t)$' % i)
plt.legend()
plt.xlabel('$t$')
plt.ylabel('Output')
plt.title('Driven Unstable VAR(2) System')
plt.show()
return
class TestVAR(unittest.TestCase):
"""Basic tests for VAR model"""
# setUpClass / tearDownClass are executed only once for TestVAR
@classmethod
def setUpClass(cls):
# Data common to many tests
cls.B0 = np.array([[0.8, 0.1],
[0., 0.8]])
cls.B1 = np.array([[0.1, 0.0],
[0., 0.1]])
cls.B = [cls.B0, cls.B1]
cls.G = np.array([[1.0, 1.0],
[0.0, 1.0]])
np.random.seed(2718)
cls.n = 50
cls.p = 4
cls.q = 0.4
cls.B_random = random_var(lambda: iid_ber_graph(cls.n, cls.q),
lambda G: iid_gaussian_var(cls.p, cls.G,
0.65 / cls.q),
max_tries=1)
return
# setUp / tearDown are executed before and after every test
def setUp(self):
"""Set up test fixtures, if any."""
return
def tearDown(self):
"""Tear down test fixtures, if any."""
return
def test_000_basic_init(self):
system = var.VAR(self.B)
self.assertEqual(system.n, 2)
self.assertEqual(system.p, 2)
self.assertEqual(system.t, 0)
return
def test_001_is_stable(self):
stable_system = var.VAR(self.B)
self.assertTrue(stable_system.is_stable())
unstable_system = var.VAR([2 * self.B0, self.B1])
self.assertFalse(unstable_system.is_stable())
return
def test_002_induced_graph(self):
system = var.VAR(self.B)
self.assertTrue(np.all(system.induced_graph() == self.G))
return
def test_003_drive1(self):
system = var.VAR(self.B)
u = np.array([1., 1.])
y = system.drive(u)
self.assertTrue(np.all(y == u))
self.assertEqual(system.t, 1)
return
def test_004_drive2(self):
system = var.VAR(self.B)
U = np.ones((3, 2))
Y_expected = np.array([[1., 1.],
[1.8, 1.9],
[2.54, 2.8]])
Y = system.drive(U)
self.assertTrue(np.allclose(Y, Y_expected, atol=1e-12),
                        msg='Y = %s,\n Y_expected = %s' % (Y, Y_expected))
self.assertEqual(system.t, 3)
return
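    # Worked example behind Y_expected above (a sketch of the assumed
    # row-vector convention y_t = y_{t-1} @ B0 + y_{t-2} @ B1 + u_t, with zero
    # prehistory):
    #   y_1 = u_1                       = [1.00, 1.00]
    #   y_2 = y_1 @ B0 + u_2            = [0.80, 0.90] + [1, 1] = [1.80, 1.90]
    #   y_3 = y_2 @ B0 + y_1 @ B1 + u_3 = [1.44, 1.70] + [0.10, 0.10] + [1, 1]
    #                                   = [2.54, 2.80]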
def test_005_drive3(self):
system = var.VAR(self.B)
u = np.array([1., 1.])
U = np.ones((3, 2))
Y_expected = system.drive(U)
system.reset()
for t in range(3):
y = system.drive(u)
self.assertTrue(np.allclose(Y_expected[t, :],
y, atol=1e-12),
msg='y = %s,\n y_expected = %s' %
(y, Y_expected[t, :]))
return
def test_006_drive4(self):
# Same as test_005_drive3, but with a more complicated system
T = 25
system = var.VAR(self.B_random)
n = system.n
# Random input noise
U = np.random.multivariate_normal(np.zeros(n),
np.eye(n),
T)
Y_expected = system.drive(U)
system.reset()
for t in range(T):
y = system.drive(U[t, :])
self.assertTrue(np.allclose(Y_expected[t, :],
y, atol=1e-12))
return
def test_007_state_init(self):
system = var.VAR(self.B, x_0=np.array([1.8, 1.9, 1., 1.]))
y = system.drive(np.array([1., 1.]))
self.assertTrue(np.allclose(y, np.array([2.54, 2.8])))
return
def test_008_exceptions(self):
with self.assertRaises(ValueError):
var.VAR([np.eye(2), np.eye(3)])
with self.assertRaises(ValueError):
var.VAR(self.B, x_0=np.array([1, 2, 3]))
with self.assertRaises(ValueError):
system = var.VAR(self.B)
system.drive(np.array([1, 2, 3]))
# def test_command_line_interface(self):
# """Test the CLI."""
# runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# assert 'dwglasso.cli.main' in result.output
# help_result = runner.invoke(cli.main, ['--help'])
# assert help_result.exit_code == 0
# assert '--help Show this message and exit.' in help_result.output
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Optional, Dict, Tuple, Union
from copy import deepcopy
# type Aliases
#
SDRNodeType = Tuple[str, str]
""" SDRNodeType is a tuple of strings (node_type and node_uid) """
SDREdgeType = Tuple[str, Optional[str]]
""" SDREdgeType is a tuple (edge_type edge_uid) """
SDRKeyType = str
""" SDRKeyType is a string representation of (source_node, edge_type, target_node) """
SDRValueType = Dict[str, Union[str, float, None]]
""" SDRValueType is a dictionary: {prob: float, numeric: float} """
class SDR:
def __init__(self, sdr=None):
self.sdr: dict = {}
if sdr is not None:
if isinstance(sdr, SDR):
self.sdr = deepcopy(sdr.sdr)
elif isinstance(sdr, dict):
self.sdr = deepcopy(sdr)
def __contains__(self, sdr_key: SDRKeyType) -> bool:
"""
method to check if an sdr_key exists in the sdr
:param sdr_key: edge key to check
:return: True if it exists else False
"""
return sdr_key in self.sdr
def __iter__(self) -> iter:
"""
method to return an iterable of the sdr keys
:return: iterable of self keys
"""
return iter(self.sdr)
def __getitem__(self, sdr_key: SDRKeyType) -> SDRValueType:
"""
method to access the sdr edge attributes
:param sdr_key: the edge to return
:return: the edge attributes {source_type:, source_uid:, target_type: , target_uid:, edge_type:, edge_uid:, prob:, numeric:, numeric_min:, numeric_max:}
"""
return self.sdr[sdr_key]
def set_item(self,
source_node: SDRNodeType,
edge: SDREdgeType,
target_node: SDRNodeType,
probability: float = 1.0,
numeric: Optional[float] = None,
numeric_min: Optional[float] = None,
numeric_max: Optional[float] = None) -> None:
"""
method to set the sdr attributes
:param source_node: tuple of (source_type, source uid)
:param edge: edge_type
:param target_node: tuple of (target_type, target uid)
:param probability: probability of the edge
:param numeric: numeric value associated with edge
:param numeric_min: the min numeric can be
:param numeric_max: the max numeric can be
:return: None
"""
sdr_key = '{}:{}:{}:{}:{}:{}'.format(source_node[0], source_node[1], edge[0], edge[1], target_node[0], target_node[1])
self.sdr[sdr_key] = {'source_type': source_node[0], 'source_uid': source_node[1],
'target_type': target_node[0], 'target_uid': target_node[1],
'edge_type': edge[0],
'edge_uid': edge[1],
'prob': probability,
'numeric': numeric,
'numeric_min': numeric_min,
'numeric_max': numeric_max
}
def update(self, sdr) -> None:
"""
        method to update an SDR with the contents of another SDR
:param sdr: the sdr to update with
:return: None
"""
self.sdr.update(sdr.sdr)
def get_dict(self) -> dict:
"""
method to return a dict representation
        :return: dict of dicts: {edge_key: {'source_type', 'source_uid', 'edge_type', 'edge_uid', 'target_type', 'target_uid', 'prob', 'numeric', 'numeric_min', 'numeric_max'}}
"""
return deepcopy(self.sdr)
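# Minimal usage sketch (the node and edge names below are made up for
# illustration):
#   sdr = SDR()
#   sdr.set_item(source_node=('person', 'alice'),
#                edge=('knows', None),
#                target_node=('person', 'bob'),
#                probability=0.9)
#   'person:alice:knows:None:person:bob' in sdr              # -> True
#   sdr['person:alice:knows:None:person:bob']['prob']        # -> 0.9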
|
from .cameron import cameron_prediction
from .exceptions import DistanceOutOfBoundsError
from .purdy import purdy, purdy_prediction
from .riegel import riegel_prediction
from .vo2max import VO2Max, VO2Max_prediction
name = 'running_performance'
__all__ = [
    'cameron_prediction',
    'DistanceOutOfBoundsError',
    'name',
    'purdy_prediction',
    'purdy',
    'riegel_prediction',
    'VO2Max_prediction',
    'VO2Max',
]
|
from application.infrastructure.error.errors import VCFHandlerBaseError
class AuthenticationError(VCFHandlerBaseError):
message = 'Authentication Error.'
error_type = 'AuthenticationError'
class AuthorizationError(VCFHandlerBaseError):
message = 'Authorization error.'
error_type = 'AuthorizationError'
|
# standard library imports
import os
import math
from math import sqrt, pi
# package imports
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions.gamma import Gamma
from torch.distributions.multivariate_normal import MultivariateNormal
import tensorflow as tf
import tensorflow_probability as tfp
tf.keras.backend.set_floatx('float32')
# local imports
import bnn.layers.sparse as layers
import bnn.inference
import bnn.util as util
class BayesLinearLasso(nn.Module):
"""
    Linear regression with a double exponential (Laplace) prior
"""
def __init__(self, dim_in, dim_out, prior_w2_sig2=1.0, noise_sig2=1.0, scale_global=1.0, groups=None, scale_groups=None):
super(BayesLinearLasso, self).__init__()
### architecture
self.dim_in = dim_in
self.dim_out = dim_out
self.prior_w2_sig2 = prior_w2_sig2
self.noise_sig2 = noise_sig2
self.scale_global = scale_global
self.groups = groups # list of lists with grouping (e.g. [[1,2,3], [4,5]])
self.scale_groups = scale_groups
def make_unnormalized_log_prob_tf(self, x, y):
# Convert to tensors
y_tf = tf.convert_to_tensor(y)
x_tf = tf.convert_to_tensor(x)
scale_global_tf = tf.dtypes.cast(tf.convert_to_tensor(self.scale_global), tf.float64)
if self.groups is not None:
groups_tf = [tf.convert_to_tensor(group) for group in self.groups]
@tf.function
def unnormalized_log_prob(w):
resid = y_tf - x_tf@w
# likelihood and L2 penalty
log_prob = -1/(2*self.noise_sig2)*tf.transpose(resid)@(resid)
# L2 penalty?
#log_prob += - 1/(2*prior_w2_sig2)*tf.transpose(w2)@w2
# Within group
log_prob -= tf.math.reduce_sum(scale_global_tf*tf.math.abs(w)) # L1 penalty
#log_prob -= tf.math.reduce_sum(scale_global_tf*w**2) # L2 penalty
# Group level
if self.groups is not None:
for scale_groups, group in zip(self.scale_groups, groups_tf):
log_prob -= scale_groups*tf.norm(tf.gather(w, group)) # L1 penalty
#log_prob -= scale_groups*tf.norm(tf.gather(w, group)**2) # L2 penalty
return log_prob[0,0]
return unnormalized_log_prob
def train(self, x, y, num_results = int(10e3), num_burnin_steps = int(1e3)):
'''
Train with HMC
'''
unnormalized_log_prob_tf = self.make_unnormalized_log_prob_tf(x, y)
init_values = .1*np.random.randn(self.dim_in,1)
samples, accept = bnn.inference.mcmc.hmc_tf(unnormalized_log_prob_tf,
init_values,
num_results,
num_burnin_steps,
num_leapfrog_steps=3,
step_size=1.)
return samples, accept
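# The target density coded in make_unnormalized_log_prob_tf above is (up to an
# additive constant) the Bayesian lasso / group-lasso posterior:
#   log p(w | y, X) = -||y - Xw||^2 / (2 * noise_sig2)
#                     - scale_global * ||w||_1
#                     - sum_j scale_groups[j] * ||w_{G_j}||_2
# i.e. a Laplace prior on individual weights and, when `groups` is given, a
# group-level penalty on each index set G_j.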
class RffGradPen(nn.Module):
"""
Random features layer
Variance of output layer scaled by width (see RFF activation function)
"""
def __init__(self, dim_in, dim_hidden, dim_out, prior_w2_sig2=1.0, noise_sig2=1.0, scale_global=1.0, groups=None, scale_groups=None, lengthscale=1.0, penalty_type='l1'):
super(RffGradPen, self).__init__()
### architecture
self.dim_in = dim_in
self.dim_hidden = dim_hidden
self.dim_out = dim_out
self.prior_w2_sig2 = prior_w2_sig2
self.noise_sig2 = noise_sig2
self.scale_global = scale_global
self.groups = groups # list of lists with grouping (e.g. [[1,2,3], [4,5]])
self.scale_groups = scale_groups
self.lengthscale = lengthscale
self.penalty_type = penalty_type
self.register_buffer('w', torch.empty(dim_hidden, dim_in))
self.register_buffer('b', torch.empty(dim_hidden))
self.sample_features()
self.act = lambda z: sqrt(2/self.dim_hidden)*torch.cos(z)
def sample_features(self):
# sample random weights for RFF features
self.w.normal_(0, 1 / self.lengthscale)
self.b.uniform_(0, 2*pi)
def hidden_features(self, x):
#return self.act(x@self.w.T + self.b.reshape(1,-1)) # (n, dim_hidden)
return self.act(F.linear(x, self.w, self.b)) # (n, dim_hidden)
def compute_jacobian(self, x):
'''
Compute jacobian of hidden units with respect to inputs.
Assumes inputs do not impact each other (i.e. input observation n only impacts hidden for observation n)
Inputs:
x: (n_obs, dim_in) tensor
Outputs:
jac: (n_obs, dim_out, dim_in) tensor of derivatives
'''
jac = []
for n in range(x.shape[0]):
jac_n = torch.autograd.functional.jacobian(self.hidden_features, x[n,:].reshape(1,-1)).squeeze() # dim_hidden x dim_in
jac.append(jac_n)
return torch.stack(jac) # n_obs x dim_out x dim_in
def compute_Ax(self, x):
'''
        Computes the per-input gradient-penalty matrices
        A_d = (1/n) * J[:, :, d].T @ J[:, :, d]  (one per input dimension d),
        so that w.T @ A_d @ w equals the empirical mean of (df(x)/dx_d)^2 over
        the data, together with their group-wise sums when `groups` is set.
'''
n = x.shape[0]
J = self.compute_jacobian(x) # N x K x D
#Ja = -sqrt(2/self.dim_hidden) * self.w.unsqueeze(0) * torch.sin(F.linear(x, self.w, self.b)).unsqueeze(-1) #analytical jacobian
# all inputs
A_d = [1/n*J[:,:,d].T@J[:,:,d] for d in range(self.dim_in)]
# groups of inputs
if self.groups is not None:
A_groups = [torch.sum(torch.stack([A_d[i] for i in group]),0) for group in self.groups]
else:
A_groups = None
return A_d, A_groups
def make_unnormalized_log_prob_tf(self, x, y):
# Set prior (since based on data)
Ax_d, Ax_groups = self.compute_Ax(x)
# Convert to tensors
y_tf = tf.convert_to_tensor(y)
h_tf = tf.convert_to_tensor(self.hidden_features(x))
Ax_d_tf = [tf.convert_to_tensor(A) for A in Ax_d]
if Ax_groups is not None:
Ax_groups_tf = [tf.convert_to_tensor(A) for A in Ax_groups]
@tf.function
def unnormalized_log_prob(w):
resid = y_tf - h_tf@w
# likelihood
log_prob = -1/(2*self.noise_sig2)*tf.transpose(resid)@(resid)
# L2 penalty
log_prob += - 1/(2*self.prior_w2_sig2)*tf.transpose(w)@w
## likelihood and L2 penalty
#log_prob = -1/self.noise_sig2*tf.transpose(resid)@(resid) \
# - tf.transpose(w)@(1/self.prior_w2_sig2*tf.eye(self.dim_hidden, dtype=tf.float64))@w
# Within group gradient penalty
for scale_global, A in zip(self.scale_global, Ax_d_tf):
grad_f_sq = tf.transpose(w)@(A)@w
if self.penalty_type == 'l1':
log_prob += - scale_global*tf.math.sqrt(grad_f_sq)
elif self.penalty_type == 'l2':
log_prob += - scale_global*grad_f_sq
# Group level gradient penalty
if Ax_groups is not None:
for scale_groups, A in zip(self.scale_groups, Ax_groups_tf):
log_prob -= scale_groups*tf.math.sqrt(tf.transpose(w)@(A)@w)
return log_prob[0,0]
return unnormalized_log_prob
def train(self, x, y, num_results = int(10e3), num_burnin_steps = int(1e3)):
'''
Train with HMC
'''
unnormalized_log_prob_tf = self.make_unnormalized_log_prob_tf(x, y)
init_values = .1*np.random.randn(self.dim_hidden,1) #tf.constant(.01, shape=(self.dim_hidden,1), dtype=tf.float64)
samples, accept = bnn.inference.mcmc.hmc_tf(unnormalized_log_prob_tf,
init_values,
num_results,
num_burnin_steps,
num_leapfrog_steps=3,
step_size=1.)
return samples, accept
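# Background note (standard random-Fourier-feature identity, not specific to
# this class): with w_k ~ N(0, 1/lengthscale^2), b_k ~ U(0, 2*pi) and
# h_k(x) = sqrt(2/m) * cos(w_k . x + b_k), the inner product
# prior_w2_sig2 * h(x) . h(x') approximates the RBF kernel
# prior_w2_sig2 * exp(-||x - x'||^2 / (2 * lengthscale^2)) as m grows.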
class RffGradPenHyper(nn.Module):
"""
Random features layer
INCLUDES PRIOR ON lengthscale AND HYPERPRIOR ON prior_w2_sig2
Variance of output layer scaled by width (see RFF activation function)
Inputs:
- dim_in: dimension of inputs (int)
- dim_hidden: number of hidden units (int)
- dim_out: output dimension (int)
- prior_w2_sig2: prior variance of output weights. Corresponds to amplitude variance of RBF kernel. (scalar)
- noise_sig2: observational noise (scalar)
- scale_global: NOT IMPLEMENTED
- groups: NOT IMPLEMENTED
- scale_groups: NOT IMPLEMENTED
- lengthscale: Corresponds to lengthscale of RBF kernel. (scalar)
- penalty_type: select 'l1' for lasso penalty, 'l2' for ridge penalty (str)
"""
def __init__(self, dim_in, dim_hidden, dim_out, prior_w2_sig2=1.0, noise_sig2=1.0, scale_global=1.0, groups=None, scale_groups=None, lengthscale=1.0, penalty_type='l1'):
super(RffGradPenHyper, self).__init__()
### architecture
self.dim_in = dim_in
self.dim_hidden = dim_hidden
self.dim_out = dim_out
self.prior_w2_sig2 = prior_w2_sig2
self.noise_sig2 = noise_sig2
self.scale_global = scale_global
self.groups = groups # list of lists with grouping (e.g. [[1,2,3], [4,5]])
self.scale_groups = scale_groups
self.lengthscale = lengthscale
self.penalty_type = penalty_type
self.register_buffer('w', torch.empty(dim_hidden, dim_in))
self.register_buffer('b', torch.empty(dim_hidden))
self.sample_features()
self.act = lambda z: sqrt(2/self.dim_hidden)*torch.cos(z)
self.act_tf = lambda z: sqrt(2/self.dim_hidden)*tf.math.cos(z)
def sample_features(self):
# sample random weights for RFF features
self.w.normal_(0, 1)
self.b.uniform_(0, 2*pi)
self.w_tf = tf.convert_to_tensor(self.w)
self.b_tf = tf.convert_to_tensor(self.b)
def hidden_features(self, x, lengthscale=1.0):
#return self.act(x@self.w.T + self.b.reshape(1,-1)) # (n, dim_hidden)
return self.act(F.linear(x, self.w / lengthscale, self.b)) # (n, dim_hidden)
def hidden_features_tf(self, x, lengthscale=1.0):
#return self.act(x@self.w.T + self.b.reshape(1,-1)) # (n, dim_hidden)
return self.act_tf(x @ tf.transpose(self.w_tf) / lengthscale + tf.reshape(self.b_tf, (1,-1))) # (n, dim_hidden)
def hidden_features_tf_precompute(self, x_w_tf, lengthscale=1.0):
return self.act_tf(x_w_tf / lengthscale + tf.reshape(self.b_tf, (1,-1))) # (n, dim_hidden)
def make_unnormalized_log_prob_tf(self, x, y):
# Convert to tensors
x_tf = tf.convert_to_tensor(x)
y_tf = tf.convert_to_tensor(y)
n = x.shape[0]
# for lengthscale prior and prior_w2_sig2 hyperprior
l_alpha = tf.convert_to_tensor(1.0, dtype=tf.float64)
l_beta = tf.convert_to_tensor(1.0, dtype=tf.float64)
prior_w2_sig2_alpha = tf.convert_to_tensor(1.0, dtype=tf.float64)
prior_w2_sig2_beta = tf.convert_to_tensor(1.0, dtype=tf.float64)
def log_prob_invgamma(x, alpha, beta):
unnormalized_prob = -(1. + alpha) * tf.math.log(x) - beta / x
normalization = (
tf.math.lgamma(alpha) - alpha * tf.math.log(beta))
return unnormalized_prob - normalization
# precompute
x_w_tf = x @ tf.transpose(self.w_tf)
@tf.function
def unnormalized_log_prob(w, l, prior_w2_sig2):
'''
w: output layer weights
l: lengthscale
'''
h_tf = self.hidden_features_tf_precompute(x_w_tf, l)
resid = y_tf - h_tf@w
# Jacobian of hidden layer (N x K x D)
J = -sqrt(2/self.dim_hidden) * tf.expand_dims(self.w_tf,0) / l * tf.expand_dims(tf.math.sin(x_w_tf / l + tf.reshape(self.b_tf, (1,-1))), -1) # analytical jacobian
# gradient penalties for each input dimension
Ax_d_tf = [1/n*tf.transpose(J[:,:,d])@J[:,:,d] for d in range(self.dim_in)]
# likelihood
log_prob = -1/(2*self.noise_sig2)*tf.transpose(resid)@(resid)
# L2 penalty
log_prob += - 1/(2*prior_w2_sig2)*tf.transpose(w)@w
# prior_w2_sig2 hyperprior
log_prob += log_prob_invgamma(prior_w2_sig2, prior_w2_sig2_alpha, prior_w2_sig2_beta)
# lengthscale prior
log_prob += log_prob_invgamma(l, l_alpha, l_beta)
# Within group gradient penalty
for scale_global, A in zip(self.scale_global, Ax_d_tf):
grad_f_sq = tf.transpose(w)@(A)@w
if self.penalty_type == 'l1':
log_prob += - scale_global*tf.math.sqrt(grad_f_sq)
elif self.penalty_type == 'l2':
log_prob += - scale_global*grad_f_sq
'''
# Group level gradient penalty
if Ax_groups is not None:
for scale_groups, A in zip(self.scale_groups, Ax_groups_tf):
log_prob -= scale_groups*tf.math.sqrt(tf.transpose(w)@(A)@w)
'''
return log_prob[0,0]
return unnormalized_log_prob
def train(self, x, y, num_results = int(10e3), num_burnin_steps = int(1e3)):
'''
Train with HMC
'''
unnormalized_log_prob_tf = self.make_unnormalized_log_prob_tf(x, y)
init_values = [.1*np.random.randn(self.dim_hidden,1), tf.constant(1.0, dtype=tf.float64), tf.constant(1.0, dtype=tf.float64)]
samples, accept = bnn.inference.mcmc.hmc_tf(unnormalized_log_prob_tf,
init_values,
num_results,
num_burnin_steps,
num_leapfrog_steps=3,
step_size=1.)
return samples, accept
class RffGradPenHyper_v2(object):
"""
Random features layer
INCLUDES PRIOR ON lengthscale AND HYPERPRIOR ON prior_w2_sig2
Variance of output layer scaled by width (see RFF activation function)
Inputs:
- dim_in: dimension of inputs (int)
- dim_hidden: number of hidden units (int)
- dim_out: output dimension (int)
- prior_w2_sig2: prior variance of output weights. Corresponds to amplitude variance of RBF kernel. (scalar)
- noise_sig2: observational noise (scalar)
- scale_global: NOT IMPLEMENTED
- groups: NOT IMPLEMENTED
- scale_groups: NOT IMPLEMENTED
- lengthscale: Corresponds to lengthscale of RBF kernel. (scalar)
- penalty_type: select 'l1' for lasso penalty, 'l2' for ridge penalty (str)
"""
def __init__(self, dim_in, dim_hidden, dim_out, prior_w2_sig2=1.0, noise_sig2=1.0, scale_global=1.0, groups=None, scale_groups=None, lengthscale=1.0, penalty_type='l1', family='gaussian'):
super(RffGradPenHyper_v2, self).__init__()
### architecture
self.dim_in = dim_in
self.dim_hidden = dim_hidden
self.dim_out = dim_out
self.prior_w2_sig2 = prior_w2_sig2
self.noise_sig2 = noise_sig2
self.scale_global = scale_global
self.groups = groups # list of lists with grouping (e.g. [[1,2,3], [4,5]])
self.scale_groups = scale_groups
self.lengthscale = lengthscale
self.penalty_type = penalty_type
self.family = family
self.sample_features()
self.act = lambda z: sqrt(2/self.dim_hidden)*tf.math.cos(z)
def sample_features(self):
# sample random weights for RFF features
self.w1 = tf.cast(tf.convert_to_tensor(np.random.normal(0,1,(self.dim_hidden, self.dim_in))), dtype=tf.float32)
self.b1 = tf.cast(tf.convert_to_tensor(np.random.uniform(0,2*pi,(self.dim_hidden,))), dtype=tf.float32)
def compute_xw1(self, x):
if not tf.is_tensor(x):
x = tf.convert_to_tensor(x, dtype=tf.float32)
return x @ tf.transpose(self.w1)
def hidden_features(self, x=None, xw1=None, lengthscale=None):
if xw1 is None:
xw1 = self.compute_xw1(x)
if lengthscale is None:
lengthscale = self.lengthscale
return self.act(xw1 / lengthscale + tf.reshape(self.b1, (1,-1))) # (n, dim_hidden)
def forward(self, w2, x=None, xw1=None, lengthscale=None, h=None):
if h is None:
if xw1 is None:
xw1 = self.compute_xw1(x)
if lengthscale is None:
lengthscale = self.lengthscale
h = self.hidden_features(x, xw1, lengthscale)
return h@tf.reshape(w2,(-1,1))
def jacobian_hidden_features(self, x=None, xw1=None, lengthscale=None):
if xw1 is None:
xw1 = self.compute_xw1(x)
if lengthscale is None:
lengthscale = self.lengthscale
return -sqrt(2/self.dim_hidden) * tf.expand_dims(self.w1,0) / lengthscale * tf.expand_dims(tf.math.sin(xw1 / lengthscale + tf.reshape(self.b1, (1,-1))), -1) # analytical jacobian
def grad_norm(self, x=None, xw1=None, lengthscale=None):
J = self.jacobian_hidden_features(x=x, xw1=xw1, lengthscale=lengthscale)
Ax_d = [1/J.shape[0]*tf.transpose(J[:,:,d])@J[:,:,d] for d in range(self.dim_in)]
return Ax_d
def log_marginal_likelihood(self, x, y):
## GPy for comparison
import GPy
kernel = GPy.kern.RBF(input_dim=x.shape[1], lengthscale=self.lengthscale, variance=self.prior_w2_sig2)
K_gp = kernel.K(x,x) # (n, n)
gp = GPy.models.GPRegression(x,y,kernel)
gp.Gaussian_noise.variance = self.noise_sig2
log_prob_gp = gp.log_likelihood()
## regular log marginal likelihood
n = x.shape[0]
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
h = self.hidden_features(x)
K = h @ tf.transpose(h) * self.prior_w2_sig2
A = K + self.noise_sig2*tf.eye(n)
log_prob = -0.5*n*np.log(2*np.pi) - 0.5*tf.linalg.logdet(A) - 0.5*tf.transpose(y) @ tf.linalg.inv(A) @ y
## faster way
m = int(self.dim_hidden / 2)
phi = h * np.sqrt(self.dim_hidden/2)
A = tf.transpose(phi)@phi + m*self.noise_sig2/self.prior_w2_sig2*tf.eye(2*m)
R = tf.linalg.cholesky(A)
alpha1 = tf.linalg.solve(R, tf.transpose(phi)@y)
log_prob2 = -1/(2*self.noise_sig2)*(tf.norm(y)**2 - tf.norm(alpha1)**2) - 0.5*tf.reduce_sum(tf.math.log(tf.linalg.diag_part(R)**2)) + m*np.log(m*self.noise_sig2/self.prior_w2_sig2) - n/2*np.log(2*np.pi*self.noise_sig2)
        return log_prob_gp, log_prob, log_prob2  # return all three variants so they can be compared
def make_log_marginal_likelihood(self, x, y):
xw1 = self.compute_xw1(x)
@tf.function
def unnormalized_log_prob(lengthscale, xw1=xw1):
n = x.shape[0]
h = self.hidden_features(x=None, xw1=xw1, lengthscale=lengthscale)
m = int(self.dim_hidden / 2)
phi = h * np.sqrt(self.dim_hidden/2)
A = tf.transpose(phi)@phi + m*self.noise_sig2/self.prior_w2_sig2*tf.eye(2*m)
R = tf.linalg.cholesky(A)
alpha1 = tf.linalg.solve(R, tf.transpose(phi)@y)
log_prob = -1/(2*self.noise_sig2)*(tf.norm(y)**2 - tf.norm(alpha1)**2) - 0.5*tf.reduce_sum(tf.math.log(tf.linalg.diag_part(R)**2)) + m*tf.math.log(m*self.noise_sig2/self.prior_w2_sig2) - n/2*np.log(2*np.pi*self.noise_sig2)
return log_prob
return unnormalized_log_prob
def train_log_marginal_likelihood(self, x, y, n_epochs, learning_rate=0.001, early_stopping=False, tol=1e-4, patience=3, clipvalue=100, batch_size=None):
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
hyperparam_hist = {}
lengthscale_map = tfp.math.softplus_inverse(tf.constant(self.lengthscale, dtype=tf.float32)) # note: _map is untransformed by softplus
lengthscale_map = tf.Variable(lengthscale_map, dtype=np.float32)
hyperparam_hist['lengthscale'] = [tf.math.softplus(lengthscale_map).numpy()]
print('lengthscale init: ', hyperparam_hist['lengthscale'][0])
#prior_w2_sig2_map = tfp.math.softplus_inverse(tf.constant(self.prior_w2_sig2, dtype=tf.float32)) # note: _map is untransformed by softplus
#prior_w2_sig2_map = tf.Variable(prior_w2_sig2_map, dtype=np.float32)
#hyperparam_hist['prior_w2_sig2'] = [tf.math.softplus(prior_w2_sig2_map).numpy()]
#print('prior_w2_sig2 init: ', hyperparam_hist['prior_w2_sig2'][0])
opt = tf.keras.optimizers.SGD(learning_rate=learning_rate, clipvalue=clipvalue)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
if batch_size is None:
batch_size = x.shape[0]# full batch
train_dataset = train_dataset.batch(batch_size)
n_no_improvement = 0 # for early stopping
neg_log_marginal_likelihoods = [] # one for each batch
for xbatch, ybatch in train_dataset:
log_marginal_likelihood_ = self.make_log_marginal_likelihood(xbatch, ybatch)
log_marginal_likelihood = lambda lengthscale: log_marginal_likelihood_(tf.math.softplus(lengthscale)) # use softpluses
var_list = [lengthscale_map]
neg_log_marginal_likelihoods.append(lambda: -log_marginal_likelihood(*var_list)) # evaluate on Variables
for epoch in range(n_epochs):
for neg_log_marginal_likelihood in neg_log_marginal_likelihoods:
opt.minimize(neg_log_marginal_likelihood, var_list=var_list)
### if you want to processes the gradients
#with tf.GradientTape() as tape:
# loss = neg_log_marginal_likelihood()
#grads = tape.gradient(loss, var_list)
#grads = [tf.clip_by_norm(g, 500.) for g in grads]
#opt.apply_gradients(zip(grads, var_list))
#print(tf.math.softplus(lengthscale_map))
###
hyperparam_hist['lengthscale'].append(tf.math.softplus(lengthscale_map).numpy().item())
#hyperparam_hist['prior_w2_sig2'].append(tf.math.softplus(prior_w2_sig2_map).numpy().item())
if early_stopping:
if np.all([np.abs(val[-1] - val[-2]) < tol for val in hyperparam_hist.values()]):
n_no_improvement += 1
else:
n_no_improvement = 0
if n_no_improvement==patience:
break
# unpack
self.lengthscale = tf.convert_to_tensor(tf.math.softplus(lengthscale_map)).numpy().item()
#self.prior_w2_sig2 = tf.convert_to_tensor(tf.math.softplus(prior_w2_sig2_map)).numpy().item()
print('lengthscale final: ', self.lengthscale)
#print('prior_w2_sig2 final: ', self.prior_w2_sig2)
return hyperparam_hist
def make_unnormalized_log_prob(self, x, y, infer_lengthscale=False):
# for lengthscale prior and prior_w2_sig2 hyperprior (should move this to init...)
lengthscale_alpha = tf.convert_to_tensor(1.0, dtype=tf.float32)
lengthscale_beta = tf.convert_to_tensor(1.0, dtype=tf.float32)
prior_w2_sig2_alpha = tf.convert_to_tensor(1.0, dtype=tf.float32)
prior_w2_sig2_beta = tf.convert_to_tensor(1.0, dtype=tf.float32)
def log_prob_invgamma(x, alpha, beta):
unnormalized_prob = -(1. + alpha) * tf.math.log(x) - beta / x
normalization = (tf.math.lgamma(alpha) - alpha * tf.math.log(beta))
return unnormalized_prob - normalization
# precompute
xw1 = self.compute_xw1(x)
h = self.hidden_features(x=None, xw1=xw1, lengthscale=self.lengthscale)
Ax_d = self.grad_norm(x=None, xw1=xw1, lengthscale=self.lengthscale)
@tf.function
def unnormalized_log_prob(w2, lengthscale, prior_w2_sig2, infer_lengthscale=infer_lengthscale, xw1=xw1, h=h, Ax_d=Ax_d, family=self.family):
'''
w2: output layer weights
lengthscale: lengthscale
prior_w2_sig2: prior variance of output layer weights
            the hidden features and gradient penalty are recomputed from lengthscale only if infer_lengthscale is True
'''
if infer_lengthscale:
# recompute hidden features and gradient penalty (because they depend on lengthscale)
h = self.hidden_features(x=None, xw1=xw1, lengthscale=lengthscale)
Ax_d = self.grad_norm(x=None, xw1=xw1, lengthscale=lengthscale)
f_pred = self.forward(w2, h=h)
resid = y - f_pred
# likelihood
if family == 'gaussian':
log_prob = -1/(2*self.noise_sig2)*tf.transpose(resid)@(resid)
elif family == 'poisson':
log_prob = tf.reduce_sum(y * f_pred - tf.math.exp(f_pred))
elif family == 'binomial':
#p_pred = tf.math.sigmoid(f_pred)
#log_prob = tf.reduce_sum(y*tf.math.log(p_pred) + (1-y)*tf.math.log(p_pred))
log_prob = tf.reduce_sum(y*f_pred - tf.math.log(1+tf.math.exp(f_pred)))
# L2 penalty
log_prob += - 1/(2*prior_w2_sig2)*tf.transpose(w2)@w2
# prior_w2_sig2 hyperprior
log_prob += log_prob_invgamma(prior_w2_sig2, prior_w2_sig2_alpha, prior_w2_sig2_beta)
# lengthscale prior
log_prob += log_prob_invgamma(lengthscale**2, lengthscale_alpha, lengthscale_beta)
# Within group gradient penalty
for scale_global, A in zip(self.scale_global, Ax_d):
grad_f_sq = tf.transpose(w2)@(A)@w2
if self.penalty_type == 'l1':
log_prob += - scale_global*tf.math.sqrt(grad_f_sq)
elif self.penalty_type == 'l2':
log_prob += - scale_global*grad_f_sq
'''
# Group level gradient penalty
if Ax_groups is not None:
for scale_groups, A in zip(self.scale_groups, Ax_groups_tf):
log_prob -= scale_groups*tf.math.sqrt(tf.transpose(w)@(A)@w)
'''
return log_prob[0,0]
#@tf.function
def unnormalized_log_prob_vec(params):
return unnormalized_log_prob(w2=tf.reshape(params[:-2],(-1,1)), lengthscale=params[-2], prior_w2_sig2=params[-1])
return unnormalized_log_prob, unnormalized_log_prob_vec
def train_map(self, x, y, n_epochs, learning_rate=0.001, early_stopping=False, tol=1e-4, patience=3, clipvalue=100, batch_size=None, infer_lengthscale=True, infer_prior_w2_sig2=True):
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
hyperparam_hist = {}
# starting values
w2_map = tf.Variable(np.random.randn(self.dim_hidden,1)/self.dim_hidden, dtype=np.float32)
lengthscale_map = tfp.math.softplus_inverse(tf.constant(self.lengthscale, dtype=tf.float32)) # note: _map is untransformed by softplus
if infer_lengthscale:
lengthscale_map = tf.Variable(lengthscale_map, dtype=np.float32)
hyperparam_hist['lengthscale'] = [tf.math.softplus(lengthscale_map).numpy()]
print('lengthscale init: ', hyperparam_hist['lengthscale'][0])
prior_w2_sig2_map = tfp.math.softplus_inverse(tf.constant(self.prior_w2_sig2, dtype=tf.float32)) # note: _map is untransformed by softplus
if infer_prior_w2_sig2:
prior_w2_sig2_map = tf.Variable(prior_w2_sig2_map, dtype=np.float32)
hyperparam_hist['prior_w2_sig2'] = [tf.math.softplus(prior_w2_sig2_map).numpy()]
            print('prior_w2_sig2 init: ', hyperparam_hist['prior_w2_sig2'][0])
opt = tf.keras.optimizers.SGD(learning_rate=learning_rate, clipvalue=clipvalue)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
if batch_size is None:
batch_size = x.shape[0]# full batch
train_dataset = train_dataset.batch(batch_size)
n_no_improvement = 0 # for early stopping
unnormalized_neg_log_probs = [] # one for each batch
for xbatch, ybatch in train_dataset:
unnormalized_log_prob_, _ = self.make_unnormalized_log_prob(xbatch, ybatch, infer_lengthscale=infer_lengthscale)
if infer_lengthscale and infer_prior_w2_sig2:
unnormalized_log_prob = lambda w2, lengthscale, prior_w2_sig2: unnormalized_log_prob_(w2, tf.math.softplus(lengthscale), tf.math.softplus(prior_w2_sig2)) # use softpluses
var_list = [w2_map, lengthscale_map, prior_w2_sig2_map]
elif infer_lengthscale and (not infer_prior_w2_sig2):
unnormalized_log_prob = lambda w2, lengthscale: unnormalized_log_prob_(w2, tf.math.softplus(lengthscale), tf.math.softplus(prior_w2_sig2_map)) # use softpluses
var_list = [w2_map, lengthscale_map]
elif (not infer_lengthscale) and infer_prior_w2_sig2:
unnormalized_log_prob = lambda w2, prior_w2_sig2: unnormalized_log_prob_(w2, tf.math.softplus(lengthscale_map), tf.math.softplus(prior_w2_sig2)) # use softpluses
var_list = [w2_map, prior_w2_sig2_map]
else:
unnormalized_log_prob = lambda w2: unnormalized_log_prob_(w2, tf.math.softplus(lengthscale_map), tf.math.softplus(prior_w2_sig2_map)) # use softpluses
var_list = [w2_map]
unnormalized_neg_log_probs.append(lambda: -unnormalized_log_prob(*var_list)) # evaluate on Variables
for epoch in range(n_epochs):
for unnormalized_neg_log_prob in unnormalized_neg_log_probs:
#opt.minimize(unnormalized_neg_log_prob, var_list=var_list)
### if you want to processes the gradients
with tf.GradientTape() as tape:
loss = unnormalized_neg_log_prob()
grads = tape.gradient(loss, var_list)
grads = [tf.clip_by_norm(g, 500.) for g in grads]
opt.apply_gradients(zip(grads, var_list))
print(tf.math.softplus(lengthscale_map))
###
if infer_lengthscale:
hyperparam_hist['lengthscale'].append(tf.math.softplus(lengthscale_map).numpy().item())
if infer_prior_w2_sig2:
hyperparam_hist['prior_w2_sig2'].append(tf.math.softplus(prior_w2_sig2_map).numpy().item())
if early_stopping:
if np.all([np.abs(val[-1] - val[-2]) < tol for val in hyperparam_hist.values()]):
n_no_improvement += 1
else:
n_no_improvement = 0
if n_no_improvement==patience:
break
# unpack
w2 = tf.convert_to_tensor(w2_map)
if infer_lengthscale:
self.lengthscale = tf.convert_to_tensor(tf.math.softplus(lengthscale_map)).numpy().item()
if infer_prior_w2_sig2:
self.prior_w2_sig2 = tf.convert_to_tensor(tf.math.softplus(prior_w2_sig2_map)).numpy().item()
print('lengthscale final: ', self.lengthscale)
print('prior_w2_sig2 final: ', self.prior_w2_sig2)
return w2, hyperparam_hist
def train(self, x, y, num_results = int(10e3), num_burnin_steps = int(1e3), infer_lengthscale=False, infer_prior_w2_sig2=False, w2_init=None):
'''
'''
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
# initialize w2 randomly or to MAP
if w2_init is None:
w2_init = tf.cast(tf.convert_to_tensor(np.random.randn(self.dim_hidden,1)/self.dim_hidden), tf.float32)
# set up objective and initialization depending if variational parameters inferred
unnormalized_log_prob_, _ = self.make_unnormalized_log_prob(x, y, infer_lengthscale=infer_lengthscale)
if infer_lengthscale and infer_prior_w2_sig2:
unnormalized_log_prob = lambda w2, lengthscale, prior_w2_sig2: unnormalized_log_prob_(w2, lengthscale, prior_w2_sig2)
init_values = [w2_init, tf.constant(self.lengthscale, dtype=tf.float32), tf.constant(self.prior_w2_sig2, dtype=tf.float32)]
elif infer_lengthscale and (not infer_prior_w2_sig2):
unnormalized_log_prob = lambda w2, lengthscale: unnormalized_log_prob_(w2, lengthscale, self.prior_w2_sig2)
init_values = [w2_init, tf.constant(self.lengthscale, dtype=tf.float32)]
elif (not infer_lengthscale) and infer_prior_w2_sig2:
unnormalized_log_prob = lambda w2, prior_w2_sig2: unnormalized_log_prob_(w2, self.lengthscale, prior_w2_sig2)
init_values = [w2_init, tf.constant(self.prior_w2_sig2, dtype=tf.float32)]
else:
unnormalized_log_prob = lambda w2: unnormalized_log_prob_(w2, self.lengthscale, self.prior_w2_sig2)
init_values = w2_init
###
#breakpoint()
#unnormalized_log_prob(w2_init)
###
samples, accept = bnn.inference.mcmc.hmc_tf(unnormalized_log_prob,
init_values,
num_results,
num_burnin_steps,
num_leapfrog_steps=3,
step_size=1.)
return samples, accept
def fit(self, x, y):
'''
Computes conjugate posterior
'''
assert self.penalty_type == 'l2'
#assert np.all([s==self.scale_global[0] for s in self.scale_global]) # only works if all scales are the same (easy to adapt if not though)
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
h = self.hidden_features(x, lengthscale=self.lengthscale)
Ax_d = self.grad_norm(x, lengthscale=self.lengthscale)
Ax_d = [s*A for s,A in zip(self.scale_global, Ax_d)] # multiply by scale
Ax = tf.reduce_sum(tf.stack(Ax_d),0) # sum over input dimension
prior_sig2inv_mat = 1/self.prior_w2_sig2*tf.eye(self.dim_hidden) + Ax # prior includes gradient penalty
sig2 = tf.linalg.inv(prior_sig2inv_mat + tf.transpose(h)@(h)/self.noise_sig2) # Should replace with cholesky
mu = sig2 @ tf.transpose(h)@y/self.noise_sig2
return mu, sig2
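# Illustrative, standalone numpy sketch (added; not from the original class): the
# "faster way" in log_marginal_likelihood above avoids forming the n x n kernel
# matrix by using the Woodbury and matrix-determinant identities. The helpers
# below (names invented for this sketch) verify that the low-rank form agrees
# with the direct form for a generic feature matrix Phi, prior weight variance
# s2_w and noise variance sig2; call _check_lowrank_marglik() to run the check.
import numpy as np

def _log_marglik_direct(Phi, y, s2_w, sig2):
    # direct form: y ~ N(0, s2_w * Phi @ Phi.T + sig2 * I_n)
    n = Phi.shape[0]
    A = s2_w * (Phi @ Phi.T) + sig2 * np.eye(n)
    _, logdet = np.linalg.slogdet(A)
    return -0.5 * n * np.log(2 * np.pi) - 0.5 * logdet - 0.5 * y @ np.linalg.solve(A, y)

def _log_marglik_lowrank(Phi, y, s2_w, sig2):
    # low-rank form: only the K x K matrix B = Phi.T @ Phi + (sig2 / s2_w) * I_K is factorized
    n, K = Phi.shape
    B = Phi.T @ Phi + (sig2 / s2_w) * np.eye(K)
    _, logdetB = np.linalg.slogdet(B)
    logdetA = (n - K) * np.log(sig2) + K * np.log(s2_w) + logdetB
    quad = (y @ y - y @ Phi @ np.linalg.solve(B, Phi.T @ y)) / sig2
    return -0.5 * n * np.log(2 * np.pi) - 0.5 * logdetA - 0.5 * quad

def _check_lowrank_marglik(seed=0):
    rng = np.random.default_rng(seed)
    Phi, y = rng.normal(size=(50, 8)), rng.normal(size=50)
    assert np.allclose(_log_marglik_direct(Phi, y, 1.3, 0.2),
                       _log_marglik_lowrank(Phi, y, 1.3, 0.2))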
class RffGradPenHyper_v3(object):
"""
- L2 penalty only
- closed-form solution
- hyperparameters: lengthscale, prior_w2_sig2 (amplitude variance), and scale_global (regularization strength)
- lengthscale and scale_global hyperparameters can be input-specific
Inputs:
- dim_in: dimension of inputs (int)
- dim_hidden: number of hidden units (int)
- dim_out: output dimension (int)
- prior_w2_sig2: prior variance of output weights. Corresponds to amplitude variance of RBF kernel. (scalar)
- noise_sig2: observational noise (scalar)
    - scale_global: strength of the gradient penalty; scalar or one value per input dimension
- groups: NOT IMPLEMENTED
- scale_groups: NOT IMPLEMENTED
- lengthscale: Corresponds to lengthscale of RBF kernel. (scalar)
- penalty_type: select 'l1' for lasso penalty, 'l2' for ridge penalty (str)
"""
def __init__(self, dim_in, dim_hidden, dim_out, prior_w2_sig2=1.0, noise_sig2=1.0, scale_global=1.0, groups=None, scale_groups=None, lengthscale=1.0, penalty_type='l2', family='gaussian', dtype=tf.float32):
super(RffGradPenHyper_v3, self).__init__()
assert scale_groups is None
assert penalty_type=='l2'
assert family=='gaussian'
self.dtype = dtype
### architecture
self.dim_in = dim_in
self.dim_hidden = dim_hidden
self.dim_out = dim_out
self.noise_sig2 = noise_sig2
self.groups = groups # list of lists with grouping (e.g. [[1,2,3], [4,5]])
self.scale_groups = scale_groups
self.penalty_type = penalty_type
self.family = family
# hyperparameters
self.lengthscale = tf.reshape(tf.cast(tf.convert_to_tensor(lengthscale), self.dtype), -1)
self.prior_w2_sig2 = tf.reshape(tf.cast(tf.convert_to_tensor(prior_w2_sig2), self.dtype), -1)
self.scale_global = tf.reshape(tf.cast(tf.convert_to_tensor(scale_global), self.dtype), -1)
self.sample_features()
self.act = lambda z: sqrt(2/self.dim_hidden)*tf.math.cos(z)
def sample_features(self):
# sample random weights for RFF features
self.w1 = tf.cast(tf.convert_to_tensor(np.random.normal(0,1,(self.dim_hidden, self.dim_in))), dtype=tf.float32)
self.b1 = tf.cast(tf.convert_to_tensor(np.random.uniform(0,2*pi,(self.dim_hidden,))), dtype=tf.float32)
def compute_xw1(self, x, lengthscale=None):
if not tf.is_tensor(x):
x = tf.convert_to_tensor(x, dtype=tf.float32) # (n, dim_in)
if lengthscale is None:
lengthscale = self.lengthscale # (dim_in, )
return (x / tf.expand_dims(lengthscale, 0)) @ tf.transpose(self.w1) # (n, dim_hidden)
def hidden_features(self, x=None, xw1=None, lengthscale=None):
if xw1 is None:
xw1 = self.compute_xw1(x, lengthscale=lengthscale)
if lengthscale is None:
lengthscale = self.lengthscale
return self.act(xw1 + tf.reshape(self.b1, (1,-1))) # (n, dim_hidden)
def forward(self, w2, x=None, xw1=None, lengthscale=None, h=None):
if h is None:
if xw1 is None:
xw1 = self.compute_xw1(x, lengthscale=lengthscale)
if lengthscale is None:
lengthscale = self.lengthscale
h = self.hidden_features(x, xw1, lengthscale)
return h@tf.reshape(w2,(-1,1))
def jacobian_hidden_features(self, x=None, xw1=None, lengthscale=None):
if xw1 is None:
            xw1 = self.compute_xw1(x, lengthscale=lengthscale)
if lengthscale is None:
lengthscale = self.lengthscale
return -sqrt(2/self.dim_hidden) * tf.expand_dims(self.w1 / tf.expand_dims(lengthscale, 0), 0) * tf.expand_dims(tf.math.sin(xw1 + tf.reshape(self.b1, (1,-1))), -1) # analytical jacobian
def grad_norm(self, x=None, xw1=None, lengthscale=None):
J = self.jacobian_hidden_features(x=x, xw1=xw1, lengthscale=lengthscale)
Ax_d = [1/J.shape[0]*tf.transpose(J[:,:,d])@J[:,:,d] for d in range(self.dim_in)]
return Ax_d # list of length D, each element is K x K
def make_log_marginal_likelihood(self):
@tf.function
def log_marginal_likelihood(x, y, lengthscale, prior_w2_sig2, scale_global):
# precompute
N = x.shape[0]
K = self.dim_hidden
yy = tf.transpose(y) @ y
h = self.hidden_features(x=x, xw1=None, lengthscale=lengthscale)
# gradients
Ax_d = self.grad_norm(x, lengthscale=lengthscale) # list of length D, each element is K x K
Ax_d = tf.stack(Ax_d) * tf.reshape(scale_global,(-1,1,1)) # (D, K, K)
Ax = tf.reduce_sum(tf.stack(Ax_d),0) # sum over input dimension
# inverse of prior covariance of w2
if self.prior_w2_sig2.shape[0]==1:
prior_cov_inv = tf.eye(self.dim_hidden)/prior_w2_sig2
else:
prior_cov_inv = tf.linalg.diag(1/prior_w2_sig2)
prior_cov_inv += Ax
#prior_cov = tf.linalg.inv(prior_cov_inv) # not sure if there's a way around this...
# cholesky stuff
A = prior_cov_inv + tf.transpose(h)@h/self.noise_sig2
L = tf.linalg.cholesky(A)
alpha = tf.linalg.solve(L, tf.transpose(h)@y)
R = tf.linalg.cholesky(prior_cov_inv)
# log marginal likelihood
log_prob = \
-N/2*tf.math.log(2*np.pi*self.noise_sig2) \
-1/(2*self.noise_sig2)*yy \
+tf.reduce_sum(tf.math.log(tf.linalg.diag_part(R))) \
-tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L))) \
+1/(2*self.noise_sig2**2) * tf.transpose(alpha)@alpha
return log_prob
return log_marginal_likelihood
def train_log_marginal_likelihood(self, x, y, n_epochs, learning_rate=0.001, clipvalue=100, batch_size=None, opt_lengthscale=True, opt_prior_w2_sig2=True, opt_scale_global=True):
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
# initialize
lengthscale_raw = tf.Variable(tfp.math.softplus_inverse(tf.cast(tf.convert_to_tensor(self.lengthscale), tf.float32)), name='lengthscale') # note: _raw is untransformed by softplus
prior_w2_sig2_raw = tf.Variable(tfp.math.softplus_inverse(tf.cast(tf.convert_to_tensor(self.prior_w2_sig2), tf.float32)), name='prior_w2_sig2')
scale_global_raw = tf.Variable(tfp.math.softplus_inverse(tf.cast(tf.convert_to_tensor(self.scale_global), tf.float32)), name='scale_global')
# initialize
hyperparam_hist = {}
hyperparam_hist['lengthscale'] = [tf.math.softplus(lengthscale_raw).numpy()]
hyperparam_hist['prior_w2_sig2'] = [tf.math.softplus(prior_w2_sig2_raw).numpy()]
hyperparam_hist['scale_global'] = [tf.math.softplus(scale_global_raw).numpy()]
hyperparam_hist['loss'] = [None]
# batch dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
if batch_size is None:
batch_size = x.shape[0]# full batch
train_dataset = train_dataset.batch(batch_size)
# set up optimization
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
log_marginal_likelihood = self.make_log_marginal_likelihood()
log_marginal_likelihood_raw = lambda x, y, lengthscale_raw, prior_w2_sig2_raw, scale_global_raw: log_marginal_likelihood(x, y, tf.math.softplus(lengthscale_raw), tf.math.softplus(prior_w2_sig2_raw), tf.math.softplus(scale_global_raw)) # takes raw values
# decide which hyperparameters to optimize
var_list = []
lr_mult = []
if opt_lengthscale:
var_list += [lengthscale_raw]
lr_mult += [1.0]
if opt_prior_w2_sig2:
var_list += [prior_w2_sig2_raw]
lr_mult += [1.0]
if opt_scale_global:
var_list += [scale_global_raw]
lr_mult += [100.0]
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
loss = -log_marginal_likelihood_raw(x, y, lengthscale_raw, prior_w2_sig2_raw, scale_global_raw)
grads = tape.gradient(loss, var_list)
grads = [g*m for g, m in zip(grads,lr_mult)]
#grads = [tf.clip_by_norm(g, clipvalue) for g in grads] # gradient clipping
opt.apply_gradients(zip(grads, var_list))
return loss
# optimize
for epoch in range(n_epochs):
for xbatch, ybatch in train_dataset:
loss = train_step(xbatch, ybatch)
hyperparam_hist['lengthscale'].append(tf.math.softplus(lengthscale_raw).numpy())
hyperparam_hist['prior_w2_sig2'].append(tf.math.softplus(prior_w2_sig2_raw).numpy())
hyperparam_hist['scale_global'].append(tf.math.softplus(scale_global_raw).numpy())
hyperparam_hist['loss'].append(loss.numpy().item())
# unpack
self.lengthscale = tf.math.softplus(lengthscale_raw)
self.prior_w2_sig2 = tf.math.softplus(prior_w2_sig2_raw)
self.scale_global = tf.math.softplus(scale_global_raw)
return hyperparam_hist
def fit(self, x, y):
'''
Computes conjugate posterior
'''
x = tf.cast(tf.convert_to_tensor(x), tf.float32)
y = tf.cast(tf.convert_to_tensor(y), tf.float32)
h = self.hidden_features(x, lengthscale=self.lengthscale)
        Ax_d = self.grad_norm(x, lengthscale=self.lengthscale) # list of length D, each element is K x K
        #Ax_d = Ax_d * tf.expand_dims(self.scale_global,(0,1)) # multiply by scale
        Ax_d = [s*A for s,A in zip(self.scale_global, Ax_d)] # multiply by scale
Ax = tf.reduce_sum(tf.stack(Ax_d),0) # sum over input dimension
if self.prior_w2_sig2.shape[0]==1:
prior_sig2inv_mat = tf.eye(self.dim_hidden)/self.prior_w2_sig2
else:
prior_sig2inv_mat = tf.linalg.diag(self.prior_w2_sig2)
prior_sig2inv_mat += Ax # prior includes gradient penalty
sig2 = tf.linalg.inv(prior_sig2inv_mat + tf.transpose(h)@(h)/self.noise_sig2) # Should replace with cholesky
mu = sig2 @ tf.transpose(h)@y/self.noise_sig2
return mu, sig2
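# Illustrative sketch (added; an assumption-labeled companion, not original code):
# the fit() methods above are instances of the standard conjugate Gaussian
# linear-model update, in which the gradient penalty only adds a positive
# semi-definite matrix P to the prior precision of the output weights. With
# hidden features H (n x K), targets y (n x 1), prior variance s2_w and noise
# variance sig2, the posterior is N(mu, Sigma) with
#   Sigma = (P + I/s2_w + H.T @ H / sig2)^(-1),   mu = Sigma @ H.T @ y / sig2.
import numpy as np

def _conjugate_posterior_sketch(H, y, s2_w, sig2, P=None):
    K = H.shape[1]
    prior_prec = np.eye(K) / s2_w
    if P is not None:
        prior_prec = prior_prec + P   # e.g. the summed, scaled gradient-penalty matrices Ax
    Sigma = np.linalg.inv(prior_prec + H.T @ H / sig2)   # a Cholesky solve would be preferable
    mu = Sigma @ H.T @ y / sig2
    return mu, Sigma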
class RffHs(nn.Module):
"""
RFF model with horseshoe
Currently only single layer supported
"""
def __init__(self,
dim_in, \
dim_out, \
dim_hidden=50, \
infer_noise=False, sig2_inv=None, sig2_inv_alpha_prior=None, sig2_inv_beta_prior=None, \
linear_term=False, linear_dim_in=None,
layer_in_name='RffVarSelectLogitNormalLayer',
**kwargs):
super(RffHs, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.infer_noise=infer_noise
self.linear_term=linear_term
self.linear_dim_in=linear_dim_in
# noise
if self.infer_noise:
self.sig2_inv_alpha_prior=torch.tensor(sig2_inv_alpha_prior)
self.sig2_inv_beta_prior=torch.tensor(sig2_inv_beta_prior)
self.sig2_inv = None
self.register_buffer('sig2_inv_alpha', torch.empty(1, requires_grad=False)) # For now each output gets same noise
self.register_buffer('sig2_inv_beta', torch.empty(1, requires_grad=False))
else:
self.sig2_inv_alpha_prior=None
self.sig2_inv_beta_prior=None
self.register_buffer('sig2_inv', torch.tensor(sig2_inv).clone().detach())
# layers
#self.layer_in = layers.RffHsLayer2(self.dim_in, self.dim_hidden, **kwargs)
#self.layer_in = layers.RffLogitNormalLayer(self.dim_in, self.dim_hidden, **kwargs)
self.layer_in = layers.get_layer(layer_in_name)(self.dim_in, self.dim_hidden, **kwargs)
self.layer_out = layers.LinearLayer(self.dim_hidden, sig2_y=1/sig2_inv, **kwargs)
def forward(self, x, x_linear=None, weights_type_layer_in='sample_post', weights_type_layer_out='sample_post', n_samp_layer_in=None):
'''
n_samp is number of samples from variational distribution (first layer)
'''
# network
h = self.layer_in(x, weights_type=weights_type_layer_in, n_samp=n_samp_layer_in)
y = self.layer_out(h, weights_type=weights_type_layer_out)
# add linear term if specified
if self.linear_term and x_linear is not None:
return y + self.blm(x_linear, sample=sample)
else:
return y
def sample_posterior_predictive(self, x_test, x_train, y_train):
'''
Need training data in order to get sample from non-variational full conditional distribution (output layer)
Code duplicates some of forward, not ideal
'''
# 1: sample from variational distribution
self.layer_in.sample_variational(store=True)
# 2: forward pass of training data with sample from 1
h = self.layer_in(x_train, weights_type='stored')
        # 3: sample output weights from conjugate (depends on output from 2)
self.layer_out.fixed_point_updates(h, y_train) # conjugate update of output weights
self.layer_out.sample_weights(store=True)
# 4: forward pass of test data using samples from 1 and 3
return self.forward(x_test, weights_type_layer_in='stored', weights_type_layer_out='stored')
def kl_divergence(self):
return self.layer_in.kl_divergence()
def log_prob(self, y_observed, y_pred):
'''
y_observed: (n_obs, dim_out)
y_pred: (n_obs, n_pred, dim_out)
averages over n_pred (e.g. could represent different samples), sums over n_obs
'''
lik = Normal(y_pred, torch.sqrt(1/self.sig2_inv))
return lik.log_prob(y_observed.unsqueeze(1)).mean(1).sum(0)
def loss_original(self, x, y, x_linear=None, temperature=1, n_samp=1):
'''negative elbo'''
y_pred = self.forward(x, x_linear, weights_type_layer_in='sample_post', weights_type_layer_out='stored', n_samp_layer_in=n_samp)
kl_divergence = self.kl_divergence()
#kl_divergence = 0
log_prob = self.log_prob(y, y_pred)
#log_prob = 0
return -log_prob + temperature*kl_divergence
def loss(self, x, y, x_linear=None, temperature=1, n_samp=1):
'''
Uses sample of weights from full conditional *based on samples of s* to compute likelihood
'''
kl_divergence = self.kl_divergence()
#breakpoint()
# 1: sample from variational distribution
self.layer_in.sample_variational(store=True)
# 2: forward pass of training data with sample from 1
h = self.layer_in(x, weights_type='stored')
        # 3: sample output weights from conjugate (depends on output from 2)
self.layer_out.fixed_point_updates(h, y) # conjugate update of output weights
self.layer_out.sample_weights(store=True)
# 4: forward pass of test data using samples from 1 and 3
y_pred = self.forward(x, weights_type_layer_in='stored', weights_type_layer_out='stored', n_samp_layer_in=1)
log_prob = self.log_prob(y, y_pred)
return -log_prob + temperature*kl_divergence
def fixed_point_updates(self, x, y, x_linear=None, temperature=1):
self.layer_in.fixed_point_updates() # update horseshoe aux variables
#### COMMENTING OUT OUTPUT LAYER UPDATES SINCE NOW PART OF LOSS FUNCTION ####
"""
h = self.layer_in(x, weights_type='sample_post') # hidden units based on sample from variational dist
self.layer_out.fixed_point_updates(h, y) # conjugate update of output weights
self.layer_out.sample_weights(store=True) # sample output weights from full conditional
"""
####
if self.linear_term:
if self.infer_noise:
                self.blm.sig2_inv = self.sig2_inv_alpha/self.sig2_inv_beta # Shouldn't this be a sample?
self.blm.fixed_point_updates(y - self.forward(x, x_linear=None, sample=True)) # Subtract off just the bnn
if self.infer_noise and temperature > 0:
sample_y_bnn = self.forward(x, x_linear=None, sample=True) # Sample
if self.linear_term:
E_y_linear = F.linear(x_linear, self.blm.beta_mu)
SSR = torch.sum((y-sample_y_bnn-E_y_linear)**2) + torch.sum(self.blm.xx_inv * self.blm.beta_sig2).sum()
else:
SSR = torch.sum((y - sample_y_bnn)**2)
self.sig2_inv_alpha = self.sig2_inv_alpha_prior + temperature*0.5*x.shape[0] # Can be precomputed
self.sig2_inv_beta = self.sig2_inv_beta_prior + temperature*0.5*SSR
def init_parameters(self, seed=None):
if seed is not None:
torch.manual_seed(seed)
self.layer_in.init_parameters()
self.layer_out.init_parameters()
if self.infer_noise:
self.sig2_inv_alpha = self.sig2_inv_alpha_prior
self.sig2_inv_beta = self.sig2_inv_beta_prior
if self.linear_term:
self.blm.init_parameters()
def reinit_parameters(self, x, y, n_reinit=1):
seeds = torch.zeros(n_reinit).long().random_(0, 1000)
losses = torch.zeros(n_reinit)
for i in range(n_reinit):
self.init_parameters(seeds[i])
losses[i] = self.loss(x, y)
self.init_parameters(seeds[torch.argmin(losses).item()])
def precompute(self, x=None, x_linear=None):
# Needs to be run before training
if self.linear_term:
self.blm.precompute(x_linear)
def get_n_parameters(self):
n_param=0
for p in self.parameters():
n_param+=np.prod(p.shape)
return n_param
def print_state(self, x, y, epoch=0, n_epochs=0):
'''
prints things like training loss, test loss, etc
'''
print('Epoch[{}/{}], kl: {:.6f}, likelihood: {:.6f}, elbo: {:.6f}'\
.format(epoch, n_epochs, self.kl_divergence().item(), -self.loss(x,y,temperature=0).item(), -self.loss(x,y).item()))
class RffBeta(nn.Module):
"""
RFF model beta prior on indicators
Currently only single layer supported
"""
def __init__(self,
dim_in, \
dim_out, \
dim_hidden=50, \
infer_noise=False, sig2_inv=None, sig2_inv_alpha_prior=None, sig2_inv_beta_prior=None, \
linear_term=False, linear_dim_in=None,
**kwargs):
super(RffBeta, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.infer_noise=infer_noise
self.linear_term=linear_term
self.linear_dim_in=linear_dim_in
# noise
if self.infer_noise:
self.sig2_inv_alpha_prior=torch.tensor(sig2_inv_alpha_prior)
self.sig2_inv_beta_prior=torch.tensor(sig2_inv_beta_prior)
self.sig2_inv = None
self.register_buffer('sig2_inv_alpha', torch.empty(1, requires_grad=False)) # For now each output gets same noise
self.register_buffer('sig2_inv_beta', torch.empty(1, requires_grad=False))
else:
self.sig2_inv_alpha_prior=None
self.sig2_inv_beta_prior=None
self.register_buffer('sig2_inv', torch.tensor(sig2_inv).clone().detach())
# layers
self.layer_in = layers.RffBetaLayer(self.dim_in, self.dim_hidden, **kwargs)
self.layer_out = layers.LinearLayer(self.dim_hidden, sig2_y=1/sig2_inv, **kwargs)
def forward(self, x, x_linear=None, weights_type_layer_in='sample_post', weights_type_layer_out='sample_post'):
# network
h = self.layer_in(x, weights_type=weights_type_layer_in)
y = self.layer_out(h, weights_type=weights_type_layer_out)
# add linear term if specified
if self.linear_term and x_linear is not None:
return y + self.blm(x_linear, sample=sample)
else:
return y
def kl_divergence(self):
return self.layer_in.kl_divergence()
def compute_loss_gradients(self, x, y, x_linear=None, temperature=1.):
# sample from variational dist
self.layer_in.sample_variational(store=True)
# compute log likelihood
y_pred = self.forward(x, x_linear, weights_type_layer_in='stored', weights_type_layer_out='stored')
log_lik = -self.neg_log_prob(y, y_pred)
# gradients of score function
for p in self.layer_in.parameters():
if p.grad is not None:
p.grad.zero_()
log_q = self.layer_in.log_prob_variational()
log_q.backward()
self.layer_in.s_a_trans_grad_q = self.layer_in.s_a_trans.grad.clone()
self.layer_in.s_b_trans_grad_q = self.layer_in.s_b_trans.grad.clone()
# gradients of kl
for p in self.layer_in.parameters(): p.grad.zero_()
kl = self.kl_divergence()
kl.backward()
self.layer_in.s_a_trans_grad_kl = self.layer_in.s_a_trans.grad.clone()
self.layer_in.s_b_trans_grad_kl = self.layer_in.s_b_trans.grad.clone()
# gradients of loss=-elbo
with torch.no_grad():
self.layer_in.s_a_trans.grad = -log_lik*self.layer_in.s_a_trans_grad_q + temperature*self.layer_in.s_a_trans_grad_kl
self.layer_in.s_b_trans.grad = -log_lik*self.layer_in.s_b_trans_grad_q + temperature*self.layer_in.s_b_trans_grad_kl
def loss(self, x, y, x_linear=None, temperature=1):
'''negative elbo
NON DIFFERENTIABLE BECAUSE OF SCORE METHOD
'''
y_pred = self.forward(x, x_linear, weights_type_layer_in='sample_post', weights_type_layer_out='stored')
kl_divergence = self.kl_divergence()
#kl_divergence = 0
neg_log_prob = self.neg_log_prob(y, y_pred)
#neg_log_prob = 0
return neg_log_prob + temperature*kl_divergence
def neg_log_prob(self, y_observed, y_pred):
N = y_observed.shape[0]
if self.infer_noise:
sig2_inv = self.sig2_inv_alpha/self.sig2_inv_beta # Is this right? i.e. IG vs G
else:
sig2_inv = self.sig2_inv
log_prob = -0.5 * N * math.log(2 * math.pi) + 0.5 * N * torch.log(sig2_inv) - 0.5 * torch.sum((y_observed - y_pred)**2) * sig2_inv
return -log_prob
def fixed_point_updates(self, x, y, x_linear=None, temperature=1):
h = self.layer_in(x, weights_type='sample_post') # hidden units based on sample from variational dist
self.layer_out.fixed_point_updates(h, y) # conjugate update of output weights
self.layer_out.sample_weights(store=True) # sample output weights from full conditional
if self.linear_term:
if self.infer_noise:
                self.blm.sig2_inv = self.sig2_inv_alpha/self.sig2_inv_beta # Shouldn't this be a sample?
self.blm.fixed_point_updates(y - self.forward(x, x_linear=None, sample=True)) # Subtract off just the bnn
if self.infer_noise and temperature > 0:
sample_y_bnn = self.forward(x, x_linear=None, sample=True) # Sample
if self.linear_term:
E_y_linear = F.linear(x_linear, self.blm.beta_mu)
SSR = torch.sum((y-sample_y_bnn-E_y_linear)**2) + torch.sum(self.blm.xx_inv * self.blm.beta_sig2).sum()
else:
SSR = torch.sum((y - sample_y_bnn)**2)
self.sig2_inv_alpha = self.sig2_inv_alpha_prior + temperature*0.5*x.shape[0] # Can be precomputed
self.sig2_inv_beta = self.sig2_inv_beta_prior + temperature*0.5*SSR
def init_parameters(self, seed=None):
if seed is not None:
torch.manual_seed(seed)
self.layer_in.init_parameters()
self.layer_out.init_parameters()
if self.infer_noise:
self.sig2_inv_alpha = self.sig2_inv_alpha_prior
self.sig2_inv_beta = self.sig2_inv_beta_prior
if self.linear_term:
self.blm.init_parameters()
def reinit_parameters(self, x, y, n_reinit=1):
seeds = torch.zeros(n_reinit).long().random_(0, 1000)
losses = torch.zeros(n_reinit)
for i in range(n_reinit):
self.init_parameters(seeds[i])
losses[i] = self.loss(x, y)
self.init_parameters(seeds[torch.argmin(losses).item()])
def precompute(self, x=None, x_linear=None):
# Needs to be run before training
if self.linear_term:
self.blm.precompute(x_linear)
def get_n_parameters(self):
n_param=0
for p in self.parameters():
n_param+=np.prod(p.shape)
return n_param
def print_state(self, x, y, epoch=0, n_epochs=0):
'''
prints things like training loss, test loss, etc
'''
print('Epoch[{}/{}], kl: {:.6f}, likelihood: {:.6f}, elbo: {:.6f}'\
.format(epoch, n_epochs, self.kl_divergence().item(), -self.loss(x,y,temperature=0).item(), -self.loss(x,y).item()))
def train(model, optimizer, x, y, n_epochs, x_linear=None, n_warmup = 0, n_rep_opt=10, print_freq=None, frac_start_save=1, frac_lookback=0.5, path_checkpoint='./'):
'''
frac_lookback will only result in reloading early stopped model if frac_lookback < 1 - frac_start_save
'''
loss = torch.zeros(n_epochs)
loss_best = torch.tensor(float('inf'))
loss_best_saved = torch.tensor(float('inf'))
saved_model = False
model.precompute(x, x_linear)
for epoch in range(n_epochs):
        # TEMPERATURE HARDCODED, NEED TO FIX
#temperature_kl = 0. if epoch < n_epochs/2 else 1.0
#temperature_kl = epoch / (n_epochs/2) if epoch < n_epochs/2 else 1.0
temperature_kl = epoch / (n_epochs/10) if epoch < n_epochs/10 else 1.0
#temperature_kl = 0. # SET TO ZERO TO IGNORE KL
for i in range(n_rep_opt):
l = model.loss(x, y, x_linear=x_linear, temperature=temperature_kl)
# backward
optimizer.zero_grad()
l.backward(retain_graph=True)
optimizer.step()
##
#print('------------- %d -------------' % epoch)
#print('s :', model.layer_in.s_loc.data)
#print('grad :', model.layer_in.s_loc.grad)
#model.layer_in.s_loc.grad.zero_()
#kl = model.layer_in.kl_divergence()
#kl.backward()
#if epoch > 500:
# breakpoint()
#print('grad kl:', model.layer_in.s_loc.grad)
##
loss[epoch] = l.item()
with torch.no_grad():
model.fixed_point_updates(x, y, x_linear=x_linear, temperature=1)
# print state
if print_freq is not None:
if (epoch + 1) % print_freq == 0:
model.print_state(x, y, epoch+1, n_epochs)
# see if improvement made (only used if KL isn't tempered)
if loss[epoch] < loss_best and temperature_kl==1.0:
loss_best = loss[epoch]
# save model
if epoch > frac_start_save*n_epochs and loss[epoch] < loss_best_saved:
            print('saving model at epoch = %d' % epoch)
saved_model = True
loss_best_saved = loss[epoch]
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss[epoch],
}, os.path.join(path_checkpoint, 'checkpoint.tar'))
# end training if no improvement made in a while and more than half way done
        epoch_lookback = np.maximum(1, int(epoch - frac_lookback*n_epochs)) # lookback window covers frac_lookback of the epochs
if epoch_lookback > frac_start_save*n_epochs+1:
loss_best_lookback = torch.min(loss[epoch_lookback:epoch+1])
percent_improvement = (loss_best - loss_best_lookback)/torch.abs(loss_best) # positive is better
if percent_improvement < 0.0:
print('stopping early at epoch = %d' % epoch)
break
# reload best model if saving
if saved_model:
checkpoint = torch.load(os.path.join(path_checkpoint, 'checkpoint.tar'))
model.load_state_dict(checkpoint['model_state_dict'])
print('reloading best model from epoch = %d' % checkpoint['epoch'])
model.eval()
    return loss[:epoch + 1]
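# Illustrative sketch (added): the temperature_kl schedule hard-coded in the loop
# above is a linear KL warm-up over the first 10% of epochs; written out on its
# own it is just the helper below, which could replace the inline expression.
def _kl_warmup(epoch, n_epochs, warmup_frac=0.1):
    """Linearly anneal the KL weight from 0 to 1 over warmup_frac of training."""
    warmup_epochs = max(1, int(warmup_frac * n_epochs))
    return min(1.0, epoch / warmup_epochs)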
def train_score(model, optimizer, x, y, n_epochs, x_linear=None, n_warmup = 0, n_rep_opt=10, print_freq=None, frac_start_save=1):
loss = torch.zeros(n_epochs)
    loss_best = float('inf')
model.precompute(x, x_linear)
for epoch in range(n_epochs):
        # TEMPERATURE HARDCODED, NEED TO FIX
#temperature_kl = 0. if epoch < n_epochs/2 else 1
#temperature_kl = epoch / (n_epochs/2) if epoch < n_epochs/2 else 1
temperature_kl = 0. # SET TO ZERO TO IGNORE KL
for i in range(n_rep_opt):
optimizer.zero_grad()
model.compute_loss_gradients(x, y, x_linear=x_linear, temperature=temperature_kl)
# backward
torch.nn.utils.clip_grad_norm_(model.parameters(), 100)
optimizer.step()
with torch.no_grad():
model.fixed_point_updates(x, y, x_linear=x_linear, temperature=1)
if epoch > frac_start_save*n_epochs and loss[epoch] < loss_best:
print('saving...')
loss_best = loss[epoch]
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss[epoch],
}, 'checkpoint.tar')
if print_freq is not None:
if (epoch + 1) % print_freq == 0:
model.print_state(x, y, epoch+1, n_epochs)
return loss
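# Illustrative sketch (added; not part of the original models): the hyperparameter
# training methods above keep positive quantities (lengthscale, prior_w2_sig2,
# scale_global) unconstrained by optimizing softplus_inverse(value) and mapping
# back with softplus. The toy demo below shows just that reparameterization on a
# made-up scalar target; names and values here are invented for the sketch.
import tensorflow as tf
import tensorflow_probability as tfp

def _softplus_reparam_demo(target=2.5, steps=500, learning_rate=0.05):
    raw = tf.Variable(tfp.math.softplus_inverse(tf.constant(1.0)))  # unconstrained
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    for _ in range(steps):
        with tf.GradientTape() as tape:
            value = tf.math.softplus(raw)               # always positive
            loss = (value - tf.constant(target)) ** 2
        grads = tape.gradient(loss, [raw])
        opt.apply_gradients(zip(grads, [raw]))
    return float(tf.math.softplus(raw))                 # approaches the positive target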
|
import pygame
import numpy as np
import time
import random
# Generate some test data
data = np.arange(200).reshape((4,5,10))
print(data)
# Write the array to disk
with open('test.txt', 'w') as outfile:
    # I'm writing a header here just for the sake of readability
    # Any line starting with "#" will be ignored by numpy.loadtxt
    outfile.write('# Array shape: {0}\n'.format(data.shape))
    # Iterating through a ndimensional array produces slices along
    # the last axis. This is equivalent to data[i,:,:] in this case
    for data_slice in data:
        # The formatting string indicates that I'm writing out
        # the values in left-justified columns 7 characters in width
        # with 2 decimal places.
        np.savetxt(outfile, data_slice, fmt='%-7.2f')
        # Writing out a break to indicate different slices...
        outfile.write('# New slice\n')
new_data = np.loadtxt('test.txt')
new_data = new_data.reshape((4,5,10))
print(new_data)
|
import pytest
import numpy as np
import os
from collections import namedtuple
from ProcessOptimizer import dummy_minimize
from ProcessOptimizer import gp_minimize
from ProcessOptimizer.benchmarks import bench1
from ProcessOptimizer.benchmarks import bench3
from ProcessOptimizer.callbacks import TimerCallback
from ProcessOptimizer.callbacks import DeltaYStopper
from ProcessOptimizer.callbacks import DeadlineStopper
from ProcessOptimizer.callbacks import CheckpointSaver
from ProcessOptimizer.utils import load
@pytest.mark.fast_test
def test_timer_callback():
callback = TimerCallback()
dummy_minimize(bench1, [(-1.0, 1.0)], callback=callback, n_calls=10)
assert len(callback.iter_time) <= 10
assert 0.0 <= sum(callback.iter_time)
@pytest.mark.fast_test
def test_deltay_stopper():
deltay = DeltaYStopper(0.2, 3)
Result = namedtuple('Result', ['func_vals'])
assert deltay(Result([0, 1, 2, 3, 4, 0.1, 0.19]))
assert not deltay(Result([0, 1, 2, 3, 4, 0.1]))
assert deltay(Result([0, 1])) is None
@pytest.mark.fast_test
def test_deadline_stopper():
deadline = DeadlineStopper(0.0001)
gp_minimize(bench3,
[(-1.0, 1.0)],
n_random_starts=1,
callback=deadline,
n_calls=10,
random_state=1)
assert len(deadline.iter_time) == 1
assert np.sum(deadline.iter_time) > deadline.total_time
deadline = DeadlineStopper(60)
gp_minimize(bench3,
[(-1.0, 1.0)],
n_random_starts=1,
callback=deadline,
n_calls=10,
random_state=1)
assert len(deadline.iter_time) == 10
assert np.sum(deadline.iter_time) < deadline.total_time
@pytest.mark.fast_test
def test_checkpoint_saver():
checkpoint_path = "./test_checkpoint.pkl"
if os.path.isfile(checkpoint_path):
os.remove(checkpoint_path)
checkpoint_saver = CheckpointSaver(checkpoint_path, compress=9)
result = dummy_minimize(bench1,
[(-1.0, 1.0)],
callback=checkpoint_saver,
n_calls=10)
assert os.path.exists(checkpoint_path)
assert load(checkpoint_path).x == result.x
if os.path.isfile(checkpoint_path):
os.remove(checkpoint_path)
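# Hedged usage sketch (added; not one of the original tests): skopt-style
# optimizers generally accept a list of callbacks, so the stoppers and the
# saver exercised above can be combined in a single run. Treat the list-argument
# form as an assumption carried over from skopt rather than a verified
# ProcessOptimizer API.
def example_combined_callbacks(checkpoint_path="./combined_checkpoint.pkl"):
    callbacks = [TimerCallback(), DeltaYStopper(0.2, 3), CheckpointSaver(checkpoint_path)]
    return dummy_minimize(bench1, [(-1.0, 1.0)], callback=callbacks, n_calls=10)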
|
from .models import CreatedDocument, CreatedVersionedDocument, SplashMetadata, VersionedSplashMetadata
|
a = input()
a = list(a)
for j in range(1,len(a)):
key = a[j]
i = j-1
while i>=0 and a[i]>key:
a[i + 1]=a[i]
i-=1
a[i+1] = key
print(a)
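# Hedged usage sketch (added for illustration): the same insertion-sort loop,
# wrapped in a function so it can be checked against Python's built-in sorted().
def insertion_sort(seq):
    items = list(seq)
    for j in range(1, len(items)):
        key = items[j]
        i = j - 1
        while i >= 0 and items[i] > key:
            items[i + 1] = items[i]
            i -= 1
        items[i + 1] = key
    return items

assert insertion_sort("edcba") == sorted("edcba")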
|
/usr/lib64/python3.6/genericpath.py
|
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(name='check_workspace',
version='0.1',
description='utilities for plotting in ROOT',
url='https://github.com/xju2/check_workspace',
long_description=readme,
author='Xiangyang Ju',
author_email='xiangyang.ju@gmail.com',
license=license,
packages=['check_workspace'],
zip_safe=False
)
|
import sys

def goodbye():
    print("Thank you for visiting")
    sys.exit()
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AddAnalyticsClusterDetails(object):
"""
DEPRECATED -- please use HeatWave API instead.
Details required to add an Analytics Cluster.
"""
def __init__(self, **kwargs):
"""
Initializes a new AddAnalyticsClusterDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param shape_name:
The value to assign to the shape_name property of this AddAnalyticsClusterDetails.
:type shape_name: str
:param cluster_size:
The value to assign to the cluster_size property of this AddAnalyticsClusterDetails.
:type cluster_size: int
"""
self.swagger_types = {
'shape_name': 'str',
'cluster_size': 'int'
}
self.attribute_map = {
'shape_name': 'shapeName',
'cluster_size': 'clusterSize'
}
self._shape_name = None
self._cluster_size = None
@property
def shape_name(self):
"""
**[Required]** Gets the shape_name of this AddAnalyticsClusterDetails.
The shape determines resources to allocate to the Analytics
Cluster nodes - CPU cores, memory.
:return: The shape_name of this AddAnalyticsClusterDetails.
:rtype: str
"""
return self._shape_name
@shape_name.setter
def shape_name(self, shape_name):
"""
Sets the shape_name of this AddAnalyticsClusterDetails.
The shape determines resources to allocate to the Analytics
Cluster nodes - CPU cores, memory.
:param shape_name: The shape_name of this AddAnalyticsClusterDetails.
:type: str
"""
self._shape_name = shape_name
@property
def cluster_size(self):
"""
**[Required]** Gets the cluster_size of this AddAnalyticsClusterDetails.
The number of analytics-processing nodes provisioned for the
Analytics Cluster.
:return: The cluster_size of this AddAnalyticsClusterDetails.
:rtype: int
"""
return self._cluster_size
@cluster_size.setter
def cluster_size(self, cluster_size):
"""
Sets the cluster_size of this AddAnalyticsClusterDetails.
The number of analytics-processing nodes provisioned for the
Analytics Cluster.
:param cluster_size: The cluster_size of this AddAnalyticsClusterDetails.
:type: int
"""
self._cluster_size = cluster_size
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
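# Hedged usage sketch (added; not part of the generated SDK module): the
# @init_model_state_from_kwargs decorator lets the documented properties be
# supplied as keyword arguments. The shape name below is a placeholder, not a
# verified MySQL shape.
def _example_add_analytics_cluster_details():
    details = AddAnalyticsClusterDetails(shape_name="EXAMPLE-SHAPE-NAME", cluster_size=2)
    return details.shape_name, details.cluster_size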
|
"""
Open implementation of MonoLoco / MonoLoco++ / MonStereo
"""
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
# guess the number
import random
def parse_int_from_input(input_question):
while True:
try:
return int(input(input_question))
except ValueError:
print("Pick A Valid Integer")
def play_game(max_number):
random_number = random.randint(0, max_number)
guess = parse_int_from_input(f"Guess A Number Between 0 and {max_number}: ")
count = 1
while True:
if guess > random_number:
guess = parse_int_from_input("Too High! Guess Again: ")
count += 1
elif guess < random_number:
guess = parse_int_from_input("Too Low! Guess Again: ")
count += 1
else:
print(f"You Got It In {count} Guesses!")
return count
def main():
guesses = []
play_again = True
while play_again:
max_number = parse_int_from_input("Pick a max number: ")
guesses.append(play_game(max_number))
play_again_input = input("Do you want to play again (y/n)? ").lower()
while play_again_input not in ("y", "n"):
play_again_input = input("Invalid Answer, Please Type 'y' or 'n'? ").lower()
if play_again_input == "n":
print(f"The Max Number Of Guesses Was: {max(guesses)}")
print(f"The Min Number Of Guesses Was: {min(guesses)}")
print(f"The Average Number Of Guesses Was: {average(guesses)}")
break
else:
continue
if __name__ == "__main__":
main()
|
# coding=utf-8
# TestBenchmarkSwiftDictionary.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbbench import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
class TestBenchmarkSwiftDictionary(BenchBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.benchmarks_test
def test_run_command(self):
"""Benchmark the Swift dictionary data formatter"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the Swift dictionary data formatter"""
self.runCmd("file " + getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect('frame variable -A dict', substrs=['[300]', '300'])
sw.stop()
print("time to print: %s" % (sw))
|
import random
import string
import os
import sys
import requests
import httpx
import time
import colorama
from colorama import *
os.system("title Discord Account Creator - Kings Cheats")
os.system("CLS")
global captchakey
f = open("2cap.txt", "r")
captchakey = f.read()
init()
def useragent():
file = open('useragent.txt','r')
useragent = (random.choice(list(file)))
useragent2 = []
useragent2.append(useragent)
useragent1 = []
for element in useragent2:
useragent1.append(element.strip())
finaluseragent = ''.join(str(e) for e in useragent1)
return finaluseragent
def name():
names = []
name = []
with open('names.txt','r') as username:
for line in username:
names.append(line)
for element in names:
name.append(element.strip())
return random.choice(name)
def proxy():
proxies = []
proxy1 = []
with open('proxies.txt','r') as proxi:
for line in proxi:
proxies.append(line)
for element in proxies:
proxy1.append(element.strip())
return random.choice(proxy1)
def dob():
dateofbirth = str(random.randint(1990, 2002))+'-'+'{:02d}'.format(random.randint(1,12))+'-'+'{:02d}'.format(random.randint(1,28))
return dateofbirth
def password():
chars = string.ascii_letters + string.digits
upcase = random.choice(string.ascii_uppercase)
password1 = upcase + ''.join(random.choice(chars)for x in range(11))
return password1
def main(amout):
for x in range(amout):
username = name()
passwod = password()
userage = useragent()
cookiemonster = httpx.get('https://discord.com/register').headers['set-cookie']
sep = cookiemonster.split(";")
sx = sep[0]
sx2 = sx.split("=")
dfc = sx2[1]
split = sep[6]
split2 = split.split(",")
split3 = split2[1]
split4 = split3.split("=")
sdc = split4[1]
print(f'{Fore.GREEN}Got Cookie!')
while __name__ == '__main__':
fingerprints = httpx.get("https://discord.com/api/v9/experiments",timeout=10)
if fingerprints.text == '':
pass
else:
realfingerprint = fingerprints.json()['fingerprint']
print(f'{Fore.GREEN}Got Fingerprints!')
break
header = {
"Host": "discord.com",
"Connection": "keep-alive",
"sec-ch-ua": '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
"X-Super-Properties": "eyJvcyI6Ik1hYyBPUyBYIiwiYnJvd3NlciI6IkNocm9tZSIsImRldmljZSI6IiIsInN5c3RlbV9sb2NhbGUiOiJlbi1VUyIsImJyb3dzZXJfdXNlcl9hZ2VudCI6Ik1vemlsbGEvNS4wIChNYWNpbnRvc2g7IEludGVsIE1hYyBPUyBYIDEwXzE1XzcpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS85Mi4wLjQ1MTUuMTMxIFNhZmFyaS81MzcuMzYiLCJicm93c2VyX3ZlcnNpb24iOiI5Mi4wLjQ1MTUuMTMxIiwib3NfdmVyc2lvbiI6IjEwLjE1LjciLCJyZWZlcnJlciI6IiIsInJlZmVycmluZ19kb21haW4iOiIiLCJyZWZlcnJlcl9jdXJyZW50IjoiIiwicmVmZXJyaW5nX2RvbWFpbl9jdXJyZW50IjoiIiwicmVsZWFzZV9jaGFubmVsIjoic3RhYmxlIiwiY2xpZW50X2J1aWxkX251bWJlciI6OTI3OTIsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9",
"X-Fingerprint": realfingerprint,
"Accept-Language": "en-US",
"sec-ch-ua-mobile": "?0",
"User-Agent": userage,
"Content-Type": "application/json",
"Authorization": "undefined",
"Accept": "*/*",
"Origin": "https://discord.com",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://discord.com/register",
"X-Debug-Options": "bugReporterEnabled",
"Accept-Encoding": "gzip, deflate, br",
"Cookie": f"__dcfduid={dfc}; __sdcfduid={sdc}"
}
print(f'{Fore.GREEN}Got User-Agent!')
captchadata = {
'User-Agent':userage
}
print(f'{Fore.GREEN}Solving Captcha...')
captchaurl = f'http://2captcha.com/in.php?key={captchakey}&method=hcaptcha&sitekey=f5561ba9-8f1e-40ca-9b5b-a0b3f719ef34&pageurl=http://discord.com/register'
captcharequests = requests.post(captchaurl, data=captchadata, json=1)
captchares = (captcharequests.text)
x = captchares.split('|')
id = x[1]
time.sleep(15)
while True:
resurl = requests.get(f'http://2captcha.com/res.php?key={captchakey}&action=get&id={id}')
if resurl.text == "CAPCHA_NOT_READY":
pass
else:
break
captoken = resurl.text
final, captchatoken = captoken.split('|')
if captcharequests.text == 'ERROR_ZERO_BALANCE':
print(f'{Fore.RED}Error: You do not have any balance!')
print(f'{Fore.GREEN}Got Captcha Token!')
print(f'{Fore.GREEN}Registering Account...')
domainlist = ['@gmail.com','@yahoo.com','@protonmail.com','@hotmail.com','@icloud.com','@aol.com']
email = ''.join(random.choice(string.ascii_letters + string.digits)for x in range(12)) + random.choice(domainlist)
payload = {
'fingerprint': realfingerprint,
'email': email,
'captcha_key': captchatoken,
'consent': 'true',
'date_of_birth': dob(),
'gift_code_sku_id': 'null',
'invite': 'null',
'password': passwod,
'username': username
}
tries = 0
while tries != 5:
try:
registeraccount = httpx.post('https://discord.com/api/v9/auth/register',headers=header, json=payload, timeout=10)
if registeraccount.status_code == 201:
print(f'{Fore.GREEN}Successfully Registered Account!')
break
elif registeraccount.status_code == 200:
print(f'Successfully Registered Account!')
break
elif registeraccount.status_code == 429:
print(f'{Fore.RED}Resource is being rate limited! For {registeraccount.text["retry_after"]}')
tries += 1
time.sleep(3)
else:
print(f'{Fore.RED}Status Code: {registeraccount.status_code}')
tries += 1
time.sleep(3)
except Exception as e:
print(f'Error: {e}')
tries += 1
time.sleep(3)
if tries == 5:
print(f'{Fore.RED}Max Retries exceeded aborting connection')
return
print(f'{Fore.GREEN}Getting Token...')
token = registeraccount.json()['token']
os.system('cls' if os.name=='nt' else 'clear')
print(f'{Fore.GREEN}{username}:{email}:{passwod}:{token}')
with open('tokens.txt','a') as tokenfile:
tokenfile.write(token + '\n')
print(f'{Fore.GREEN}Token: {token}')
print(f'{Fore.GREEN}Waiting 2 minutes')
time.sleep(120)
amtoftokens = int(input('How many tokens do you want to gen?: '))
print(f'{Fore.RED}Note: Almost all tokens will be locked!')
main(amtoftokens)
|
from mpl_toolkits.mplot3d import Axes3D
import scipy.io as sio
import matplotlib.image as img
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from numpy.linalg import svd
def feature_normalize(X):
means = np.mean(X, axis=0)
X_norm = X - means
stds = np.std(X_norm, axis=0)
X_norm = X_norm / stds
return means, stds, X_norm
def compute_covariance_matrix(X):
return X.T.dot(X) / X.shape[0]
def pca(X):
covariance_matrix = compute_covariance_matrix(X)
U, S, V = svd(covariance_matrix, full_matrices=True, compute_uv=True)
return U, S
def project_data(X, U, K):
return X.dot(U[:, :K])
def recover_data(Z, U, K):
return Z.dot(U[:, :K].T)
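# Illustrative addition (a sketch, not part of the original exercise): the
# singular values S returned by pca() are the variances along each principal
# direction, so K can be chosen as the smallest number of components that
# retains a desired fraction of the total variance, e.g. K = choose_k(S, 0.95).
def choose_k(S, retained=0.99):
    frac = np.cumsum(S) / np.sum(S)
    return int(np.searchsorted(frac, retained) + 1)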
def grid_plot(X, dim):
fig = plt.figure(figsize=(6, 6))
M, N = X.shape
gs = gridspec.GridSpec(dim, dim)
gs.update(bottom=0.01, top=0.99, left=0.01, right=0.99,
hspace=0.05, wspace=0.05)
k = 0
for i in range(dim):
for j in range(dim):
ax = plt.subplot(gs[i, j])
ax.axis('off')
ax.imshow(-X[k].reshape(int(np.sqrt(N)), int(np.sqrt(N))).T,
cmap=plt.get_cmap('Greys'), # vmin=-1, vmax=1,
interpolation='nearest') # ,alpha = 1.0)
k += 1
plt.show()
def main():
# 1
file_path = 'ex7data1.mat'
dataset = sio.loadmat(file_path)
X = dataset["X"]
# 2
# X[:, 0] - first column
plt.scatter(X[:, 0], X[:, 1], marker="o")
plt.show()
# 3
covariance_matrix = compute_covariance_matrix(X)
print(covariance_matrix)
# 4
# Feature normalize
# mu, sigma
means, stds, X_norm = feature_normalize(X)
# Run SVD
U, S = pca(X_norm)
print(U, S)
# 5
# Draw the eigenvectors centered at mean of data. These lines show the
# directions of maximum variations in the dataset.
fig, ax = plt.subplots()
ax.plot(X[:, 0], X[:, 1], 'o', mew=0.25)
for i in range(len(S)):
ax.arrow(means[0], means[1], 1.5 * S[i] * U[0, i], 1.5 * S[i] * U[1, i],
head_width=0.25, head_length=0.2, fc='k', ec='k', lw=2, zorder=1000)
ax.axis([0.5, 6.5, 2, 8])
ax.set_aspect('equal')
ax.grid(False)
plt.show()
print('Top principal component: U[:, 0] = [{:.6f} {:.6f}]'.format(U[0, 0], U[1, 0]))
print(' (you should expect to see [-0.707107 -0.707107])')
# 6
# Project the data onto K = 1 dimension
K = 1
Z = project_data(X_norm, U, K)
print('Projection of the first example: {:.6f}'.format(Z[0, 0]))
print('(this value should be about : 1.481274)')
# 7
X_rec = recover_data(Z, U, K)
print('Approximation of the first example: [{:.6f} {:.6f}]'.format(X_rec[0, 0], X_rec[0, 1]))
print(' (this value should be about [-1.047419 -1.047419])')
# 8
plt.figure(figsize=(6, 6))
plt.plot(X_norm.T[0], X_norm.T[1], 'bo', mfc='none', mec='b', ms=8, label='Original Data Points')
plt.plot(X_rec.T[0], X_rec.T[1], 'ro', mfc='none', mec='r', ms=8, label='PCA Reduced Data Points')
plt.xlabel('x1', fontsize=14)
plt.ylabel('x2', fontsize=14)
plt.legend(loc=4)
for (x, y), (x_rec, y_rec) in zip(X_norm, X_rec):
plt.plot([x, x_rec], [y, y_rec], 'k--', lw=1)
plt.xlim(-4, 3)
plt.ylim(-4, 3)
plt.show()
# 9
file_path = 'ex7faces.mat'
dataset = sio.loadmat(file_path)
X = dataset['X']
# 10
grid_plot(X, 10)
# 11
# mu, sigma
means, stds, X_norm = feature_normalize(X)
U, S = pca(X_norm)
# 12, 13
grid_plot(U.T, 6)
# 14-15
grid_plot(U.T, 10)
# 16
A = img.imread('output.jpg')
X = A.reshape(-1, 3)
# 17
    # Sample 1000 random indexes (since working with all the data is too
    # expensive). If you have a fast computer, you may increase this.
sel = np.random.choice(X.shape[0], size=1000)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
idx = np.loadtxt('output.txt')
ax.scatter(X[sel, 0], X[sel, 1], X[sel, 2], cmap='rainbow', c=idx[sel], s=10)
ax.set_title('Pixel dataset plotted in 3D.\nColor shows centroid memberships')
plt.show()
# 18
mu, sigma, X_norm = feature_normalize(X)
# PCA and project the data to 2D
U, S = pca(X_norm)
Z = project_data(X_norm, U, 2)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.scatter(Z[sel, 0], Z[sel, 1], cmap='rainbow', c=idx[sel], s=32)
ax.set_title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction')
ax.grid(False)
plt.show()
if __name__ == '__main__':
main()
|
"""
Extra HTML Widget classes
"""
import datetime
import re
from django.forms.widgets import Widget, Select
from django.utils import datetime_safe
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils.formats import get_format
from django.conf import settings
__all__ = ('SelectDateWidget',)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
output = []
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
output.append('year')
#if not self.first_select: self.first_select = 'year'
elif char in 'bEFMmNn':
output.append('month')
#if not self.first_select: self.first_select = 'month'
elif char in 'dj':
output.append('day')
#if not self.first_select: self.first_select = 'day'
return output
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def __init__(self, attrs=None, years=None, required=True):
# years is an optional list/tuple of years to use in the "year" select box.
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year+10)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, basestring):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(value, input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
else:
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
choices = [(i, i) for i in self.years]
year_html = self.create_select(name, self.year_field, value, year_val, choices)
choices = MONTHS.items()
month_html = self.create_select(name, self.month_field, value, month_val, choices)
choices = [(i, i) for i in range(1, 32)]
day_html = self.create_select(name, self.day_field, value, day_val, choices)
output = []
for field in _parse_date_fmt():
if field == 'year':
output.append(year_html)
elif field == 'month':
output.append(month_html)
elif field == 'day':
output.append(day_html)
return mark_safe(u'\n'.join(output))
def id_for_label(self, id_):
first_select = None
field_list = _parse_date_fmt()
if field_list:
first_select = field_list[0]
if first_select is not None:
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name, None)
def create_select(self, name, field, value, val, choices):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not (self.required and val):
choices.insert(0, self.none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
def _has_changed(self, initial, data):
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
data = datetime_safe.datetime.strptime(data, input_format).date()
except (TypeError, ValueError):
pass
return super(SelectDateWidget, self)._has_changed(initial, data)
|
from rest_framework import serializers
from authors.apps.articles.models import Articles
from authors.apps.highlights.models import Highlights
from .models import Profile, CustomFollows
class GetProfileSerializer(serializers.ModelSerializer):
"""
    Serializer for the user profile that is created upon user registration.
"""
username = serializers.ReadOnlyField(source='get_username')
image_url = serializers.ReadOnlyField(source='get_cloudinary_url')
class Meta:
model = Profile
fields = (
'username', 'first_name', 'last_name', 'bio', 'image', 'image_url',
'website', 'city', 'phone', 'country')
read_only_fields = ("created_at", "updated_at")
class GetCurrentUserProfileSerializer(serializers.ModelSerializer):
"""
    Serializer for the currently authenticated user's profile.
"""
highlights_on_my_articles = serializers.SerializerMethodField()
my_follow_count = serializers.SerializerMethodField()
my_highlights = serializers.SerializerMethodField()
username = serializers.ReadOnlyField(source='get_username')
image_url = serializers.ReadOnlyField(source='get_cloudinary_url')
class Meta:
model = Profile
fields = (
'username', 'first_name', 'last_name', 'bio', 'image', 'image_url', 'my_highlights',
'website', 'city', 'phone', 'country', 'highlights_on_my_articles', 'my_follow_count')
read_only_fields = ("created_at", "updated_at")
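    # Each SerializerMethodField declared above is resolved by the matching
    # get_<field_name> method defined below.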
def get_highlights_on_my_articles(self, obj):
"""
Method to retrieve highlights made on my articles
:return:
List of highlights made on my articles
"""
author_articles = Articles.objects.filter(author=obj.user)
highlights_on_my_article = []
for article in author_articles:
total_article_highlights = Highlights.objects.filter(article=article).count()
if total_article_highlights > 0:
highlights_on_my_article.append(
{"article": {
"title": article.title,
"slug": article.slug
},
"totalHighlights": total_article_highlights
}
)
return highlights_on_my_article
def get_my_highlights(self, obj):
"""
        Method to retrieve details of my highlights.
        :param obj: the Profile instance being serialized
        :return: list of the articles I have highlighted with their highlight counts
"""
highlights_article_ids = Highlights.objects.filter(profile=obj).values_list('article_id')
highlighted_articles = Articles.objects.filter(id__in=highlights_article_ids)
my_highlights = []
for article in highlighted_articles:
total_article_highlights = Highlights.objects.filter(profile=obj, article=article).count()
my_highlights.append(
{"article": {
"title": article.title,
"slug": article.slug
},
"totalHighlights": total_article_highlights
}
)
return my_highlights
def get_my_follow_count(self, obj):
"""
        Method to retrieve counts of my followers and of the profiles I follow.
        :param obj: the Profile instance being serialized
        :return: dict with followingCount and followerCount
"""
# Find the authors and users the current user is following
followingCount = CustomFollows.objects.filter(
from_profile_id=obj.id
).count()
# Find the authors and users the current user is followed by
followerCount = CustomFollows.objects.filter(
to_profile_id=obj.id
).count()
return {"followingCount": followingCount, "followerCount": followerCount}
|
import test_common
from spartan.examples.sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_digits
N_TREES = 50
class TestRandomForest(test_common.ClusterTest):
def test_forest(self):
ds = load_digits()
X = ds.data
y = ds.target
        rf = RandomForestClassifier(n_estimators=N_TREES)
        rf.fit(X, y)
        # make sure the forest memorizes the training data
assert rf.score(X, y) >= 0.95
|
from infosystem.common import exception
operation_after_post_registry = dict()
def do_after_post(manager, operation):
'''
Decorator for register action after post based on manager operation
Parameters:
manager(cls): The Manager class reference
operation(cls): The Operation class reference
Returns:
do_after_post(manager, operation)(fn): The original function
'''
def wrapper(fn):
key = (manager, operation)
if operation_after_post_registry.get(key):
raise exception.InfoSystemException(
f'The operation {manager}.{operation} was already registered')
operation_after_post_registry[key] = fn
return fn
return wrapper
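# Hypothetical usage sketch (the manager/operation names below are illustrative,
# not taken from this codebase):
#
#   @do_after_post(UserManager, CreateOperation)
#   def notify_user_created(*args, **kwargs):
#       ...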
|
import re
import json
import logging
import queue
import threading
from importlib import import_module
import asyncio
from lxml import etree
class Farmer(object):
def __init__(self, steam, appids=[], scheduler="simple"):
self._state = "running"
self.mutex = threading.RLock()
self.steam = steam
async def sched(self, appids=[], scheduler="simple"):
games, __scheduler__ = {}, None
while True:
if self.state == "pause":
if self.steam.client.current_games_played:
await self.steam.clear()
yield True; continue
if self.state == "stop":
return
if not games:
games = await self.farmable()
if appids: games = { i:games[i] for i in appids }
module = import_module('esteam.schedulers.' + scheduler)
__scheduler__ = module.scheduler(games, self.steam)
r = await __scheduler__.__anext__()
if not r: return
yield r
self.sched = sched(self, appids, scheduler)
async def _games_farmable(self):
session = await self.steam.session()
if not session: return {}
def parse_page(doc):
games = doc.xpath("//div[contains(@class, 'badge_row') and contains(@class, 'is_link')]")
results = {}
for game in games:
try:
link = game.xpath("a[contains(@class, 'badge_row_overlay')]/@href")[0].strip()
                    appid = re.findall(r"gamecards/(\d+)", link)
if not appid: continue
appid = int(appid[0])
progress = game.xpath(".//div[contains(@class, 'badge_title_stats_drops')]/span[contains(@class, 'progress_info_bold')]/text()")
if not progress: continue
progress = progress[0].strip()
                    cards = re.findall(r"\d+", progress)
if not cards: continue
cards = int(cards[0])
results[appid] = {'cards_remain': cards}
except Exception as e:
continue
return results
url_template = "http://steamcommunity.com/id/luxrck/badges/?p={}"
url = url_template.format(1)
r = await session.get(url)
htmldoc = await r.text()
doc = etree.HTML(htmldoc)
        num_pages = 1
        pagelinks = doc.xpath("//div[contains(@class, 'profile_paging')]/div[contains(@class, 'pageLinks')]")
        if pagelinks:
            pagelinks = pagelinks[0].xpath("a[contains(@class, 'pagelink')]/text()")
            if pagelinks:
                last_page = re.search(r"\d+", pagelinks[-1].strip())
                if last_page:
                    num_pages = int(last_page.group(0))
results = {}
results.update(parse_page(doc))
for p in range(2, num_pages+1):
url = url_template.format(p)
            r = await session.get(url)
            htmldoc = await r.text()
doc = etree.HTML(htmldoc)
results.update(parse_page(doc))
return results
async def farmable(self):
owned_games = await self.steam.games()
games = await self._games_farmable()
for game in games:
info = owned_games.get(game, {})
games[game].update(info)
return games
@property
def state(self):
return self._state
@state.setter
def state(self, val):
self.mutex.acquire()
self._state = val
self.mutex.release()
class FarmerManager(object):
__instance__ = None
def __new__(cls, *args, **kwargs):
if not FarmerManager.__instance__:
FarmerManager.__instance__ = object.__new__(cls)
FarmerManager.__init__(FarmerManager.__instance__, *args, **kwargs)
return FarmerManager.__instance__
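    # Note: even though __new__ returns the cached instance, Python still calls
    # __init__ on every FarmerManager() construction, so constructing it again
    # resets the queues and the farmer registry.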
def __init__(self, loop=None):
self.farmers = {}
self._addq = queue.Queue()
self._popq = queue.Queue()
self._state = "running" # running, pause, stop
self.mutex = threading.RLock()
self.loop = loop or asyncio.get_event_loop()
@property
def state(self):
return self._state
@state.setter
def state(self, val):
self.mutex.acquire()
self._state = val
self.mutex.release()
def add(self, steam, appids=[], scheduler="simple"):
f = Farmer(steam, appids, scheduler=scheduler)
self._addq.put(f)
# self.farmers[steam.username] = f
def pop(self, steam):
self._popq.put(steam)
# return self.farmers.pop(steam.username, None)
async def run_async(self):
while True:
if self.state == "pause":
await asyncio.sleep(1); continue
if self.state == "stop":
return
for u,f in self.farmers.items():
r = await f.sched.__anext__()
if not r: self.farmers.pop(u)
while not self._addq.empty():
try:
f = self._addq.get_nowait()
self.farmers.setdefault(f.steam.username, f)
except queue.Empty:
break
while not self._popq.empty():
try:
s = self._popq.get_nowait()
await s.games_played([])
self.farmers.pop(s.username, None)
except queue.Empty:
break
await asyncio.sleep(1)
def run(self, asynchronous=False):
if not asynchronous:
            return threading.Thread(target=self.loop.run_until_complete, args=(self.run_async(),), daemon=True).start()
return self.run_async()
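# Hedged usage sketch (the authenticated Steam client object is assumed and not
# constructed here):
#   manager = FarmerManager()
#   manager.add(steam)                # queue an account for card farming
#   manager.run(asynchronous=False)   # drive the farming loop in a daemon thread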
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
# upnp-tester.py
#
# very basic atm
#
# provides these functions:
#
# list - display all devices
# extract <uuid> - extract device and service xml files and put them in a
# /tmp/<uuid> directory
# send <uuid> - pack the before extracted xml files in a tar.gz and
# send them via email to the Coherence googlemail account
#
import os
from sets import Set
from twisted.internet import stdio
from twisted.protocols import basic
from twisted.internet import protocol
try:
from twisted.mail import smtp
from twisted.names import client as namesclient
from twisted.names import dns
import StringIO
class SMTPClient(smtp.ESMTPClient):
""" build an email message and send it to our googlemail account
"""
def __init__(self, mail_from, mail_to, mail_subject, mail_file, *args, **kwargs):
smtp.ESMTPClient.__init__(self, *args, **kwargs)
self.mailFrom = mail_from
self.mailTo = mail_to
self.mailSubject = mail_subject
self.mail_file = mail_file
self.mail_from = mail_from
def getMailFrom(self):
result = self.mailFrom
self.mailFrom = None
return result
def getMailTo(self):
return [self.mailTo]
def getMailData(self):
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
msg = MIMEMultipart()
msg['Subject'] = self.mailSubject
msg['From'] = self.mail_from
msg['To'] = self.mailTo
fp = open(self.mail_file, 'rb')
tar = MIMEApplication(fp.read(), 'x-tar')
fp.close()
tar.add_header('Content-Disposition', 'attachment', filename=os.path.basename(self.mail_file))
msg.attach(tar)
return StringIO.StringIO(msg.as_string())
def sentMail(self, code, resp, numOk, addresses, log):
print 'Sent', numOk, 'messages'
class SMTPClientFactory(protocol.ClientFactory):
protocol = SMTPClient
def __init__(self, mail_from, mail_to, mail_subject, mail_file, *args, **kwargs):
self.mail_from = mail_from
self.mail_to = mail_to
self.mail_subject = mail_subject
self.mail_file = mail_file
def buildProtocol(self, addr):
return self.protocol(self.mail_from, self.mail_to,
self.mail_subject, self.mail_file,
secret=None, identity='localhost')
except ImportError:
pass
from twisted.internet import reactor, defer
from twisted.web import client
from coherence.base import Coherence
class UI(basic.LineReceiver):
from os import linesep as delimiter
def connectionMade(self):
self.print_prompt()
def lineReceived(self, line):
args = line.strip().split()
if args:
cmd = args[0].lower()
if hasattr(self, 'cmd_%s' % cmd):
getattr(self, 'cmd_%s' % (cmd))(args[1:])
elif cmd == "?":
self.cmd_help(args[1:])
else:
self.transport.write("""Unknown command '%s'\n""" % (cmd))
self.print_prompt()
def cmd_help(self, args):
"help -- show help"
methods = Set([getattr(self, x) for x in dir(self) if x[:4] == "cmd_"])
self.transport.write("Commands:\n")
for method in methods:
if hasattr(method, '__doc__'):
self.transport.write("%s\n" % (method.__doc__))
def cmd_list(self, args):
"list -- list devices"
self.transport.write("Devices:\n")
for d in self.coherence.get_devices():
self.transport.write(str("%s %s [%s/%s/%s]\n" % (d.friendly_name, ':'.join(d.device_type.split(':')[3:5]), d.st, d.usn.split(':')[1], d.host)))
def cmd_extract(self, args):
"extract <uuid> -- download xml files from device"
device = self.coherence.get_device_with_id(args[0])
if device == None:
self.transport.write("device %s not found - aborting\n" % args[0])
else:
self.transport.write(str("extracting from %s @ %s\n" % (device.friendly_name, device.host)))
try:
l = []
def device_extract(workdevice, path):
tmp_dir = os.path.join(path, workdevice.get_uuid())
os.mkdir(tmp_dir)
d = client.downloadPage(workdevice.get_location(), os.path.join(tmp_dir, 'device-description.xml'))
l.append(d)
for service in workdevice.services:
d = client.downloadPage(service.get_scpd_url(), os.path.join(tmp_dir, '%s-description.xml' % service.service_type.split(':', 3)[3]))
l.append(d)
for ed in workdevice.devices:
device_extract(ed, tmp_dir)
def finished(result):
self.transport.write(str("\nextraction of device %s finished\nfiles have been saved to /tmp/%s\n" % (args[0], args[0])))
self.print_prompt()
device_extract(device, '/tmp')
dl = defer.DeferredList(l)
dl.addCallback(finished)
except Exception, msg:
self.transport.write(str("problem creating download directory %s\n" % msg))
def cmd_send(self, args):
"send <uuid> -- send before extracted xml files to the Coherence home base"
if os.path.isdir(os.path.join('/tmp', args[0])) == 1:
cwd = os.getcwd()
os.chdir('/tmp')
import tarfile
tar = tarfile.open(os.path.join('/tmp', args[0] + '.tgz'), "w:gz")
for file in os.listdir(os.path.join('/tmp', args[0])):
tar.add(os.path.join(args[0], file))
tar.close()
os.chdir(cwd)
def got_mx(result):
mx_list = result[0]
mx_list.sort(lambda x, y: cmp(x.payload.preference, y.payload.preference))
if len(mx_list) > 0:
import posix
import pwd
import socket
reactor.connectTCP(str(mx_list[0].payload.name), 25,
SMTPClientFactory('@'.join((pwd.getpwuid(posix.getuid())[0], socket.gethostname())), 'upnp.fingerprint@googlemail.com', 'xml-files', os.path.join('/tmp', args[0] + '.tgz')))
mx = namesclient.lookupMailExchange('googlemail.com')
mx.addCallback(got_mx)
def cmd_quit(self, args):
"quit -- quits this program"
reactor.stop()
cmd_exit = cmd_quit
def print_prompt(self):
self.transport.write('>>> ')
if __name__ == '__main__':
c = Coherence({'logmode': 'none'})
ui = UI()
ui.coherence = c
stdio.StandardIO(ui)
reactor.run()
|
from django.db import models
# Create your models here.
class BookModel(models.Model):
bookname = models.CharField(max_length=50)
subject = models.CharField(max_length=50)
price = models.IntegerField()
class Meta:
db_table = "books"
class Bankaccount(models.Model):
accountno = models.IntegerField()
name = models.CharField(max_length=50)
balance = models.IntegerField()
class Meta:
db_table = "newbank"
|
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import botocore
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::ElastiCache::CacheCluster'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
EC_CLIENT_MOCK = MagicMock()
class Boto3Mock():
sample = 123
def client(self, client_name, *args, **kwargs):
self.sample = 123
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
if client_name == 'elasticache':
return EC_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('ELASTICACHE_REDIS_CLUSTER_AUTO_BACKUP_CHECK')
def replication_groups_se(**kwargs):
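    # Simulates ElastiCache pagination: the first call (no 'Marker' given) returns a
    # page carrying a 'Marker', and the follow-up call returns the final page.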
if 'Marker' not in kwargs:
return {'ReplicationGroups': [{'ReplicationGroupId':'ABC', 'SnapshotRetentionLimit': 16}], 'Marker': 'ABC'}
return {'ReplicationGroups': [{'ReplicationGroupId':'DEF', 'SnapshotRetentionLimit': 10}]}
class CompliantResourceTest(unittest.TestCase):
def test_scenario_5_is_compliant(self):
EC_CLIENT_MOCK.describe_cache_clusters = MagicMock(return_value={'CacheClusters': [{'CacheClusterId':'GHI', 'SnapshotRetentionLimit': 16, 'Engine': 'redis'}]})
EC_CLIENT_MOCK.describe_replication_groups.side_effect = replication_groups_se
lambda_result = RULE.lambda_handler(build_lambda_scheduled_event('{"SnapshotRetentionPeriod":"15"}'), {})
print(lambda_result)
assert_successful_evaluation(self, lambda_result, [build_expected_response('COMPLIANT', "GHI"),
build_expected_response('COMPLIANT', "ABC"),
build_expected_response('NON_COMPLIANT', "DEF", annotation="Automatic backup retention period for Amazon ElastiCache cluster DEF is less then 15 day(s).")
], len(lambda_result))
class NonCompliantResourceTest(unittest.TestCase):
def test_scenario_4_low_retention(self):
EC_CLIENT_MOCK.describe_cache_clusters = MagicMock(return_value={'CacheClusters': [{'CacheClusterId':'ABC', 'SnapshotRetentionLimit': 16, 'Engine': 'redis'}]})
EC_CLIENT_MOCK.describe_replication_groups = MagicMock(return_value={'ReplicationGroups': []})
lambda_result = RULE.lambda_handler(build_lambda_scheduled_event('{"SnapshotRetentionPeriod":"15"}'), {})
assert_successful_evaluation(self, lambda_result, [build_expected_response("COMPLIANT", "ABC")], len(lambda_result))
def test_scenario_3_no_auto_backup(self):
# compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None
EC_CLIENT_MOCK.describe_cache_clusters = MagicMock(return_value={'CacheClusters': [{'CacheClusterId':'ABCD', 'SnapshotRetentionLimit': 0, 'Engine': 'redis'}]})
EC_CLIENT_MOCK.describe_replication_groups = MagicMock(return_value={'ReplicationGroups': []})
lambda_result = RULE.lambda_handler(build_lambda_scheduled_event('{"SnapshotRetentionPeriod":"15"}'), {})
assert_successful_evaluation(self, lambda_result, [build_expected_response("NON_COMPLIANT", "ABCD", annotation="Automatic backup not enabled for Amazon ElastiCache cluster: ABCD")], len(lambda_result))
class ErrorTest(unittest.TestCase):
def test_scenario_2_parameter_error(self):
lambda_result = RULE.lambda_handler(build_lambda_scheduled_event('{"SnapshotRetentionPeriod":"-1"}'), {})
assert_customer_error_response(self, lambda_result, customer_error_message='SnapshotRetentionPeriod value should be an integer greater than 0', customer_error_code='InvalidParameterValueException')
class NotApplicableResourceTest(unittest.TestCase):
def test_scenario_1_no_resources(self):
EC_CLIENT_MOCK.describe_cache_clusters = MagicMock(return_value={'CacheClusters': []})
EC_CLIENT_MOCK.describe_replication_groups = MagicMock(return_value={'ReplicationGroups': []})
lambda_result = RULE.lambda_handler(build_lambda_scheduled_event('{"SnapshotRetentionPeriod":"15"}'), {})
assert_successful_evaluation(self, lambda_result, [build_expected_response("NOT_APPLICABLE", "123456789012")], len(lambda_result))
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
    if isinstance(response, dict):
        test_class.assertEqual(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
        test_class.assertEqual(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
        test_class.assertEqual(resp_expected['ComplianceType'], response['ComplianceType'])
        test_class.assertTrue(response['OrderingTimestamp'])
        if 'Annotation' in resp_expected or 'Annotation' in response:
            test_class.assertEqual(resp_expected['Annotation'], response['Annotation'])
    elif isinstance(response, list):
        test_class.assertEqual(evaluations_count, len(response))
        for i, response_expected in enumerate(resp_expected):
            test_class.assertEqual(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
            test_class.assertEqual(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
            test_class.assertEqual(response_expected['ComplianceType'], response[i]['ComplianceType'])
            test_class.assertTrue(response[i]['OrderingTimestamp'])
            if 'Annotation' in response_expected or 'Annotation' in response[i]:
                test_class.assertEqual(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
RULE.ASSUME_ROLE_MODE = True
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
RULE.ASSUME_ROLE_MODE = True
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import tf_util
from tensorforce.core.parameters import Parameter
class Decaying(Parameter):
"""
Decaying hyperparameter (specification key: `decaying`, `linear`, `exponential`, `polynomial`,
`inverse_time`, `cosine`, `cosine_restarts`, `linear_cosine`, `linear_cosine_noisy`).
Args:
decay ("linear" | "exponential" | "polynomial" | "inverse_time" | "cosine" | "cosine_restarts" | "linear_cosine" | "linear_cosine_noisy"):
Decay type, see also
`TensorFlow docs <https://www.tensorflow.org/api_docs/python/tf/train>`__
(<span style="color:#C00000"><b>required</b></span>).
unit ("timesteps" | "episodes" | "updates"): Unit of decay schedule
(<span style="color:#C00000"><b>required</b></span>).
num_steps (int): Number of decay steps
(<span style="color:#C00000"><b>required</b></span>).
initial_value (float | int): Initial value
(<span style="color:#C00000"><b>required</b></span>).
increasing (bool): Whether to subtract the decayed value from 1.0
(<span style="color:#00C000"><b>default</b></span>: false).
inverse (bool): Whether to take the inverse of the decayed value
(<span style="color:#00C000"><b>default</b></span>: false).
scale (float): Scaling factor for (inverse) decayed value
(<span style="color:#00C000"><b>default</b></span>: 1.0).
kwargs: Additional arguments depend on decay mechanism.<br>
Linear decay:
<ul>
<li><b>final_value</b> (<i>float | int</i>) – Final value
(<span style="color:#C00000"><b>required</b></span>).</li>
</ul>
Exponential decay:
<ul>
<li><b>decay_rate</b> (<i>float</i>) – Decay rate
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>staircase</b> (<i>bool</i>) – Whether to apply decay in a discrete
staircase, as opposed to continuous, fashion.
(<span style="color:#00C000"><b>default</b></span>: false).</li>
</ul>
Polynomial decay:
<ul>
<li><b>final_value</b> (<i>float | int</i>) – Final value
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>power</b> (<i>float | int</i>) – Power of polynomial
(<span style="color:#00C000"><b>default</b></span>: 1, thus linear).</li>
<li><b>cycle</b> (<i>bool</i>) – Whether to cycle beyond num_steps
(<span style="color:#00C000"><b>default</b></span>: false).</li>
</ul>
Inverse time decay:
<ul>
<li><b>decay_rate</b> (<i>float</i>) – Decay rate
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>staircase</b> (<i>bool</i>) – Whether to apply decay in a discrete
staircase, as opposed to continuous, fashion.
(<span style="color:#00C000"><b>default</b></span>: false).</li>
</ul>
Cosine decay:
<ul>
<li><b>alpha</b> (<i>float</i>) – Minimum learning rate value as a fraction of
learning_rate
(<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
</ul>
Cosine decay with restarts:
<ul>
<li><b>t_mul</b> (<i>float</i>) – Used to derive the number of iterations in the
i-th period
(<span style="color:#00C000"><b>default</b></span>: 2.0).</li>
<li><b>m_mul</b> (<i>float</i>) – Used to derive the initial learning rate of the
i-th period
(<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
<li><b>alpha</b> (<i>float</i>) – Minimum learning rate value as a fraction of
the learning_rate
(<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
</ul>
Linear cosine decay:
<ul>
<li><b>num_periods</b> (<i>float</i>) – Number of periods in the cosine part of
the decay
(<span style="color:#00C000"><b>default</b></span>: 0.5).</li>
<li><b>alpha</b> (<i>float</i>) – Alpha value
(<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
<li><b>beta</b> (<i>float</i>) – Beta value
(<span style="color:#00C000"><b>default</b></span>: 0.001).</li>
</ul>
Noisy linear cosine decay:
<ul>
<li><b>initial_variance</b> (<i>float</i>) – Initial variance for the noise
(<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
<li><b>variance_decay</b> (<i>float</i>) – Decay for the noise's variance
(<span style="color:#00C000"><b>default</b></span>: 0.55).</li>
<li><b>num_periods</b> (<i>float</i>) – Number of periods in the cosine part of
the decay
(<span style="color:#00C000"><b>default</b></span>: 0.5).</li>
<li><b>alpha</b> (<i>float</i>) – Alpha value
(<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
<li><b>beta</b> (<i>float</i>) – Beta value
(<span style="color:#00C000"><b>default</b></span>: 0.001).</li>
</ul>
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
dtype (type): <span style="color:#0000C0"><b>internal use</b></span>.
min_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
max_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(
self, *, decay, unit, num_steps, initial_value, increasing=False, inverse=False, scale=1.0,
name=None, dtype=None, min_value=None, max_value=None, **kwargs
):
assert decay in (
'cosine', 'cosine_restarts', 'exponential', 'inverse_time', 'linear', 'linear_cosine',
'linear_cosine_noisy', 'polynomial'
)
assert unit in ('timesteps', 'episodes', 'updates')
        assert (isinstance(num_steps, int) or num_steps % 1.0 == 0.0) and num_steps > 0
assert isinstance(initial_value, (float, int))
if isinstance(initial_value, int):
if dtype != 'int':
raise TensorforceError.dtype(
name='Decaying', argument='initial_value', dtype=type(initial_value)
)
elif isinstance(initial_value, float):
if dtype != 'float':
raise TensorforceError.dtype(
name='Decaying', argument='initial_value', dtype=type(initial_value)
)
else:
raise TensorforceError.unexpected()
if decay == 'linear':
assert len(kwargs) == 1 and 'final_value' in kwargs
decay = 'polynomial'
self.decay = decay
self.num_steps = int(num_steps)
self.initial_value = initial_value
self.increasing = increasing
self.inverse = inverse
self.scale = scale
self.kwargs = kwargs
super().__init__(
unit=unit, name=name, dtype=dtype, min_value=min_value, max_value=max_value
)
def min_value(self):
if self.decay == 'cosine' or self.decay == 'cosine_restarts':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
if self.initial_value >= 0.0:
min_value = self.initial_value * self.kwargs.get('alpha', 0.0)
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = self.initial_value * self.kwargs.get('alpha', 0.0)
elif self.decay == 'exponential' or self.decay == 'inverse_time':
assert 0.0 <= self.kwargs['decay_rate'] <= 1.0
if self.kwargs['decay_rate'] == 1.0:
min_value = max_value = self.initial_value
elif self.initial_value >= 0.0:
min_value = 0.0
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = 0.0
elif self.decay == 'linear_cosine' or self.decay == 'linear_cosine_noisy':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
assert 0.0 <= self.kwargs.get('beta', 0.0) <= 1.0
if self.initial_value >= 0.0:
min_value = self.initial_value * self.kwargs.get('beta', 0.001)
max_value = self.initial_value * (
1.0 + self.kwargs.get('alpha', 0.0) + self.kwargs.get('beta', 0.001)
)
else:
min_value = self.initial_value * (
1.0 + self.kwargs.get('alpha', 0.0) + self.kwargs.get('beta', 0.001)
)
max_value = self.initial_value * self.kwargs.get('beta', 0.001)
elif self.decay == 'polynomial':
if self.kwargs.get('power', 1.0) == 0.0:
min_value = max_value = self.initial_value
elif self.initial_value >= self.kwargs['final_value']:
min_value = self.kwargs['final_value']
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = self.kwargs['final_value']
assert min_value <= max_value
if self.increasing:
assert 0.0 <= min_value <= max_value <= 1.0
min_value, max_value = 1.0 - max_value, 1.0 - min_value
if self.inverse:
assert util.epsilon <= min_value <= max_value
min_value, max_value = 1.0 / max_value, 1.0 / min_value
if self.scale == 1.0:
pass
elif self.scale >= 0.0:
min_value, max_value = self.scale * min_value, self.scale * max_value
else:
min_value, max_value = self.scale * max_value, self.scale * min_value
return self.spec.py_type()(min_value)
def max_value(self):
if self.decay == 'cosine' or self.decay == 'cosine_restarts':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
if self.initial_value >= 0.0:
min_value = self.initial_value * self.kwargs.get('alpha', 0.0)
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = self.initial_value * self.kwargs.get('alpha', 0.0)
elif self.decay == 'exponential' or self.decay == 'inverse_time':
assert 0.0 <= self.kwargs['decay_rate'] <= 1.0
if self.kwargs['decay_rate'] == 1.0:
min_value = max_value = self.initial_value
elif self.initial_value >= 0.0:
min_value = 0.0
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = 0.0
elif self.decay == 'linear_cosine' or self.decay == 'linear_cosine_noisy':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
assert 0.0 <= self.kwargs.get('beta', 0.0) <= 1.0
if self.initial_value >= 0.0:
min_value = self.initial_value * self.kwargs.get('beta', 0.001)
max_value = self.initial_value * (
1.0 + self.kwargs.get('alpha', 0.0) + self.kwargs.get('beta', 0.001)
)
else:
min_value = self.initial_value * (
1.0 + self.kwargs.get('alpha', 0.0) + self.kwargs.get('beta', 0.001)
)
max_value = self.initial_value * self.kwargs.get('beta', 0.001)
elif self.decay == 'polynomial':
if self.kwargs.get('power', 1.0) == 0.0:
min_value = max_value = self.initial_value
elif self.initial_value >= self.kwargs['final_value']:
min_value = self.kwargs['final_value']
max_value = self.initial_value
else:
min_value = self.initial_value
max_value = self.kwargs['final_value']
assert min_value <= max_value
if self.increasing:
assert 0.0 <= min_value <= max_value <= 1.0
min_value, max_value = 1.0 - max_value, 1.0 - min_value
if self.inverse:
assert 0.0 < min_value <= max_value
min_value, max_value = 1.0 / max_value, 1.0 / min_value
if self.scale == 1.0:
pass
elif self.scale >= 0.0:
min_value, max_value = self.scale * min_value, self.scale * max_value
else:
min_value, max_value = self.scale * max_value, self.scale * min_value
return self.spec.py_type()(max_value)
def final_value(self):
if self.decay == 'cosine' or self.decay == 'cosine_restarts':
            assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
value = self.initial_value * self.kwargs.get('alpha', 0.0)
elif self.decay == 'exponential' or self.decay == 'inverse_time':
assert 0.0 <= self.kwargs['decay_rate'] <= 1.0
if self.kwargs['decay_rate'] == 1.0:
value = self.initial_value
else:
value = 0.0
elif self.decay == 'linear_cosine' or self.decay == 'linear_cosine_noisy':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
assert 0.0 <= self.kwargs.get('beta', 0.0) <= 1.0
value = self.initial_value * self.kwargs.get('beta', 0.001)
elif self.decay == 'polynomial':
if self.kwargs.get('power', 1.0) == 0.0:
value = self.initial_value
else:
value = self.kwargs['final_value']
if self.increasing:
assert 0.0 <= value <= 1.0
value = 1.0 - value
if self.inverse:
assert value > 0.0
value = 1.0 / value
if self.scale != 1.0:
value = value * self.scale
return self.spec.py_type()(value)
def parameter_value(self, *, step):
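        # Build the matching tf.keras decay schedule and evaluate it at the current
        # step; the increasing/inverse/scale post-processing follows below.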
initial_value = tf_util.constant(value=self.initial_value, dtype='float')
if self.decay == 'cosine':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
parameter = tf.keras.experimental.CosineDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
alpha=self.kwargs.get('alpha', 0.0)
)(step=step)
elif self.decay == 'cosine_restarts':
assert 0.0 <= self.kwargs.get('alpha', 0.0) <= 1.0
parameter = tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate=initial_value, first_decay_steps=(self.num_steps + 1),
t_mul=self.kwargs.get('t_mul', 2.0), m_mul=self.kwargs.get('m_mul', 1.0),
alpha=self.kwargs.get('alpha', 0.0)
)(step=step)
elif self.decay == 'exponential':
assert self.kwargs['decay_rate'] >= 0.0
parameter = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
decay_rate=self.kwargs['decay_rate'], staircase=self.kwargs.get('staircase', False)
)(step=step)
elif self.decay == 'inverse_time':
assert self.kwargs['decay_rate'] >= 0.0
parameter = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
decay_rate=self.kwargs['decay_rate'], staircase=self.kwargs.get('staircase', False)
)(step=step)
elif self.decay == 'linear_cosine':
assert self.kwargs.get('beta', 0.001) >= 0.0
parameter = tf.keras.experimental.LinearCosineDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
num_periods=self.kwargs.get('num_periods', 0.5),
alpha=self.kwargs.get('alpha', 0.0), beta=self.kwargs.get('beta', 0.001)
)(step=step)
elif self.decay == 'linear_cosine_noisy':
assert self.kwargs.get('beta', 0.001) >= 0.0
parameter = tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
initial_variance=self.kwargs.get('initial_variance', 1.0),
variance_decay=self.kwargs.get('variance_decay', 0.55),
num_periods=self.kwargs.get('num_periods', 0.5),
alpha=self.kwargs.get('alpha', 0.0), beta=self.kwargs.get('beta', 0.001)
)(step=step)
elif self.decay == 'polynomial':
assert self.kwargs.get('power', 1.0) >= 0.0
parameter = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=initial_value, decay_steps=(self.num_steps + 1),
end_learning_rate=self.kwargs['final_value'], power=self.kwargs.get('power', 1.0),
cycle=self.kwargs.get('cycle', False)
)(step=step)
if self.increasing:
one = tf_util.constant(value=1.0, dtype='float')
assertions = list()
if self.config.create_tf_assertions:
zero = tf_util.constant(value=0.0, dtype='float')
assertions.append(tf.debugging.assert_greater_equal(x=parameter, y=zero))
assertions.append(tf.debugging.assert_less_equal(x=parameter, y=one))
with tf.control_dependencies(control_inputs=assertions):
parameter = one - parameter
if self.inverse:
            zero = tf_util.constant(value=0.0, dtype='float')
            epsilon = tf_util.constant(value=util.epsilon, dtype='float')
            # Clamp the parameter away from zero before taking the reciprocal below.
            parameter = tf.where(
                condition=(parameter > zero),
                x=tf.maximum(x=parameter, y=epsilon), y=tf.minimum(x=parameter, y=-epsilon)
            )
parameter = tf.math.reciprocal(x=parameter)
if self.scale != 1.0:
scale = tf_util.constant(value=self.scale, dtype='float')
parameter = parameter * scale
parameter = tf_util.cast(x=parameter, dtype=self.spec.type)
return parameter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
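# The 1-D lists below are reshaped into column vectors of shape (n_samples, 1)
# because scikit-learn estimators expect 2-D feature arrays.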
X = [10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90]
X = np.array(X).reshape(-1, 1)
y = np.array(y).reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
regr = LinearRegression()
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))
y_pred = regr.predict(X_test)
plt.scatter(X_test, y_test, color='b')
plt.plot(X_test, y_pred, color='k')
plt.show()
|
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
from getpaid.backends.payu import PaymentProcessor
class Command(BaseCommand):
help = 'Display URL path for PayU Online URL configuration'
def handle(self, *args, **options):
current_site = Site.objects.get_current()
        self.stdout.write('Log in to the PayU configuration page and set up the following links:\n\n')
self.stdout.write(' * Success URL: http://%s%s\n https://%s%s\n\n' % (
current_site.domain,
reverse('getpaid-payu-success', kwargs={'pk': 1234}).replace('1234', '%orderId%'),
current_site.domain,
reverse('getpaid-payu-success', kwargs={'pk': 1234}).replace('1234', '%orderId%'),
)
)
self.stdout.write(' * Failure URL: http://%s%s\n https://%s%s\n\n' % (
current_site.domain,
reverse('getpaid-payu-failure', kwargs={'pk': 1234, 'error': 9999}).replace('1234', r'%orderId%').replace('9999', r'%error%'),
current_site.domain,
reverse('getpaid-payu-failure', kwargs={'pk': 1234, 'error': 9999}).replace('1234', r'%orderId%').replace('9999', r'%error%'),
)
)
self.stdout.write(' * Online URL: http://%s%s\n https://%s%s\n\n' % (
current_site.domain,
reverse('getpaid-payu-online'),
current_site.domain,
reverse('getpaid-payu-online'),
)
)
        self.stdout.write('To change the domain name, please edit the Sites settings. Don\'t forget to set up your web server to accept https connections in order to use the secure links.\n')
        if PaymentProcessor.get_backend_setting('testing', False):
            self.stdout.write('\nTesting mode is ON\nPlease be sure that you have enabled test payments in the PayU configuration page.\n')
        if PaymentProcessor.get_backend_setting('signing', False):
            self.stdout.write('\nRequest signing is ON\n * Please be sure that you have enabled payment signing in the PayU configuration page.\n')
|
from flask import Flask
import secrets
app = Flask(__name__)
app.secret_key = secrets.token_hex()
|