import time
from flexsm import StateMachine, State, Transition, addTransition
root = State("root")
state2 = State("state 2")
state1 = State("state 1")
@addTransition(state=root)
class WaitForXToBecomeSmall(Transition):
def getNextState(self, x):
if x>15:
return root
else:
return state1
@addTransition(state=state1, next=state2)
class Wait1Dot5Seconds(Transition):
def check(self, time_in_state, x):
return time_in_state > 1.5 and x == 14
def onTrigger(self, time_in_state, x):
print("We are in this boring state since {:.2f} seconds".format(time_in_state))
sm = StateMachine(root)
sm.update("y", 17)
print("Changed y to 17, current state={}".format(sm.current_state))
sm.update("x", 19)
print("Changed x to 19, current state={}".format(sm.current_state))
sm.update("x", 15)
print("Changed x to 15, current state={}".format(sm.current_state))
sm.update("x", 14)
print("Changed x to 14, current state={}".format(sm.current_state))
for i in range(20):
time.sleep(0.1)
print("I slept two seconds, current state={}".format(sm.current_state))
|
################################################################################
# E-Cookbook - electronic recipe organizer
# Copyright (C) 2006 Shyam "Doggan" Guthikonda.
#
# Email: shyamguth@gmail.com
# URL: http://shy.am
#
# Project Page: http://sourceforge.net/projects/ecookbook
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import wx
import wx.lib.mixins.listctrl as listmix
import images
class ListCtrl(wx.ListView, listmix.ListCtrlAutoWidthMixin, listmix.ColumnSorterMixin):
itemDataMap = {}
def __init__(self, parent, id, numCols, pos = wx.DefaultPosition, size = wx.DefaultSize,
style = 0):
wx.ListView.__init__(self, parent, id, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
# Arrows on the column headers to show which direction we're sorting
self.__imageList = wx.ImageList(16, 16)
self.__sm_up = self.__imageList.Add(images.getSmallUpArrowBitmap())
self.__sm_dn = self.__imageList.Add(images.getSmallDnArrowBitmap())
self.SetImageList(self.__imageList, wx.IMAGE_LIST_SMALL)
listmix.ColumnSorterMixin.__init__(self, numCols)
# Needed by the sorter mixin
def GetListCtrl(self):
return self
def GetSortImages(self):
return (self.__sm_dn, self.__sm_up)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:Administrator
@file: errors.py
@time: 2019/05/04
@software: PyCharm
@detail: Error handling
"""
|
try:
from buidl.cecc import * # noqa: F401,F403
except ModuleNotFoundError:
from buidl.pecc import * # noqa: F401,F403
|
#coding:utf-8
from mock import patch, Mock
from requests import Session
from unittest import TestCase
from facebook_client.request import FacebookSession, FacebookResponse
class MockedRequestsResponse(object):
status_code = 200
def json(self):
return {'key': 'value'}
class TestFacebookSession(TestCase):
def setUp(self):
self.access_token = 'access_token'
self.session = FacebookSession(access_token=self.access_token)
def test_raises_type_error_if_access_token_is_not_provided(self):
self.assertRaises(TypeError, FacebookSession)
def test_builds_full_url_correctly(self):
path = '/path/'
expected_url = 'https://graph.facebook.com/v2.3/path/'
url = self.session._get_full_url(path)
self.assertEqual(expected_url, url)
@patch.object(Session, 'request')
def test_adds_access_token_to_params_if_no_params_provided(self, mocked_super_request):
mocked_super_request.return_value = MockedRequestsResponse()
self.session.request('GET', '/path/')
url = self.session._get_full_url('/path/')
mocked_super_request.assert_called_once_with('GET', url, {'access_token': self.access_token})
@patch.object(Session, 'request')
def test_adds_access_token_to_params_without_losing_params(self, mocked_super_request):
mocked_super_request.return_value = MockedRequestsResponse()
self.session.request('GET', '/path/', params={'key': 'value'})
url = self.session._get_full_url('/path/')
expected_params = {'access_token': self.access_token, 'key': 'value'}
mocked_super_request.assert_called_once_with('GET', url, expected_params)
@patch.object(Session, 'request', Mock(return_value=MockedRequestsResponse()))
def test_returns_custom_response_object(self):
response = self.session.request('GET', '/path/', params={'key': 'value'})
self.assertIsInstance(response, FacebookResponse)
self.assertEqual(200, response.status_code)
self.assertEqual({'key': 'value'}, response.data)
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Utilities for running a command."""
import os
import shlex
import subprocess # nosec
from enum import Enum
from typing import Dict, Iterable, List
import click
__all__ = ("run_command",)
class Tag(Enum):
"""Command string injection marker tags."""
def __init__(self, start: str, end: str):
self.start = start
self.end = end
ENV = ("{env:", "}")
def _tag_in_string(*, source: str, tag: Tag) -> bool:
"""Determine if a specific tag is in a string.
:param source: String to evaluate
:param Tag tag: Tag to look for
:returns: Decision
"""
if tag.start not in source:
return False
if tag.end not in source[source.index(tag.start) + len(tag.start) :]:
return False
return True
def _value_to_triplet(source: str, tag: Tag) -> Iterable[str]:
"""Extract the first tag value from a string, splitting the source string into the parts before and after the tag.
:param source: String to process
:param Tag tag: Tag to split on
:return: Split string values
"""
if not _tag_in_string(source=source, tag=tag):
raise ValueError(f"Tag not in source: {tag}")
prefix, _value = source.split(tag.start, 1)
value, suffix = _value.split(tag.end, 1)
return prefix, value, suffix
def _inject_environment_variables(*, command_string: str, environment_variables: Dict[str, str]) -> str:
"""Inject environment variables into the command string.
Environment variables must be identified using the ``{env:NAME}`` syntax.
:param str command_string: Command string to modify
:param dict environment_variables: Environment variables to use
:return: Modified command string
:rtype: str
"""
final_command = ""
remaining_command = command_string[:]
while remaining_command:
try:
prefix, name, remaining_command = _value_to_triplet(source=remaining_command, tag=Tag.ENV)
except ValueError:
final_command += remaining_command
remaining_command = ""
continue
final_command += prefix
try:
final_command += environment_variables[name]
except KeyError:
raise click.UsageError(
f'Unable to inject environment variable "{name}" into command. Environment variable not found.'
)
return final_command
def _clean_command_arguments(*, args: str) -> List[str]:
"""Clean args from input for execution."""
return [shlex.quote(i) for i in shlex.split(args)]
def run_command(*, raw_command: str, extra_env_vars: Dict[str, str]) -> subprocess.CompletedProcess:
"""Run a command with the provided environment variables.
:param str raw_command: Raw command string to execute
:param dict extra_env_vars: Environment variables to inject into subprocess environment
:returns: resulting process data
:rtype: subprocess.CompletedProcess
"""
env = os.environ.copy()
for key, value in extra_env_vars.items():
if key in env:
click.secho(f'Environment variable "{key}" will be overwritten in subprocess', fg="red", err=True)
env[key] = value
injected_command = _inject_environment_variables(command_string=raw_command, environment_variables=env)
command_args = _clean_command_arguments(args=injected_command)
# Using check=False because we process error cases in the upstream command that calls this function.
# Using shell=False because we explicitly want to contain this subprocess execution.
# Bandit is disabled for this line because they rightly will not allow any non-whitelisted calls to subprocess.
return subprocess.run(command_args, capture_output=True, env=env, check=False, shell=False) # nosec
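# A minimal usage sketch (not part of the original module): it assumes a POSIX
# system with an `echo` executable on PATH, and "GREETING" is a hypothetical
# variable name used only to illustrate the "{env:NAME}" injection syntax.
if __name__ == "__main__":
    result = run_command(
        raw_command="echo {env:GREETING}",  # "{env:GREETING}" is replaced before execution
        extra_env_vars={"GREETING": "hello from run_command"},
    )
    print(result.stdout.decode())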
|
def for_minus():
for row in range(5):
for col in range(6):
if row==2 and 0 < col < 5:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_minus():
row=0
while row<5:
col=0
while col<6:
if row==2 and 0 < col < 5:
print("*",end=" ")
else:
print(" ",end=" ")
col+=1
row+=1
print()
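# A small driver sketch (not part of the original snippet): both variants print the
# same 5x6 grid with a horizontal bar of '*' in the middle row.
if __name__ == "__main__":
    for_minus()
    print("-" * 12)
    while_minus()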
|
from .utils import (
Git2TypeConstIndex,
get_git2_param,
get_c_wrapper_param_list,
get_c_param_list,
get_c_wrapper_before_list,
)
import unittest
class TestUtils(unittest.TestCase):
C_PARAMS = "git_oid *out, git_index *index, git_repository *repo"
def test_match(self):
pt = Git2TypeConstIndex.parse('git_index *index')
self.assertIsNotNone(pt)
def test_get_param(self):
pt = get_git2_param('git_repository *repo')
self.assertEqual('jlong repoPtr', pt.c_header_param)
def test_get_c_wrapper_param_list(self):
pt_list = [get_git2_param(s.strip()) for s in self.C_PARAMS.split(',')]
s = get_c_wrapper_param_list(pt_list)
self.assertEqual('jobject out, jlong indexPtr, jlong repoPtr', s)
def test_get_c_param_list(self):
pt_list = [get_git2_param(s.strip()) for s in self.C_PARAMS.split(',')]
s = get_c_param_list(pt_list)
self.assertEqual(
'&c_oid, (git_index *)indexPtr, (git_repository *)repoPtr', s)
def test_get_c_wrapper_before_list(self):
pt_list = [get_git2_param(s.strip()) for s in self.C_PARAMS.split(',')]
s = get_c_wrapper_before_list(pt_list)
self.assertEqual('\t git_oid c_out;', s)
if __name__ == '__main__':
unittest.main()
|
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from pyspark.sql import SparkSession
from pyspark.sql.functions import desc, col
cluster_seeds = ['127.0.0.1']
spark = SparkSession.builder.appName('Word Cloud').config('spark.cassandra.connection.host', ','.join(cluster_seeds)).getOrCreate()
spark.sparkContext.setLogLevel('WARN')
sc = spark.sparkContext
table_name = ['crimetype', 'location_crime']
column_names = ['crimetype', 'location_description']
def generate_wordcloud(keyspace='pirates'):
for i in range(0, len(column_names)):
# Read the whole data to form wordcount
if table_name[i]=='crimetype':
data = spark.read.format("org.apache.spark.sql.cassandra")\
.options(table= table_name[i], keyspace=keyspace).load().sort(desc('count'))
else:
data = spark.read.format("org.apache.spark.sql.cassandra") \
.options(table=table_name[i], keyspace=keyspace).load().where(col('count') >= 1000).sort(desc('count'))
dict_key = list(data.select(column_names[i]).toPandas()[column_names[i]])
value = list(data.select('count').toPandas()['count'])
crime_dict = dict(zip(dict_key, value))
image_mask = np.array(Image.open("wordcloud/chicago_contour.jpg"))
# generate word cloud
if table_name[i] == 'crimetype':
wc = WordCloud(background_color="white", max_words=2000, mask=image_mask, contour_width=3, contour_color='steelblue', max_font_size=150)
else:
wc = WordCloud(background_color="white", max_words=2000, mask=image_mask, contour_width=3, contour_color='steelblue', max_font_size=200)
wc.generate_from_frequencies(crime_dict)
# show
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.figure()
# store to file
wc.to_file("./static/images/wc_"+column_names[i]+".png")
#generate_wordcloud()
"""
Function being called in app.py
# Command to run the file individually (makes connection to cassandra database)
uncomment the command above and run using (provided dependencies are met)
spark-submit --packages datastax:spark-cassandra-connector:2.4.0-s_2.11 wordcloud/chicago_wordcloud.py
"""
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Agreement import Agreement
class CustomerAgreement(Agreement):
"""Agreement between the customer and the service supplier to pay for service at a specific service location. It records certain billing information about the type of service provided at the service location and is used during charge creation to determine the type of service.Agreement between the customer and the service supplier to pay for service at a specific service location. It records certain billing information about the type of service provided at the service location and is used during charge creation to determine the type of service.
"""
def __init__(self, billingCycle='', loadMgmt='', budgetBill='', PricingStructures=None, CustomerAccount=None, StandardIndustryCode=None, AuxiliaryAgreements=None, MeterReadings=None, Equipments=None, ServiceSupplier=None, ServiceLocations=None, DemandResponseProgram=None, ServiceDeliveryPoints=None, Customer=None, EndDeviceControls=None, *args, **kw_args):
"""Initialises a new 'CustomerAgreement' instance.
@param billingCycle: Cycle day on which the associated customer account will normally be billed, used to determine when to produce the billing.
@param loadMgmt: Load management code.
@param budgetBill: Budget bill code.
@param PricingStructures: All pricing structures applicable to this customer agreement.
@param CustomerAccount: Customer account owning this agreement.
@param StandardIndustryCode:
@param AuxiliaryAgreements: All (non-service related) auxiliary agreements that refer to this customer agreement.
@param MeterReadings: (could be deprecated in the future) All meter readings for this customer agreement.
@param Equipments:
@param ServiceSupplier: Service supplier for this customer agreement.
@param ServiceLocations: All service locations regulated by this customer agreement.
@param DemandResponseProgram: Demand response program for this customer agreement.
@param ServiceDeliveryPoints: All service delivery points regulated by this customer agreement.
@param Customer: Customer for this agreement.
@param EndDeviceControls: Could be deprecated in the future.
"""
#: Cycle day on which the associated customer account will normally be billed, used to determine when to produce the billing.
self.billingCycle = billingCycle
#: Load management code.
self.loadMgmt = loadMgmt
#: Budget bill code.
self.budgetBill = budgetBill
self._PricingStructures = []
self.PricingStructures = [] if PricingStructures is None else PricingStructures
self._CustomerAccount = None
self.CustomerAccount = CustomerAccount
self._StandardIndustryCode = None
self.StandardIndustryCode = StandardIndustryCode
self._AuxiliaryAgreements = []
self.AuxiliaryAgreements = [] if AuxiliaryAgreements is None else AuxiliaryAgreements
self._MeterReadings = []
self.MeterReadings = [] if MeterReadings is None else MeterReadings
self._Equipments = []
self.Equipments = [] if Equipments is None else Equipments
self._ServiceSupplier = None
self.ServiceSupplier = ServiceSupplier
self._ServiceLocations = []
self.ServiceLocations = [] if ServiceLocations is None else ServiceLocations
self._DemandResponseProgram = None
self.DemandResponseProgram = DemandResponseProgram
self._ServiceDeliveryPoints = []
self.ServiceDeliveryPoints = [] if ServiceDeliveryPoints is None else ServiceDeliveryPoints
self._Customer = None
self.Customer = Customer
self._EndDeviceControls = []
self.EndDeviceControls = [] if EndDeviceControls is None else EndDeviceControls
super(CustomerAgreement, self).__init__(*args, **kw_args)
_attrs = ["billingCycle", "loadMgmt", "budgetBill"]
_attr_types = {"billingCycle": str, "loadMgmt": str, "budgetBill": str}
_defaults = {"billingCycle": '', "loadMgmt": '', "budgetBill": ''}
_enums = {}
_refs = ["PricingStructures", "CustomerAccount", "StandardIndustryCode", "AuxiliaryAgreements", "MeterReadings", "Equipments", "ServiceSupplier", "ServiceLocations", "DemandResponseProgram", "ServiceDeliveryPoints", "Customer", "EndDeviceControls"]
_many_refs = ["PricingStructures", "AuxiliaryAgreements", "MeterReadings", "Equipments", "ServiceLocations", "ServiceDeliveryPoints", "EndDeviceControls"]
def getPricingStructures(self):
"""All pricing structures applicable to this customer agreement.
"""
return self._PricingStructures
def setPricingStructures(self, value):
for p in self._PricingStructures:
filtered = [q for q in p.CustomerAgreements if q != self]
p._CustomerAgreements = filtered
for r in value:
if self not in r._CustomerAgreements:
r._CustomerAgreements.append(self)
self._PricingStructures = value
PricingStructures = property(getPricingStructures, setPricingStructures)
def addPricingStructures(self, *PricingStructures):
for obj in PricingStructures:
if self not in obj._CustomerAgreements:
obj._CustomerAgreements.append(self)
self._PricingStructures.append(obj)
def removePricingStructures(self, *PricingStructures):
for obj in PricingStructures:
if self in obj._CustomerAgreements:
obj._CustomerAgreements.remove(self)
self._PricingStructures.remove(obj)
def getCustomerAccount(self):
"""Customer account owning this agreement.
"""
return self._CustomerAccount
def setCustomerAccount(self, value):
if self._CustomerAccount is not None:
filtered = [x for x in self.CustomerAccount.CustomerAgreements if x != self]
self._CustomerAccount._CustomerAgreements = filtered
self._CustomerAccount = value
if self._CustomerAccount is not None:
if self not in self._CustomerAccount._CustomerAgreements:
self._CustomerAccount._CustomerAgreements.append(self)
CustomerAccount = property(getCustomerAccount, setCustomerAccount)
def getStandardIndustryCode(self):
return self._StandardIndustryCode
def setStandardIndustryCode(self, value):
if self._StandardIndustryCode is not None:
filtered = [x for x in self.StandardIndustryCode.CustomerAgreements if x != self]
self._StandardIndustryCode._CustomerAgreements = filtered
self._StandardIndustryCode = value
if self._StandardIndustryCode is not None:
if self not in self._StandardIndustryCode._CustomerAgreements:
self._StandardIndustryCode._CustomerAgreements.append(self)
StandardIndustryCode = property(getStandardIndustryCode, setStandardIndustryCode)
def getAuxiliaryAgreements(self):
"""All (non-service related) auxiliary agreements that refer to this customer agreement.
"""
return self._AuxiliaryAgreements
def setAuxiliaryAgreements(self, value):
for x in self._AuxiliaryAgreements:
x.CustomerAgreement = None
for y in value:
y._CustomerAgreement = self
self._AuxiliaryAgreements = value
AuxiliaryAgreements = property(getAuxiliaryAgreements, setAuxiliaryAgreements)
def addAuxiliaryAgreements(self, *AuxiliaryAgreements):
for obj in AuxiliaryAgreements:
obj.CustomerAgreement = self
def removeAuxiliaryAgreements(self, *AuxiliaryAgreements):
for obj in AuxiliaryAgreements:
obj.CustomerAgreement = None
def getMeterReadings(self):
"""(could be deprecated in the future) All meter readings for this customer agreement.
"""
return self._MeterReadings
def setMeterReadings(self, value):
for x in self._MeterReadings:
x.CustomerAgreement = None
for y in value:
y._CustomerAgreement = self
self._MeterReadings = value
MeterReadings = property(getMeterReadings, setMeterReadings)
def addMeterReadings(self, *MeterReadings):
for obj in MeterReadings:
obj.CustomerAgreement = self
def removeMeterReadings(self, *MeterReadings):
for obj in MeterReadings:
obj.CustomerAgreement = None
def getEquipments(self):
return self._Equipments
def setEquipments(self, value):
for p in self._Equipments:
filtered = [q for q in p.CustomerAgreements if q != self]
p._CustomerAgreements = filtered
for r in value:
if self not in r._CustomerAgreements:
r._CustomerAgreements.append(self)
self._Equipments = value
Equipments = property(getEquipments, setEquipments)
def addEquipments(self, *Equipments):
for obj in Equipments:
if self not in obj._CustomerAgreements:
obj._CustomerAgreements.append(self)
self._Equipments.append(obj)
def removeEquipments(self, *Equipments):
for obj in Equipments:
if self in obj._CustomerAgreements:
obj._CustomerAgreements.remove(self)
self._Equipments.remove(obj)
def getServiceSupplier(self):
"""Service supplier for this customer agreement.
"""
return self._ServiceSupplier
def setServiceSupplier(self, value):
if self._ServiceSupplier is not None:
filtered = [x for x in self.ServiceSupplier.CustomerAgreements if x != self]
self._ServiceSupplier._CustomerAgreements = filtered
self._ServiceSupplier = value
if self._ServiceSupplier is not None:
if self not in self._ServiceSupplier._CustomerAgreements:
self._ServiceSupplier._CustomerAgreements.append(self)
ServiceSupplier = property(getServiceSupplier, setServiceSupplier)
def getServiceLocations(self):
"""All service locations regulated by this customer agreement.
"""
return self._ServiceLocations
def setServiceLocations(self, value):
for p in self._ServiceLocations:
filtered = [q for q in p.CustomerAgreements if q != self]
p._CustomerAgreements = filtered
for r in value:
if self not in r._CustomerAgreements:
r._CustomerAgreements.append(self)
self._ServiceLocations = value
ServiceLocations = property(getServiceLocations, setServiceLocations)
def addServiceLocations(self, *ServiceLocations):
for obj in ServiceLocations:
if self not in obj._CustomerAgreements:
obj._CustomerAgreements.append(self)
self._ServiceLocations.append(obj)
def removeServiceLocations(self, *ServiceLocations):
for obj in ServiceLocations:
if self in obj._CustomerAgreements:
obj._CustomerAgreements.remove(self)
self._ServiceLocations.remove(obj)
def getDemandResponseProgram(self):
"""Demand response program for this customer agreement.
"""
return self._DemandResponseProgram
def setDemandResponseProgram(self, value):
if self._DemandResponseProgram is not None:
filtered = [x for x in self.DemandResponseProgram.CustomerAgreements if x != self]
self._DemandResponseProgram._CustomerAgreements = filtered
self._DemandResponseProgram = value
if self._DemandResponseProgram is not None:
if self not in self._DemandResponseProgram._CustomerAgreements:
self._DemandResponseProgram._CustomerAgreements.append(self)
DemandResponseProgram = property(getDemandResponseProgram, setDemandResponseProgram)
def getServiceDeliveryPoints(self):
"""All service delivery points regulated by this customer agreement.
"""
return self._ServiceDeliveryPoints
def setServiceDeliveryPoints(self, value):
for x in self._ServiceDeliveryPoints:
x.CustomerAgreement = None
for y in value:
y._CustomerAgreement = self
self._ServiceDeliveryPoints = value
ServiceDeliveryPoints = property(getServiceDeliveryPoints, setServiceDeliveryPoints)
def addServiceDeliveryPoints(self, *ServiceDeliveryPoints):
for obj in ServiceDeliveryPoints:
obj.CustomerAgreement = self
def removeServiceDeliveryPoints(self, *ServiceDeliveryPoints):
for obj in ServiceDeliveryPoints:
obj.CustomerAgreement = None
def getCustomer(self):
"""Customer for this agreement.
"""
return self._Customer
def setCustomer(self, value):
if self._Customer is not None:
filtered = [x for x in self.Customer.CustomerAgreements if x != self]
self._Customer._CustomerAgreements = filtered
self._Customer = value
if self._Customer is not None:
if self not in self._Customer._CustomerAgreements:
self._Customer._CustomerAgreements.append(self)
Customer = property(getCustomer, setCustomer)
def getEndDeviceControls(self):
"""Could be deprecated in the future.
"""
return self._EndDeviceControls
def setEndDeviceControls(self, value):
for x in self._EndDeviceControls:
x.CustomerAgreement = None
for y in value:
y._CustomerAgreement = self
self._EndDeviceControls = value
EndDeviceControls = property(getEndDeviceControls, setEndDeviceControls)
def addEndDeviceControls(self, *EndDeviceControls):
for obj in EndDeviceControls:
obj.CustomerAgreement = self
def removeEndDeviceControls(self, *EndDeviceControls):
for obj in EndDeviceControls:
obj.CustomerAgreement = None
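# A minimal usage sketch (not part of the generated CIM15 code): only the plain string
# attributes defined above are exercised; no associated CIM objects are created, so the
# bidirectional-reference setters are left untouched.
if __name__ == "__main__":
    agreement = CustomerAgreement(billingCycle="15", loadMgmt="LM-A", budgetBill="BB-1")
    print(agreement.billingCycle, agreement.loadMgmt, agreement.budgetBill)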
|
import nltk
def parse(sent, grammar):
# nltk.cfg.parse_cfg / TD_STRATEGY / nbest_parse belong to the old NLTK 2.x API; use the current equivalents.
gr = nltk.CFG.fromstring(grammar)
parser = nltk.ChartParser(gr)
trees = list(parser.parse(sent.split()))
nltk.draw.draw_trees(*trees)
grammar = """
S -> NP VP
VP -> V NP | VP PP
NP -> Det N | NP PP
PP -> P NP
NP -> 'I'
Det -> 'the' | 'a' | 'my'
N -> 'elephant' | 'pajamas' | 'man' | 'park' | 'telescope'
V -> 'shot' | 'saw'
P -> 'in' | 'on' | 'with'
"""
sent = 'I shot the elephant in my pajamas'
parse(sent, grammar)
|
import unittest
import sys
import pytest
import mock
from niworkflows.viz.utils import save_html
class TestUtils(unittest.TestCase):
@mock.patch('jinja2.Environment')
@mock.patch('niworkflows.common.report.open', mock.mock_open(), create=True)
@pytest.mark.skip(reason='this test always fails, mock not working OK')
def test_save_html(self, jinja_mock):
template_mock = mock.MagicMock()
jinja_mock.return_value.get_template.return_value = template_mock
unique_string = 'unique string'
html = 'some html'
report_file_name = 'report file name'
save_html(template='overlay_3d_report.tpl',
report_file_name=report_file_name,
unique_string=unique_string,
another_keyword=html)
template_mock.render.assert_called_once_with({'unique_string': unique_string,
'another_keyword': html})
|
"""
A structural thing.
"""
from ._structure import Structure
from .property import Property
__all__ = [
"Structure",
"Property",
]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TemplateJobInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'application_name': 'str',
'template_id': 'str',
'project_name': 'str',
'repo_type': 'int',
'properties': 'object',
'template_dependencies': 'list[str]',
'repo_info': 'RepositoryInfo'
}
attribute_map = {
'application_name': 'application_name',
'template_id': 'template_id',
'project_name': 'project_name',
'repo_type': 'repo_type',
'properties': 'properties',
'template_dependencies': 'template_dependencies',
'repo_info': 'repo_info'
}
def __init__(self, application_name=None, template_id=None, project_name=None, repo_type=None, properties=None, template_dependencies=None, repo_info=None):
"""TemplateJobInfo - a model defined in huaweicloud sdk"""
self._application_name = None
self._template_id = None
self._project_name = None
self._repo_type = None
self._properties = None
self._template_dependencies = None
self._repo_info = None
self.discriminator = None
if application_name is not None:
self.application_name = application_name
self.template_id = template_id
if project_name is not None:
self.project_name = project_name
if repo_type is not None:
self.repo_type = repo_type
if properties is not None:
self.properties = properties
if template_dependencies is not None:
self.template_dependencies = template_dependencies
if repo_info is not None:
self.repo_info = repo_info
@property
def application_name(self):
"""Gets the application_name of this TemplateJobInfo.
Application name.
:return: The application_name of this TemplateJobInfo.
:rtype: str
"""
return self._application_name
@application_name.setter
def application_name(self, application_name):
"""Sets the application_name of this TemplateJobInfo.
Application name.
:param application_name: The application_name of this TemplateJobInfo.
:type: str
"""
self._application_name = application_name
@property
def template_id(self):
"""Gets the template_id of this TemplateJobInfo.
ID of the template that this job depends on.
:return: The template_id of this TemplateJobInfo.
:rtype: str
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""Sets the template_id of this TemplateJobInfo.
ID of the template that this job depends on.
:param template_id: The template_id of this TemplateJobInfo.
:type: str
"""
self._template_id = template_id
@property
def project_name(self):
"""Gets the project_name of this TemplateJobInfo.
Project name.
:return: The project_name of this TemplateJobInfo.
:rtype: str
"""
return self._project_name
@project_name.setter
def project_name(self, project_name):
"""Sets the project_name of this TemplateJobInfo.
Project name.
:param project_name: The project_name of this TemplateJobInfo.
:type: str
"""
self._project_name = project_name
@property
def repo_type(self):
"""Gets the repo_type of this TemplateJobInfo.
Address type of the generated application code; currently 0 (CodeHub repository) and 1 (archive download URL) are supported.
:return: The repo_type of this TemplateJobInfo.
:rtype: int
"""
return self._repo_type
@repo_type.setter
def repo_type(self, repo_type):
"""Sets the repo_type of this TemplateJobInfo.
Address type of the generated application code; currently 0 (CodeHub repository) and 1 (archive download URL) are supported.
:param repo_type: The repo_type of this TemplateJobInfo.
:type: int
"""
self._repo_type = repo_type
@property
def properties(self):
"""Gets the properties of this TemplateJobInfo.
Dynamic application parameters as a JSON object.
:return: The properties of this TemplateJobInfo.
:rtype: object
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this TemplateJobInfo.
Dynamic application parameters as a JSON object.
:param properties: The properties of this TemplateJobInfo.
:type: object
"""
self._properties = properties
@property
def template_dependencies(self):
"""Gets the template_dependencies of this TemplateJobInfo.
Set of template dependency IDs.
:return: The template_dependencies of this TemplateJobInfo.
:rtype: list[str]
"""
return self._template_dependencies
@template_dependencies.setter
def template_dependencies(self, template_dependencies):
"""Sets the template_dependencies of this TemplateJobInfo.
Set of template dependency IDs.
:param template_dependencies: The template_dependencies of this TemplateJobInfo.
:type: list[str]
"""
self._template_dependencies = template_dependencies
@property
def repo_info(self):
"""Gets the repo_info of this TemplateJobInfo.
:return: The repo_info of this TemplateJobInfo.
:rtype: RepositoryInfo
"""
return self._repo_info
@repo_info.setter
def repo_info(self, repo_info):
"""Sets the repo_info of this TemplateJobInfo.
:param repo_info: The repo_info of this TemplateJobInfo.
:type: RepositoryInfo
"""
self._repo_info = repo_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateJobInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
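# A minimal usage sketch (not part of the SDK model): the field values are hypothetical,
# and repo_info would normally be a RepositoryInfo instance rather than None.
if __name__ == "__main__":
    job = TemplateJobInfo(application_name="demo-app", template_id="tpl-123", repo_type=1)
    print(job.to_dict())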
|
"""Tests for autocsr."""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 17:02:19 2018
@author: Stamatis Lefkimmiatis
@email : s.lefkimmatis@skoltech.ru
"""
import torch as th
from pydl import utils
def dct(tensor):
r"""Initializes the input tensor with weights from the dct basis or dictionary."""
assert(tensor.ndimension() == 4),"A 4D tensor is expected."
output_features,input_channels,H,W = tensor.shape
if H*W*input_channels == output_features+1:
tensor.data.copy_(utils.gen_dct3_kernel(tensor.shape[1:]).type_as(tensor)[1:,...])
else:
if input_channels == 1:
weights = utils.odctndict((H,W),output_features+1)
else:
weights = utils.odctndict((H,W,input_channels),output_features+1)
weights = weights[:,1:output_features+1].type_as(tensor).view(H,W,input_channels,output_features)
weights = weights.permute(3,2,0,1)
tensor.data.copy_(weights)
def dctMultiWiener(tensor):
r"""Initializes the input tensor with weights from the dct basis or dictionary."""
assert(tensor.dim() in (4,5)),"A 4D or 5D tensor is expected."
if tensor.dim() == 4:
output_features,input_channels,H,W = tensor.shape
else:
numFilters,output_features,input_channels,H,W = tensor.shape
if H*W == output_features+1:
weights = utils.gen_dct2_kernel((H,W)).type_as(tensor)[1:,...]
if tensor.dim() == 4:
weights = weights.repeat(1,input_channels,1,1)
else:
weights = weights.unsqueeze_(0).repeat(numFilters,1,input_channels,1,1)
else:
if input_channels == 1:
weights = utils.odctndict((H,W),output_features+1)
else:
weights = utils.odctndict((H,W,input_channels),output_features+1)
weights = weights[:,1:output_features+1].type_as(tensor).view(H,W,input_channels,output_features)
weights = weights.permute(3,2,0,1)
if tensor.dim() == 5:
weights = weights.unsqueeze_(0).repeat(numFilters,1,1,1,1)
tensor.data.copy_(weights)
def rbf_lut(centers,sigma,start,end,step):
r"""Computes necessary data for the Look-up table of rbf computation."""
data_samples = th.arange(start,end,step).type_as(centers) # th.arange replaces the deprecated th.range
data_samples = data_samples.unsqueeze(1)
data_samples = (data_samples - centers)/sigma
return data_samples
def msra(tensor):
r"""Initializes the input tensor with weights according to He initialization."""
output_channels,input_channels,H,W = tensor.shape
tensor.data.copy_(th.randn_like(tensor).\
mul(th.sqrt(th.Tensor([2.0/(H*W*input_channels)])).type_as(tensor)))  # std = sqrt(2/fan_in), per He et al.
def convWeights(tensor,init_type = 'dct'):
if init_type == 'dct':
dct(tensor)
elif init_type == 'msra':
msra(tensor)
else:
raise NotImplementedError
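# A minimal usage sketch (not part of the original module): 'msra' is used here because it
# only needs PyTorch, whereas the DCT initialisers also rely on pydl.utils.
if __name__ == "__main__":
    w = th.empty(64, 3, 5, 5)
    convWeights(w, init_type='msra')
    print(w.shape, float(w.std()))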
|
from unittest import TestCase
from attendance.Api import Api
from attendance.Objects import AttendanceObject
class TestAttendanceObject(TestCase):
def test_api(self):
api = Api()
attendance = AttendanceObject(api)
self.assertEqual(api, attendance.api)
api2 = Api()
attendance.api = api2
self.assertEqual(api2, attendance.api)
|
from pathlib import Path
# import shutil
import subprocess
from build_html import get_solution_details
if __name__ == '__main__':
for path in Path('solutions').glob('**/*.asm'):
details = get_solution_details(path)
size = details['size']
speed = details['speed']
dst = path.parent / f'size-{size}_speed-{speed}.asm'
# shutil.move(path, dst)
# 'git mv' already stages the removal of the old path, so no separate 'git rm' is needed.
subprocess.call(['git', 'mv', path, dst])
|
import requests, bs4
import re
import pandas as pd
from datetime import datetime
start_date = '2020.01.01' # start date
end_date = '2022.02.13' # end date
start = datetime.strptime(str(start_date),"%Y.%m.%d")
end = datetime.strptime(str(end_date),"%Y.%m.%d")
new_start_date = start.strftime("%Y-%m-%d")
new_end_date = end.strftime("%Y-%m-%d")
idx = pd.date_range(f'{new_start_date}',f'{new_end_date}',)
csv_date = []
csv_local = []
## Method 1
for page in range(1,1500):
url = f'https://www.cdc.gov.tw/Bulletin/List/MmgtpeidAR5Ooai4-fgHzQ?page={page}&startTime={start_date}&endTime={end_date}'
htmlfile = requests.get(url)
objSoup = bs4.BeautifulSoup(htmlfile.text, 'lxml')
objTag = objSoup.select('.cbp-item') # select elements with the .cbp-item class
#print("objTag list length = ", len(objTag))
if len(objTag) != 0: # keep looping while select() returns a non-empty list (the number of pages is unknown, so up to 1500 pages are assumed; an empty result means the page does not exist and the loop ends)
for i in range(len(objTag)):
covid = objTag[i].find_all('p', {'class':'JQdotdotdot'}) # news headline (e.g. "xx local and xx imported cases added today")
covid_year = objTag[i].find_all('p',{'class':'icon-year'}) # year and month of the news item (e.g. "2022 - 1")
covid_date = objTag[i].find_all('p',{'class':'icon-date'}) # day of the month of the news item (e.g. "12")
for j in range(len(covid)):
covid_text = str(covid[j].text) # convert the headline text to a string
# extract the number of local cases
pattern = r'(\d+)例((COVID-19)?)本土'
result = re.search(pattern, covid_text)
real_date = str(covid_year[j].text + '-' + covid_date[j].text).replace(' - ','-') # normalize the date format
#print(real_date)
if result != None: # if the headline reports local cases
if ('新增' in covid_text) and ('例本土' in covid_text) and ('境外' in covid_text):
csv_date.append(real_date)
pattern1 = r'(\d+)例本土'
result1 = re.search(pattern1, covid_text)
local_num = str(result1.group().replace(r'例本土',''))
print(local_num)
csv_local.append(int(local_num))
elif ('新增' in covid_text) and ('例COVID-19本土' in covid_text) and ('境外' in covid_text):
csv_date.append(real_date)
pattern2 = r'(\d+)例COVID-19本土'
result2 = re.search(pattern2, covid_text)
local_num = str(result2.group().replace(r'例COVID-19本土',''))
print(local_num)
csv_local.append(int(local_num))
elif ('新增' in covid_text) and ('本土' in covid_text) and ('境外' not in covid_text):
csv_date.append(real_date)
pattern3 = r'(\d+)例'
result3 = re.search(pattern3, covid_text)
local_num = str(result3.group().replace(r'例',''))
print(local_num)
csv_local.append(int(local_num))
else:
continue
else:
break
# ## Method 2
# for page in range(1,1500):
# url = f'https://www.cdc.gov.tw/Bulletin/List/MmgtpeidAR5Ooai4-fgHzQ?page={page}&startTime={start_date}&endTime={end_date}'
# htmlfile = requests.get(url)
# objSoup = bs4.BeautifulSoup(htmlfile.text, 'lxml')
# objTag = objSoup.select('.cbp-item') # select elements with the .cbp-item class
# #print("objTag list length = ", len(objTag))
# if len(objTag) != 0: # keep looping while select() returns a non-empty list (up to 1500 pages are assumed; an empty result means the page does not exist and the loop ends)
# for i in range(len(objTag)):
# covid = objTag[i].find_all('p', {'class':'JQdotdotdot'}) # news headline (e.g. "xx local and xx imported cases added today")
# covid_year = objTag[i].find_all('p',{'class':'icon-year'}) # year and month of the news item (e.g. "2022 - 1")
# covid_date = objTag[i].find_all('p',{'class':'icon-date'}) # day of the month of the news item (e.g. "12")
# for j in range(len(covid)):
# covid_text = str(covid[j].text) # convert the headline text to a string
# if ('新增' in covid_text) and ('本土' in covid_text) and ('境外' in covid_text):
# pattern = r'(\d+)例本土'
# result = re.search(pattern, covid_text)
# real_date = str(covid_year[j].text + '-' + covid_date[j].text).replace(' - ','-') # normalize the date format
# if result != None: # if the headline matches the '(\d+)例本土' pattern
# csv_date.append(real_date)
# local_num = str(result.group().replace(r'例本土',''))
# print(local_num)
# csv_local.append(int(local_num))
# else:
# csv_date.append(real_date)
# pattern = r'(\d+)例COVID-19本土'
# result = re.search(pattern, covid_text)
# local_num = str(result.group().replace(r'例COVID-19本土',''))
# print(local_num)
# csv_local.append(int(local_num))
# elif ('新增' in covid_text) and ('本土' in covid_text) and ('境外' not in covid_text):
# pattern = r'(\d+)例'
# result = re.search(pattern, covid_text)
# real_date = str(covid_year[j].text + '-' + covid_date[j].text).replace(' - ','-') # normalize the date format
# if result != None: # if there are local cases
# csv_date.append(real_date)
# csv_local.append(int(str(result.group().replace('例',''))))
# else:
# continue
# else:
# continue
# else:
# break # (up to 1500 pages are assumed since the page count is unknown; an empty result means the page does not exist, so the loop ends)
# print(len(csv_date))
# print(len(csv_local))
data = [csv_date, csv_local]
col = ['日期','本土確診人數']
df = pd.DataFrame(list(zip(*data)),columns=col)
df_new1 = df.groupby(['日期'], sort=False)['本土確診人數'].sum().reset_index() # if the same day has more than one news item announcing local cases, sum them
df_new2 = df_new1.set_index(pd.to_datetime(df_new1['日期']))
#print(df_new2)
df_new = df_new2.reindex(idx, fill_value=0)
df_final = df_new['本土確診人數']
#print(df_final)
df_final.to_csv(f'{new_start_date}_{new_end_date}本土確診人數.csv')
|
#!/usr/bin/env python3.7
#
# File: sendpkts.py
#
# Description : flooder script
# Created by : Quinn Burke (qkb5007@psu.edu)
# Date : November 2019
# Last Modified : November 2019
### Imports ###
from random import randint, shuffle
from scapy.all import *  # required: flood() below builds and sends packets with IP/ICMP/Raw/send
import time
import threading
import sys
import numpy as np
import matplotlib.pyplot as plt
### Classes ###
class Flow():
def __init__(self, bytes_left=0, rate=0, duration=0, src_ip=None):
self.bytes_left = bytes_left
self.rate = rate
self.duration = duration
self.src_ip = src_ip
### Functions ###
def generateFlows(_sim_time, _client_rate):
num_flows = _sim_time * _client_rate
# See https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.pareto.html for generating samples with numpy pareto; specifically, using the formula below to obtain classical pareto from the pareto2/lomax dist
# generate sizes
xm1 = 10.0 # scale
a1 = 1.2 # shape
sizes = sorted((np.random.pareto(a1, num_flows) + 1) * xm1)
# generate durations
xm2 = 0.001
a2 = 1.5
durs = sorted((np.random.pareto(a2, num_flows) + 1) * xm2)
# sort/match to create flows
flows = [None]*num_flows
used_ips = list()
for i in range(num_flows):
src_ip = "10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(randint(12, 99)))
while src_ip in used_ips:
src_ip = "10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(randint(12, 99)))
used_ips.append(src_ip)
flows[i] = Flow(sizes[i], sizes[i]/durs[i], durs[i], src_ip)
gr = plt.GridSpec(1, 7, wspace=0.4, hspace=0.3)
cdf_sz = calc_cdf_fast(sizes)
plt.subplot(gr[:, :3])
plt.xlabel('Flow size (B)')
plt.ylabel('Cumulative Probability')
plt.title('Flow Sizes')
plt.plot(sizes, cdf_sz, color='green')
cdf_durs = calc_cdf_fast(durs)
plt.subplot(gr[:, 4:])
plt.xlabel('Durations (s)')
plt.ylabel('Cumulative Probability')
plt.title('Flow Durations')
plt.plot(durs, cdf_durs, color='red')
plt.show()
return flows
def calc_cdf_fast(arr):
cdf = []
for val in arr:
count = 0
for other_val in arr:
if other_val <= val:
count += 1
cdf.append(float(count*1.0/len(arr)))
return cdf
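# A vectorized sketch of the same empirical CDF (a hypothetical helper, not called anywhere
# in this script): np.searchsorted on the sorted array gives, for each value, the number of
# elements less than or equal to it, which is exactly what calc_cdf_fast counts.
def calc_cdf_vectorized(arr):
    sorted_arr = np.sort(np.asarray(arr, dtype=float))
    counts = np.searchsorted(sorted_arr, np.asarray(arr, dtype=float), side='right')
    return (counts / float(len(arr))).tolist()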
# def flood(_flows, flooder_idx):
def flood(_sim_time, _client_rate, _flows, _collection_interval):
# print("flows: ", _flows, "\nflooder_idx: ", flooder_idx)
# for i in range(1000):
# # print("[Flooder %d]: sending packets toward target 10.0.0.100..." %
# # flooder_idx)
# print("[Flooder]: sending packets toward target 10.0.0.100...")
# # pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(
# # randint(0, 255)), str(randint(12, 99))), dst="10.0.0.100")/ICMP()
# pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(
# randint(12, 99))), dst="10.0.0.100")/ICMP()/Raw(load='0'*1472) # max load is size 1472 because of 28 bytes ICMP packet, so everything can fit into 1500 MTU
# print(pkt.summary(), "(len: ", len(pkt), ")")
# # send(pkt, count=10, inter=0.1, iface="uph-eth0")
# send(pkt, count=1000000, iface="uph-eth0")
active_flows = list()
shuffle(_flows) # shuffle them randomly
for i in range(_sim_time):
# add new flows to active_flows list
for _ in range(_client_rate):  # '_' so the outer loop's counter i is not shadowed
# do this before updating existing flows (as opposed to the simulator's order) so we don't have to update separately
# add _client_rate flows to the active list so we can update activity below
active_flows.append(_flows.pop())  # list has no .add(); append the next generated flow
# update existing flows
total_send_bytes = 0
for flow in active_flows:
if flow.duration == 0: # from ~line 563 in Simulator.java
active_flows.remove(flow) # just remove (removed first in the simulator but we do it here)
elif flow.duration <= (1.0/_collection_interval):
total_send_bytes += flow.bytes_left # dump rest of bytes
flow.bytes_left = 0 # update these to get removed next iteration
flow.duration = 0
elif flow.duration > (1.0/_collection_interval):
if flow.bytes_left == 0: # line 617 (constant average rate)
active_flows.remove(flow)
elif flow.bytes_left <= flow.rate:
total_send_bytes += flow.bytes_left # dump rest of bytes
flow.bytes_left = 0 # update these to get removed next iteration
flow.duration = 0
elif flow.bytes_left > flow.rate:
total_send_bytes += flow.rate # send this interval's share at the flow's average rate
flow.bytes_left -= flow.rate
flow.duration -= (1.0/_collection_interval) # 1s collection interv granularity currently
else:
active_flows.remove(flow) # just remove (?)
else:
active_flows.remove(flow) # just remove (?)
# send the flows toward the edge switch (ups) connecting to the servers (h1-h10)
# do we want to update the flows then send, or do we want to update the flows and send at the same time above? We want to send with respect to each source so aggregating them here with total_send_bytes may not be the correct way;
print("[Flooder]: sending packets toward target 10.0.0.100...")
pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(
randint(12, 99))), dst="10.0.0.100")/ICMP()/Raw(load='0'*1472) # 1500 byte MTU
print(pkt.summary(), "(len: ", len(pkt), ")")
# send(pkt, count=10, inter=0.1, iface="uph-eth0")
send(pkt, count=1000000, iface="uph-eth0")
time.sleep(1)
if __name__ == '__main__':
# start = time.time_ns()
# f = [0]*1800000
# for i in range(1800000):
# f[i] = i
# print("elapsed: ", str((time.time_ns()-start)/1e9), "s")
# print("len: ", str(len(f)))
# # print("f: ", f)
# exit(0)
# Note: only one server known to flooders "10.0.0.100"
sim_time = 180 # in seconds
client_rate = 10 # new incoming flows per second
collection_interval = 1.0
flows = generateFlows(sim_time, client_rate)
# num_flooders = 1
# nf = len(flows)/num_flooders # evenly divide flows for each flooder
# flooders = [None]*num_flooders
# for i in range(num_flooders):
# flooders[i] = threading.Thread(
# target=flood, args=(flows[int(i*nf):int((i+1)*nf)], i))
# flooders[i].start()
# # wait for flooders to finish
# for i in range(num_flooders):
# flooders[i].join()
flood(sim_time, client_rate, flows, collection_interval)
|
import re
import sys
import argparse
import os
import json
from pathlib import Path
from datetime import datetime
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import pandas as pd
cwd = os.getcwd()
sys.path.insert(1, cwd + '/src')
import PcapUtils
import MUDGenUtils
#from Analyses import *
from Constants import *
from MRTAFeed import MRTFeed
from MRTADashboard import MRTADashboard
from VMUDEnforcer import Virtual_MUD_enforcer
from MRTACharacterizator import MRTACharacterizator
from MRTAPairClustersProcessor import MRTAPairClustersProcessor
import device_mrt_pcaps_to_csv as mrttocsv
"""
MUD Resources :) @:
- NIST https://www.nccoe.nist.gov/projects/building-blocks/mitigating-iot-based-ddos/securing-home-iot-devices
- NIST https://github.com/usnistgov/MUD-PD
- https://github.com/iot-onboarding/mud-visualizer
NOTE: MUD resources and interest have increased a whole lot since February 2021!
- NIST https://www.nccoe.nist.gov/content/mud-related-resources
"""
""" Project TODO's
- TODO: (adj 26/7/21)
* Compact pcap-to-csv bidir flows tool, to have it scale on whatever pcap>CSV to feed MRTA
* DONE - Code MRTA module taking logic from colab file morgese-master-thesis-main.ipynb (https://colab.research.google.com/drive/1tLYoY6I0XJq5vSUoMhRG-UuT0J8HVe9a?usp=sharing)
- TODO: (adj 27/7/21)
* Flexibilize CSV/Dataframe column names as Constant values (e.g., [TS, TE, SA] instead of ['ts', 'te', 'sa', ...])
- TODO: (adj 28/7/21)
* Validate HDBSCAN parameters with AMI-precision over different batches of various dimensions, report average for selected params.
* Implement eventualities-robust OpenFlow rules generation from MUD profile, not to rely on MUDGee
* INVESTIGATE MUD-PD https://github.com/usnistgov/MUD-PD (installed in ~/Software, import pcaps crashes as of now...)
- TODO TODO TODO (adj 23/9/21)
* Flexibilize CSV generation from single pcaps in a folder.
- TODO TODO TODO TODO (adj 14/10/21)
* INSTEAD OF FILTERING PACKETS AND MATCHING AGAINST FLOWS, FIRST CONVERT PCAP TO FILTER INTO FLOWS, AND THEN FILTER IT
* In other words, switch the pcaps-to-flows-csv procedure one step earlier, and directly output MRT Flow CSVs
"""
def main(arguments=None):
################################################################################################
# Arguments definition
################################################################################################
parser = argparse.ArgumentParser(description='TBD')
# Mode can be "mudgen", "reject", "analyze"
modes_list = [MODE_MUDGEN, MODE_REJECT, MODE_FLOWS_GENERATION, MODE_ANALYZE]
parser.add_argument('--mode', metavar='<mode of program execution>',
help=f"One of: [ {modes_list} ] .\n Consult documentation on github for detailed usage explanation!",
required=True)
# If mode is "mudgen", a MUD config file to feed MUDgee is needed
parser.add_argument('--mudgen_config', metavar='<JSON file for mudgee MUD generation>', help='name of JSON file for mudgee MUD generation', required=False)
# If mode is "reject",
# - the config file for the referred device and gateway information, and
# - the relative path to MUD (OpenFLow) rules in CSV
# must be specified
parser.add_argument('--reject_config', metavar='<JSON file of MUD config for filtering data>', help='name of MUD config JSON for specified MUD to enforce.\nRequired if mode is "reject"', required=False)
parser.add_argument('--reject_mud_rules', metavar='<Relative-path of CSV file of filtering rules (only OpenFlow standard supported ATM)>', help='CSV rules file generated by MUDgee, for specific device MUD to enforce.\nRequired if mode is "reject"', required=False)
parser.add_argument('--reject_to_named_dir', metavar='<String>', help='Name of directory that will be generated in outputs/<device>/ where the results of the "reject" operation will be stored.\nThis parameter is optional', required=False)
# Not used at the moment
parser.add_argument('--reject_online_interface', metavar='<String>', help='Name of the local interface on which to listen to device traffic."', required=False)
# Optional, if set, limits the number of packets that are processed when rejecting traffic
parser.add_argument('--pcap_limit', metavar='<integer>', help='Number to indicate how many packets will be processed by either functionality', required=False)
# Generation of custom NetFlow CSV file
parser.add_argument('--flowsgen_tgt_dir', metavar='<String>', help='Full or relative path to directory containing MUD-rejected pcap files', required=False)
analysis_actions_list = [ANALYSIS_ACTION_IPS_FLOWS_GRAPHS, ANALYSIS_ACTION_PORTS_FLOWS_GRAPHS, ANALYSIS_ACTION_PKTS_CSV, ANALYSIS_ACTION_IPS_MAP, ANALYSIS_ACTION_FILTER_KNOWN_PROVIDERS, ANALYSIS_ACTION_MRTA_CHARACTERIZE, ANALYSIS_ACTION_DEVICE_MRT_EVOLUTION_DATAGEN]
parser.add_argument('--analysis_action', metavar='<string to perform>', help='Indicates what action to perform in analysis, related to analysis pcap.\nSupported actions are \n{}.'.format(analysis_actions_list), required=False)
parser.add_argument('--analysis_tgt', metavar='<pcap/csv file path>', help='path to file containing MRT-related information. Consult documentation for exhaustive explanation.', required=False)
parser.add_argument('--analysis_capture_metadata', metavar='<file path to json object/dict>', help='Metadata dictionary object describing the capture to analyse. \nIt shall contain at least "device_id" (string), and "deployment_info" (any type as of now) that describes the setting where the device is (e.g., lon, lat, industry_type, ...)', required=False)
parser.add_argument('--analysis_devname', metavar='<name of device>', help='name of the device to which the filtering refers. It is needed to output the analysis results to the right folder [outputs/<devname>].', required=False)
parser.add_argument('--dsr_path', help='Dataset Scaler Reference path. Must be specified to set global scaling parameters when processing MRT flows, for --analysis_action={}'.format(ANALYSIS_ACTION_MRTA_CHARACTERIZE), required=False)
parser.add_argument('--mrtfeeds_config', metavar='file path', help=f'To use with --mode {MODE_MONITOR}.\nSpecify the configuration monitor file from which mrt feeds to compare (JSON list of dev_metadata + csv feed) are taken.', required=False)
parser.add_argument('--monitor_features', help=f'To use with --mode {MODE_MONITOR}.\nSpecify the MRT feed features to cross-compare on the MRT feeds list specified in --mrtfeeds_config.\nUse format feature1,feature2,... See documentation for the list of supported MRT feed features.', required=False)
#parser.add_argument('--monitor_output', help=f'To use with --mode {MODE_MONITOR}.\nSpecify the path to which the monitor plots output will be exported.', required=False)
################################################################################################
# Arguments parsing
################################################################################################
args = parser.parse_args(arguments)
mode = args.mode
# NOTE: All parameters default to None values if not specified
# mudgen_config specifies gateway MAC, IPv4, IPv6; device MAC, name; PCAP file on which device MUD is generated
mudgen_config = MUD_CONFIGS_FOLDER + args.mudgen_config if args.mudgen_config is not None else None
# relative path to define, allows use of filtering rules from other origins than MUDgee
reject_config = args.reject_config if args.reject_config is not None else None
reject_mud_rules = args.reject_mud_rules if args.reject_mud_rules is not None else None
pcap_limit = int(args.pcap_limit) if (args.pcap_limit is not None and int(args.pcap_limit) > 0) else None
reject_to_named_dir = args.reject_to_named_dir if args.reject_to_named_dir is not None else None
reject_online_interface = args.reject_online_interface if args.reject_online_interface is not None else None
flowsgen_tgt_dir = args.flowsgen_tgt_dir if args.flowsgen_tgt_dir is not None else None
analysis_action = args.analysis_action if args.analysis_action is not None else None
analysis_capture_metadata = CHATACTERIZATION_METADATA_FOLDER + args.analysis_capture_metadata if args.analysis_capture_metadata is not None else None
analysis_tgt = args.analysis_tgt if args.analysis_tgt is not None else None
analysis_devname = args.analysis_devname if args.analysis_devname is not None else None
dsr_path = args.dsr_path if args.dsr_path is not None else None
mrtfeeds_config = args.mrtfeeds_config if args.mrtfeeds_config is not None else None
monitor_features = args.monitor_features if args.monitor_features is not None else None
################################################################################################
# Preliminary files existence checks
################################################################################################
# Manage case if files do not exist
if mudgen_config is not None and not os.path.isfile(mudgen_config):
print('>>> ERROR: Mudgen config [ {} ] does not exist'.format(mudgen_config), file=sys.stderr)
sys.exit(-1)
if reject_config is not None and not (os.path.isfile(reject_config) or os.path.isdir(reject_config)):
print('>>> ERROR: Reject config [ {} ] does not exist'.format(reject_config), file=sys.stderr)
sys.exit(-1)
if analysis_capture_metadata is not None and not os.path.isfile(analysis_capture_metadata):
print('>>> ERROR: Analysis characterization metadata [ {} ] does not exist'.format(analysis_capture_metadata), file=sys.stderr)
sys.exit(-1)
if reject_mud_rules is not None and not os.path.isfile(reject_mud_rules):
print('>>> ERROR: Mud filtering rules [ {} ] does not exist'.format(reject_mud_rules), file=sys.stderr)
sys.exit(-1)
if analysis_tgt is not None and not (os.path.isfile(analysis_tgt) or os.path.isdir(analysis_tgt)):
print('>>> ERROR: File/directory to analyse [ {} ] does not exist'.format(analysis_tgt), file=sys.stderr)
sys.exit(-1)
if dsr_path is not None and not os.path.isfile(dsr_path):
print('>>> ERROR: Dataset scaler reference does not exist at [ {} ]'.format(dsr_path), file=sys.stderr)
sys.exit(-1)
if mrtfeeds_config is not None and not os.path.isfile(mrtfeeds_config):
print('>>> ERROR: MRT feeds config [ {} ] does not exist'.format(mrtfeeds_config), file=sys.stderr)
sys.exit(-1)
# NOTE: Modes are handled in an if-elif-else chain, as they are mutually exclusive
################################################################################################
# MODE MUDGEN
################################################################################################
# Create MUD config file to feed MUDgee
if mode == MODE_MUDGEN:
if mudgen_config is not None:
# Get info from MUD config file
print('>>> MUDGEN CONFIG FILE: {}'.format(mudgen_config))
with open(mudgen_config) as mg_cf:
mg_data = json.load(mg_cf)
device_name = mg_data['deviceConfig']['deviceName']
# Run mudgee
mudgee_gen_outcome = MUDGenUtils.run_mudgee(mudgen_config)
print('>>> MUD data to generate with MUDgee from info in config file {}'.format(mudgen_config))
else:
# Get info from MUD config file
print('>>> MUDGEN CONFIG FILE: {}'.format(MUD_DEFAULT_CONFIG_FILE))
with open(MUD_DEFAULT_CONFIG_FILE) as mg_cf:
mg_data = json.load(mg_cf)
device_name = mg_data['deviceConfig']['deviceName']
# Run mudgee
mudgee_gen_outcome = MUDGenUtils.run_mudgee(MUD_DEFAULT_CONFIG_FILE)
print('>>> MUD config file not provided for "mudgen". Defaulting on {}'.format(MUD_DEFAULT_CONFIG_FILE))
if mudgee_gen_outcome == 0:
print('>>> MUD data output in result/{}'.format(device_name))
else:
print('>>> ERROR: Some error occurred in generating MUD data.')
################################################################################################
# MODE REJECT
################################################################################################
elif mode == MODE_REJECT:
# Check all parameters entered
if reject_mud_rules is None:
print('>>> Parameter missing: --reject_mud_rules.')
sys.exit(-1)
# Check if MUD rules exist
if not os.path.isfile(reject_mud_rules):
print('>>> MUD-derived (OpenFlow) rules CSV file <{}> not found.'.format(reject_mud_rules))
sys.exit(-1)
if reject_config is None and reject_online_interface is None:
print('>>> Filtering modality not specified. Provide either of the parameters: --reject_config, --reject_online_interface')
sys.exit(-1)
# Get useful data
if reject_config is not None: # RUN IN PCAP
if os.path.isfile(reject_config):
with open(reject_config) as mc_data:
data = json.load(mc_data)
dev_name = data['deviceConfig']['deviceName']
dev_mac = data['deviceConfig']['device']
gw_mac = data['defaultGatewayConfig']['macAddress']
reject_pcap = data['filterPcapLocation']
# Check if pcap to process exists
if reject_pcap is not None and not os.path.isfile(reject_pcap):
print('>>> "{}" does not exist. Check --reject_config file key-values {} \n>>> (if null: are you trying to use a MUDgee config file?) '.format(reject_pcap, json.dumps(data, indent=4)), file=sys.stderr)
sys.exit(-1)
v_mud_enf = Virtual_MUD_enforcer(dev_mac, dev_name, gw_mac, reject_mud_rules)
# Run the virtual MUD enforcer on the pcap for the given device
v_mud_enf.enforce_in_pcap(reject_pcap, pcap_limit, save_json=True, named_dir=reject_to_named_dir)
# If --reject_config parameter is a directory, iterate over all pcaps in directory and filter according to other parameters
elif os.path.isdir(reject_config):
config_folder_base = os.path.basename(os.path.normpath(reject_config))
print('>>> IN-CONFIG FOLDER: {}'.format(config_folder_base))
tgt_dir = os.fsencode(reject_config)
for file in os.listdir(tgt_dir):
filename = os.fsdecode(file)
if filename.endswith('.json'):
reject_config = os.fsdecode(tgt_dir) + '/' + filename
print('>>>>>>>>>>>>>>>>>')
print('######################## Filtering from config: \n{}'.format(reject_config))
print('>>>>>>>>>>>>>>>>>')
if os.path.isfile(reject_config):
with open(reject_config) as mc_data:
data = json.load(mc_data)
dev_name = data['deviceConfig']['deviceName']
dev_mac = data['deviceConfig']['device']
gw_mac = data['defaultGatewayConfig']['macAddress']
reject_pcap = data['filterPcapLocation']
# Check if pcap to process exists
if reject_pcap is not None and not os.path.isfile(reject_pcap):
print('>>> "{}" does not exist. Check --reject_config file key-values {} \n>>> (if null: are you trying to use a MUDgee config file?) '.format(reject_pcap, json.dumps(data, indent=4)), file=sys.stderr)
sys.exit(-1)
v_mud_enf = Virtual_MUD_enforcer(dev_mac, dev_name, gw_mac, reject_mud_rules)
# Run the virtual MUD enforcer on the pcap for the given device
v_mud_enf.enforce_in_pcap(reject_pcap, pcap_limit, named_dir=reject_to_named_dir)
print('<<<<<<<<<<<<<<<<<')
print('######################## Done filtering from config: \n{}'.format(reject_config))
print('<<<<<<<<<<<<<<<<<')
else:
print('>>> Parameter --reject_config does not seem to be a file or a directory. Specify a valid path.\n>>> Parameter given: {}'.format(reject_config))
sys.exit(-1)
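# Hedged sketch of a single --reject_config JSON file, inferred from the keys
# read above (all values below are illustrative placeholders):
# {
#     "deviceConfig": {"deviceName": "<device-id>", "device": "<device MAC>"},
#     "defaultGatewayConfig": {"macAddress": "<gateway MAC>"},
#     "filterPcapLocation": "path/to/capture-to-filter.pcap"
# }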
if reject_online_interface is not None: # RUN ONLINE LOCALLY
print('>>> Attempt listening on interface [ {} ] to filter traffic as specified by rules at [ {} ]'.format(reject_online_interface, reject_mud_rules))
# Hard-coded test values for the developer's local machine (MacBook) and gateway
dev_mac = '3c:22:fb:97:59:a1'
dev_name = 'macbook-local-test'
gw_mac = '88:36:6c:d7:1c:56'
v_mud_enf = Virtual_MUD_enforcer(dev_mac, dev_name, gw_mac, reject_mud_rules)
v_mud_enf.enforce_online(reject_online_interface)
# ONLINE MODE NOTES
"""
- Listen on determined interface
- Listen for MAC-specific (device-specific) packets
- As a MUD enforcer, it should DROP/STORE non-MUD packets, and forward MUD packets
- As a MUD listener, it should just collect the already-filtered packets
"""
################################################################################################
# MODE FLOWS FILE GEN
################################################################################################
elif mode == MODE_FLOWS_GENERATION:
if flowsgen_tgt_dir is None or not os.path.isdir(flowsgen_tgt_dir):
raise ValueError(f">>> ERROR: Null or invalid --flowsgen_tgt_dir argument for mode {MODE_FLOWS_GENERATION}. Please enter a valid path to folder containing pcaps to convert to flows CSV file. Exiting.")
mrttocsv.module_each_pcap_to_complete_csv(flowsgen_tgt_dir)
################################################################################################
# MODE ANALYZE
################################################################################################
elif mode == MODE_ANALYZE:
if analysis_tgt is None or analysis_devname is None:
print('>>> Make sure to provide path to pcap/csv/directory to analyse, via the parameter --analysis_tgt\n>>> Also please specify device name with parameter --analysis_devname, needed to reference output folder for analysis actions.')
sys.exit(-1)
output_folder = OUTPUTS_FOLDER + analysis_devname + '/'
"""
NOTE: Discontinued
if analysis_action == ANALYSIS_ACTION_PKTS_CSV:
csv_gen_res = PcapUtils.get_pcap_csv(analysis_tgt, analysis_devname)
if csv_gen_res != -1:
print('>>> {} has been generated. Please analyse it on Kibana'.format(csv_gen_res))
else:
print('>>> An error occurred trying to generate CSV from pcap file {}'.format(analysis_tgt))
sys.exit(-1)
if analysis_action == ANALYSIS_ACTION_IPS_FLOWS_GRAPHS:
# Analyses functions
display_flows_ips_description_info_graph(analysis_tgt)
if analysis_action == ANALYSIS_ACTION_PORTS_FLOWS_GRAPHS:
display_flows_ports_description_info_graph(analysis_tgt)
if analysis_action == ANALYSIS_ACTION_IPS_MAP:
folium_map(analysis_tgt, analysis_devname)
if analysis_action == ANALYSIS_ACTION_FILTER_KNOWN_PROVIDERS:
ti_register = TIRegister(analysis_tgt, analysis_devname)
ti_register.filter_out_known_backends_pkts_from_pcap()
"""
############################################################################### MRTA CHARACTERIZE
if analysis_action == ANALYSIS_ACTION_MRTA_CHARACTERIZE:
###### Checks
if analysis_capture_metadata is None:
raise ValueError('>>> ERROR: analysis_capture_metadata parameter unspecified for mode [ {} ]. Exiting.'.format(mode))
if dsr_path is None:
raise ValueError('>>> ERROR: Dataset scaler reference (--dsr_path) unspecified for mode [ {} ]. Exiting.'.format(mode))
metadata = {}
try:
with open(analysis_capture_metadata) as md:
metadata = json.load(md)
except Exception as e:
print(e)
print('>>> Unable to get analysis capture metadata. A JSON-consistency issue?')
sys.exit(-1)
if metadata.get('device_id') is None or metadata.get('deployment_info') is None:
print('>>> device_id or deployment_info entries missing in analysis_capture_metadata [ {} ]. Exiting.'.format(analysis_capture_metadata))
sys.exit(-1)
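# Hedged sketch of the analysis_capture_metadata JSON, based only on the two
# keys checked above (any additional fields used elsewhere are not shown):
# {
#     "device_id": "<device-id>",
#     "deployment_info": "<free-form deployment description>"
# }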
##### Operations
if os.path.isdir(analysis_tgt):
dir = os.fsencode(analysis_tgt)
for data in os.listdir(dir):
data_name = os.fsdecode(data)
if data_name.endswith(CSV_CLEAN_LABEL): # RUNS ON ALL PER-PCAP CSVs, OUTPUTS CORRESPONDING AMOUNT OF CHARACTERIZATION FILES
path_to_file = analysis_tgt + '/' + data_name
mrta_characterizator = MRTACharacterizator(metadata, path_to_file, dsr_path)
mrta_characterizator.input_to_characterization_data()
# Output name, default or specified
now = datetime.now()
dt_string = now.strftime("%Y%m%d_%H-%M-%S")
characterization_name = 'ch_' + dt_string + '_' + analysis_devname + data_name + '.json'
output_path = output_folder + analysis_devname + '_mrt_characterizations/'
Path(output_path).mkdir(parents=True, exist_ok=True)
mrta_characterizator.save_characterization(output_path + characterization_name)
else:
mrta_characterizator = MRTACharacterizator(metadata, analysis_tgt, dsr_path)
mrta_characterizator.input_to_characterization_data()
# Output name, default or specified
now = datetime.now()
dt_string = now.strftime("%Y%m%d_%H-%M-%S")
characterization_name = 'ch_' + dt_string + '_' + analysis_devname + os.path.splitext(os.path.basename(analysis_tgt))[0] + '.json'
output_path = output_folder + analysis_devname + '_mrt_characterizations/'
Path(output_path).mkdir(parents=True, exist_ok=True)
mrta_characterizator.save_characterization(output_path + characterization_name)
############################################################################### MRT EVOLUTION DATAGEN
# Given a directory of characterization files, compute transition characterizations over consecutive (two-by-two) pairs in chronological order, and output the produced dataset to a dedicated folder
if analysis_action == ANALYSIS_ACTION_DEVICE_MRT_EVOLUTION_DATAGEN:
if not os.path.isdir(analysis_tgt):
raise ValueError(f">>> ERROR: In order to run action [ {ANALYSIS_ACTION_DEVICE_MRT_EVOLUTION_DATAGEN} ] --analysis_tgt must be a directory, containing characterization files for a specific device. Exiting.")
analysis_tgt = os.path.abspath(analysis_tgt)
"""
*****************************************************************************************************
* TODO: Move to dedicated class/file function
*****************************************************************************************************
"""
# Order files chronologically wrt start date of each characterization, to be then analyzed two-by-two
ordered_characterizations = {}
tgt_dir = os.fsencode(analysis_tgt)
for file in os.listdir(tgt_dir):
filename = os.fsdecode(file)
filename = os.fsdecode(tgt_dir) + filename if os.fsdecode(tgt_dir).endswith('/') else os.fsdecode(tgt_dir) + '/' + filename
if filename.endswith('.json'):
with open(filename, 'r') as file:
try:
f = json.load(file)
start_timestamp = f['metadata']['time_window'][0]
except KeyError as e:
raise ValueError(f">>> ERROR: Unable to fetch time information from characterization file {filename}. Is the JSON format valid? Exiting.")
start_timestamp = float(datetime.timestamp(datetime.strptime(start_timestamp, STRFTIME_READABLE_FORMAT)))
ordered_characterizations[filename] = start_timestamp
ordered_characterizations = dict(sorted(ordered_characterizations.items(), key=lambda item: item[1]))
#print(ordered_characterizations)
"""
FEDLAB
NOTE : LEGIT ORDER : chrono_ch_files = [k for k in ordered_characterizations.keys()]
NOTE - CURRENTLY TESTING WITH ALPHABETICAL ORDER : `same attacks' scenario
[k for k in sorted(ordered_characterizations.keys(), key=lambda s:s.rsplit('_')[-1])]
NOTE - CURRENTLY TESTING WITH SHUFFLED ORDER : `different attacks' scenario
[k for k in random.sample(ordered_characterizations.keys(), len(ordered_characterizations.keys()))]
"""
chrono_ch_files = [k for k in ordered_characterizations.keys()]
#chrono_ch_files = [k for k in random.sample(ordered_characterizations.keys(), len(ordered_characterizations.keys()))]
for f in chrono_ch_files:
print(f)
#print(chrono_ch_files)
# Produce two-by-two MRT clusters transition data entries
entries_list = []
for ch1, ch2 in zip(chrono_ch_files, chrono_ch_files[1:]):
mrta_pcp = MRTAPairClustersProcessor(ch1, ch2)
mrta_pcp.populate_clusters_shifts_data()
mrta_pcp.set_transition_characterization_data()
transition_df = mrta_pcp.get_transition_characterization_data_df_entry()
mrta_pcp.print_distance_matrix(readable_only=False)
entries_list.append(transition_df)
df = pd.concat(entries_list, ignore_index=True)
#print('CREATED DATASET')
#print(df)
now = datetime.now()
dt_string = now.strftime("%Y%m%d_%H-%M-%S")
clusters_evol_df_name = 'clusters_evols_' + dt_string + '_' + analysis_devname + '.csv'
output_path = output_folder + analysis_devname + '_mrt_transitions_dfs/'
Path(output_path).mkdir(parents=True, exist_ok=True)
df.to_csv(output_path + clusters_evol_df_name)
################################################################################################
# MODE MONITOR
################################################################################################
elif mode == MODE_MONITOR:
""" Generate fluctuation graphs """
# See MRTADashboard:
# Generate MRTFeed objects [CSV feed + metadata per device]
# MRTFeed metric(s) to display
# Save location for graphs and overall data
# TODO/Future work: Specify time window section
# MRT Feeds information and building
if mrtfeeds_config is None:
raise ValueError('>>> ERROR: --mrtfeeds_config was not specified. A valid mrtfeeds_config file is required to compare MRT feeds. Exiting.')
with open(mrtfeeds_config) as mrtf_conf:
mrtf_data = json.load(mrtf_conf)
mrtf_data_list = mrtf_data['list']
mrt_feeds_list = []
for l in mrtf_data_list:
mrt_feeds_list.append(MRTFeed(l['device_metadata'], l['csv_mrt_feed']))
# Features to monitor
if monitor_features is None: raise ValueError('>>> ERROR: --monitor_features not specified. Exiting.')
monitor_features = monitor_features.split(',')
mrtadb = MRTADashboard()
mrtadb.populate(mrt_feeds_list)
for feature in monitor_features:
mrtadb.plot_monodim_metric(feature)
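# Hedged sketch of the mrtfeeds_config JSON consumed above (keys taken from the
# parsing code; the paths are illustrative placeholders only):
# {
#     "list": [
#         {"device_metadata": "path/to/device_metadata.json",
#          "csv_mrt_feed": "path/to/mrt_feed.csv"}
#     ]
# }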
else:
print('>>> --mode argument "{}" is invalid. Exiting.'.format(mode))
sys.exit(-1)
"""
python run.py --mode analyze --analysis_tgt ./outputs/ieee-ezviz-complete/mirai-httpflooding-all-ezviz-rejected.json --analysis_action ips_flows_graphs --analysis_devname ieee-ezviz
"""
if __name__ == '__main__':
main()
sys.exit(0)
"""
**** DEMO COMMANDS FOR SINGLE-DEVICE FUNCTIONS ****
# Generate MUD profile
$> python3 run.py --mode mudgen --mudgen_config ieee-ezviz-demo-1.json
# Filter traffic off a pcap
$> python3 run.py --mode reject --reject_mud_rules result/ieee-ezviz-demo-1/ieee-ezviz-demo-1rule.csv --reject_config configs/reject_configs/ieee-ezviz-demo-1-A-floods --reject_to_named_dir time_1
$> python3 run.py --mode reject --reject_mud_rules result/ieee-ezviz-demo-1/ieee-ezviz-demo-1rule.csv --reject_config configs/reject_configs/ieee-ezviz-demo-1-B-scans --reject_to_named_dir time_2
# Generate NetFlow CSV
$> python3 run.py --mode flows_gen --flowsgen_tgt_dir outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_1
$> python3 run.py --mode flows_gen --flowsgen_tgt_dir outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_2
# Cluster flows in CSV and generate characterization file
$> python3 run.py --mode analyze --analysis_devname ieee-ezviz-demo-1 --analysis_action mrta_characterize --analysis_capture_metadata characterization_test.json --analysis_tgt outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_1/ieee-ezviz-demo-1_time_1-all-flows-csv/*-CLN.csv
[ieee-ezviz-demo-1_time_1-all-flows-gen-custom-format-CLN.csv]
$> python3 run.py --mode analyze --analysis_devname ieee-ezviz-demo-1 --analysis_action mrta_characterize --analysis_capture_metadata characterization_test.json --analysis_tgt outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_2/ieee-ezviz-demo-1_time_2-all-flows-csv/*-CLN.csv
[ieee-ezviz-demo-1_time_2-all-flows-gen-custom-format-CLN.csv]
# Generate the traffic evolution dataframe based on sequential pairwise traffic characterization files
$> python3 run.py --mode analyze --analysis_devname ieee-ezviz-demo-1 --analysis_action device_mrt_evolution_datagen --analysis_tgt outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_mrt_characterizations
**** MACROs reject traffic to characterization file ****
python3 MACRO_rjt_to_ch.py --devname ieee-ezviz-demo-1 --reject_config configs/reject_configs/ieee-ezviz-demo-1-A-mirai-floods --reject_to_named_dir time_1 --flowsgen_tgt_dir outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_1 --analysis_capture_metadata characterization_test.json
python3 MACRO_rjt_to_ch.py --devname ieee-ezviz-demo-1 --reject_config configs/reject_configs/ieee-ezviz-demo-1-B-scans --reject_to_named_dir time_2 --flowsgen_tgt_dir outputs/ieee-ezviz-demo-1/ieee-ezviz-demo-1_time_2 --analysis_capture_metadata characterization_test.json
"""
"""
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*
# TODO - CHANGE TO CSV-PER-PCAP APPROACH!!!!!!!!
# DONE >>> IT LOOKS LIKE IT WORKS!!
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*
SPLIT PCAP FILES INTO 60-SECOND CHUNKS: editcap -i 60 input.pcap output.pcap
https://serverfault.com/questions/131872/how-to-split-a-pcap-file-into-a-set-of-smaller-ones
https://www.wireshark.org/docs/man-pages/editcap.html
DSR PATH EZVIZ: '/Users/lucamrgs/Big_Data/IEEE-Huy-Kang/dataset_scaler_gen_reference.csv'
$> python3 run.py --mode mudgen --mudgen_config <file>.json
$> python3 src/generate_rjt_configs.py --tgt_dir <full path to dir with pcaps to reject from> --devname <devname> --dev_mac <dev mac> --gw_mac <> --gw_ip4 <> [--gw_ip6 <>]
$> python3 run.py --mode reject --reject_mud_rules result/<device-id>/<device-id>rule.csv --reject_config path/to/<name of generated rjt folder>
$> python3 run.py --mode flows_gen --flowsgen_tgt_dir outputs/<device-id>[/rjt pcaps folder]
$> python3 run.py --mode analyze --analysis_devname <device-id> --analysis_action mrta_characterize --dsr_path <path to dataset scaling generation reference csv> --analysis_capture_metadata <metadata-filename>.json --analysis_tgt outputs/<device-id>/<flows CSV folder>
$> python3 run.py --mode analyze --analysis_devname <device-id> --analysis_action device_mrt_evolution_datagen --analysis_tgt outputs/<device-id>/<mrt characterizations folder>
"""
|
""" ExportContainer_Dialog module. """
# ISC License
#
# Copyright (c) 2020–2022, Paul Wilhelm, M. Sc. <anfrage@paulwilhelm.de>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import annotations
from PyQt5.QtWidgets import QCheckBox
from magneticalc.QtWidgets2.QLabel2 import QLabel2
from magneticalc.QtWidgets2.QDialog2 import QDialog2
from magneticalc.QtWidgets2.QSaveAction import QSaveAction
from magneticalc.API import API
from magneticalc.Debug import Debug
from magneticalc.Field_Types import Field_Types_Names_Map, Field_Types_Abbreviations_Map, field_name_to_type
from magneticalc.Theme import Theme
class ExportContainer_Dialog(QDialog2):
""" ExportContainer_Dialog class. """
def __init__(
self,
gui: GUI # type: ignore
) -> None:
"""
Initializes the dialog.
@param gui: GUI
"""
QDialog2.__init__(self, title="Export Container", width=500)
Debug(self, ": Init", init=True)
self.gui = gui
self.addWidget(QLabel2("Please select items for export", bold=True, color=Theme.MainColor))
self.addSpacing(8)
self.addWidget(QLabel2(
"Fields must have been calculated before they can be exported.", italic=True, color=Theme.LiteColor
))
self.addSpacing(16)
self.Export_Names_Available_Map = {
"Wire Points" : self.gui.model.wire.valid,
"Wire Current" : self.gui.model.wire.valid
}
self.Export_Names_Available_Map.update({
field_name: self.gui.model.get_valid_field(field_type) is not None
for field_type, field_name in Field_Types_Names_Map.items()
})
self.checkboxes = {}
for i, item in enumerate(self.Export_Names_Available_Map.items()):
item_name, item_available = item
self.checkboxes[item_name] = QCheckBox(" " + item_name + ("" if item_available else " (not calculated)"))
self.checkboxes[item_name].setEnabled(item_available)
self.checkboxes[item_name].setChecked(item_available)
self.addWidget(self.checkboxes[item_name])
self.addSpacing(16)
buttons = self.addButtons({
"Cancel" : ("fa.close", self.reject),
"Save Container …" : ("fa.save", self.export)
})
buttons[1].setFocus()
def export(self) -> None:
"""
Exports wire points, current and fields to some HDF5 container file.
"""
Debug(self, ".export()")
Export_Names_Selection_Map = {
item_name: self.checkboxes[item_name].isChecked()
for item_name, item_available in self.Export_Names_Available_Map.items()
}
Export_Names_Abbreviations_Map = {
"Wire Current" : "Current",
"Wire Points" : "Wire"
}
Export_Names_Abbreviations_Map.update({
field_name: Field_Types_Abbreviations_Map[field_type]
for field_type, field_name in Field_Types_Names_Map.items()
})
export_abbreviations_str = "-".join([
Export_Names_Abbreviations_Map[item_name]
for item_name, item_selected in Export_Names_Selection_Map.items() if item_selected
])
action = QSaveAction(
self.gui,
title="Export Container",
date=True,
filename="MagnetiCalc_Export" + (("_" + export_abbreviations_str) if export_abbreviations_str else ""),
extension=".hdf5",
_filter="HDF5 Container (*.hdf5)"
)
if not action.filename:
self.reject()
return
container = {}
fields = {}
for item_name, item_selected in Export_Names_Selection_Map.items():
if not item_selected:
continue
if item_name == "Wire Points":
wire_points_components = self.gui.model.wire.points_sliced.T
wire_points = dict(zip(["x", "y", "z"], wire_points_components))
container.update({"wire_points": wire_points})
elif item_name == "Wire Current":
wire_current = self.gui.model.wire.dc
container.update({"wire_current": wire_current})
else:
field_type = field_name_to_type(item_name)
field_components = self.gui.model.get_valid_field(field_type).vectors.T
field_abbreviation = Field_Types_Abbreviations_Map[field_type]
fields.update(dict(zip(
[field_abbreviation + "_x", field_abbreviation + "_y", field_abbreviation + "_z"],
field_components
)))
if fields != {}:
fields.update(dict(zip(["nx", "ny", "nz"], self.gui.model.sampling_volume.dimension)))
fields.update(dict(zip(["x", "y", "z"], self.gui.model.sampling_volume.points.T)))
container.update({"fields": fields})
API.export_hdf5(action.filename, container)
self.accept()
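# For reference, a hedged sketch of the container dict assembled above and passed
# to API.export_hdf5() when every item is selected (field abbreviations come from
# Field_Types_Abbreviations_Map; array contents omitted):
# {
#     "wire_points": {"x": ..., "y": ..., "z": ...},
#     "wire_current": <float>,
#     "fields": {"<abbr>_x": ..., "<abbr>_y": ..., "<abbr>_z": ...,
#                "nx": ..., "ny": ..., "nz": ..., "x": ..., "y": ..., "z": ...}
# }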
|
import os, sys
from datetime import datetime, timedelta
from time import sleep
from timeit import default_timer as timer
from story import Story
from utils import ERROR, SUCCESS, WARNING, CAPTURE_WAIT
from utils import DateIterator, clear_screen, ffi_channel, fmt, fmt_text, force_input, get_lang
def build_paths(session, date_start, date_end):
path_list = []
for _i, day in DateIterator(date_start, date_end, 'Building the path list... %s'):
file_path = Story(session, day).get_path()
if file_path:
path_list.append(file_path)
assert path_list
path_list.append(session.key)
return path_list
def rusty_search(session, date_start, date_end, word): # FFI for giving the searching job to Rust
occurrences = []
list_to_send = build_paths(session, date_start, date_end)
list_to_send.append(word)
count_string, timing = ffi_channel(list_to_send, mode = 1)
print 'Parsing the data stream from Rust...'
for i, string in enumerate(count_string.split(' ')): # spaces in the data stream represent individual files
idx = map(int, string.split(':')) # ... and colons represent the indices where the word has occurred
# Rust fills the indices of the file paths with the number of occurrences
# So, "i" indicates the Nth day from the birthday
if idx[0] > 0:
occurrences.append((i, len(idx), idx))
return occurrences, timing
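# Illustrative example of the stream parsed above (not real library output): a
# count_string of "0 12:45 0" means days 0 and 2 had no matches, while day 1 had
# the word at offsets 12 and 45, so occurrences gains the entry (1, 2, [12, 45]).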
# NOTE: Exhaustive process (that's why I've written a Rust library for this!)
# The library accelerates the searching time by ~100 times!
def py_search(session, date_start, date_end, word):
occurrences, errors, no_stories, = [], 0, 0
start = timer()
date_iter = DateIterator(date_start, date_end)
for i, day in date_iter:
occurred, story = [], Story(session, day)
try:
if not story.get_path():
no_stories += 1
continue
data = story.decrypt() # AssertionError (if any) is caught here
idx, jump, data_len = 0, len(word), len(data)
# probably an inefficient way to find the word indices
while idx < data_len:
idx = data.find(word, idx)
if idx == -1: break
occurred.append(idx)
idx += jump
except AssertionError:
errors += 1
if errors > 10:
print ERROR, "More than 10 files couldn't be decrypted! Terminating the search..."
return [], (timer() - start)
if occurred and occurred[0] > 0: # "i" indicates the Nth day from the birthday
occurrences.append((i, len(occurred), occurred))
sum_value = sum(map(lambda stuff: stuff[1], occurrences))
date_iter.send_msg('[Found: %d]' % sum_value)
assert no_stories < (i + 1)
return occurrences, (timer() - start)
def find_line_boundary(text, idx, limit, direction_value): # find the closest boundary of text for a given limit
i, num = idx, 0
while text[i + direction_value] not in ('\n', '\t'):
if text[i] == ' ': num += 1
if num == limit: return i
i += direction_value
return i
def mark_text(text, indices, length, color = 'red'): # Mark text and return corrected indices
if sys.platform == 'win32': # damn OS doesn't even support coloring
return text, indices
text = list(text)
formatter = fmt(color), fmt()
lengths = map(len, formatter) # we gotta update the indices when we introduce colored text
i, limit = 0, len(indices)
new_indices = indices[:]
while i < limit:
idx = indices[i]
text[idx] = formatter[0] + text[idx]
text[idx + length - 1] += formatter[1]
new_indices[i] -= lengths[0]
j = i
while j < limit:
new_indices[j] += sum(lengths)
j += 1
i += 1
return ''.join(text), new_indices
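# Rough usage sketch (illustrative only): mark_text('the cat sat', [4], 3) wraps
# the slice text[4:7] ('cat') in ANSI color codes and returns the occurrence
# indices shifted to account for the inserted escape sequences; on win32 the
# text and indices come back unchanged.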
def search(session, word = None, lang = None, start = None, end = None, grep = 7):
'''Invokes one of the searching functions and does some useful stuff'''
clear_screen()
now = datetime.now()
def check_date(date):
if date in ['today', 'now', 'end']:
return now
elif date in ['start', 'birthday']:
return session.birthday
try:
return datetime.strptime(date, '%Y-%m-%d')
except (TypeError, ValueError):
return None
sys.stdout.set_mode(1, 0.01)
# Phase 1: Get the user input required for searching through the stories
word = force_input(word, "\nEnter a word: ", ERROR + ' You must enter a word to continue!')
lang = get_lang(lang)
start, end = map(check_date, [start, end])
while not all([start, end]):
try:
print WARNING, 'Enter dates in the form YYYY-MM-DD (Mind you, with hyphen!)\n'
if not start:
lower_bound = session.birthday
start_date = raw_input('Start date (Press [Enter] to begin from the start of your diary): ')
start = datetime.strptime(start_date, '%Y-%m-%d') if start_date else session.birthday
assert (start >= lower_bound and start <= now), 'S'
if not end:
lower_bound = start
end_date = raw_input("End date (Press [Enter] for today's date): ")
end = datetime.strptime(end_date, '%Y-%m-%d') if end_date else now
assert (end > lower_bound and end <= now), 'E'
except AssertionError as msg:
print ERROR, '%s date should be after %s and before %s' % \
(msg, lower_bound.strftime('%b. %d, %Y'), now.strftime('%b. %d, %Y'))
if str(msg) == 'S':
start = None
else:
end = None
except ValueError:
print ERROR, 'Oops! Error in input. Try again...'
# Phase 2: Send the datetimes to the respective searching functions
print "\nSearching your stories for the word '%s'..." % word
search_function = rusty_search if lang == 'r' else py_search
try:
occurrences, timing = search_function(session, start, end, word)
except AssertionError:
print ERROR, 'There are no stories in the given location!'
return
def print_stuff(grep): # function to choose between pretty and ugly printing
sys.stdout.set_mode(0)
results_begin = '\nSearch results from %s to %s:' % (start.strftime('%B %d, %Y'), end.strftime('%B %d, %Y')) + \
"\n\nStories on these days have the word '%s' in them...\n" % word
if grep: # pretty printing the output (at the cost of decrypting time)
try:
timer_start = timer()
print results_begin
for i, (n, word_count, indices) in enumerate(occurrences):
colored = []
date = start + timedelta(n)
content = Story(session, date).decrypt()
numbers = str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
text, indices = mark_text(content, indices, jump) # precisely indicate the word in text
for idx in indices: # find the word occurrences
left_bound = find_line_boundary(text, idx, grep, -1)
right_bound = find_line_boundary(text, idx, grep, 1)
sliced = '\t' + '... ' + text[left_bound:right_bound].strip() + ' ...'
colored.append(sliced)
print numbers, '\n%s' % '\n'.join(colored) # print the numbers along with the word occurrences
timer_stop = timer()
except (KeyboardInterrupt, EOFError):
sleep(CAPTURE_WAIT)
grep = 0 # default back to ugly printing
clear_screen()
print "Yep, it takes time! Let's go back to the good ol' days..."
if not grep: # Yuck, but cleaner way to print the results
sys.stdout.set_mode(0)
print results_begin
for i, (n, word_count, _indices) in enumerate(occurrences):
date = session.birthday + timedelta(n)
numbers = ' ' + str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
spaces = 40 - len(numbers)
print numbers, ' ' * spaces, '[ %s ]' % word_count # print only the datetime and counts in each file
sys.stdout.set_mode(1, 0.015)
msg = fmt_text('Found a total of %d occurrences in %d stories!' % (total_count, num_stories), 'yellow')
print '\n%s %s\n' % (SUCCESS, msg)
print fmt_text(' Time taken for searching: ', 'blue') + \
fmt_text('%s seconds!' % timing, 'green')
if grep:
print fmt_text(' Time taken for pretty printing: ', 'blue') + \
fmt_text('%s seconds!' % (timer_stop - timer_start), 'green')
# Phase 3: Print the results (in a pretty or ugly way) using the giant function below
jump, num_stories = len(word), len(occurrences)
total_count = sum(map(lambda stuff: stuff[1], occurrences))
print SUCCESS, 'Done! Time taken: %s seconds! (%d occurrences in %d stories!)' \
% (timing, total_count, num_stories)
if not total_count:
print ERROR, "Bummer! There are no stories containing '%s'..." % word
return
print_stuff(grep)
# Phase 4: Get the user input and display the stories
while occurrences:
try:
sys.stdout.set_mode(2)
print '\nEnter a number to see the corresponding story...'
print "\r(Enter 'pretty' or 'ugly' to print those search results again, or press [Enter] to exit)"
ch = raw_input('\nInput: ')
if ch == 'pretty':
clear_screen()
print_stuff(grep = 7) # '7' is default, because it looks kinda nice
elif ch == 'ugly':
clear_screen()
print_stuff(grep = 0)
elif not ch:
return
elif int(ch) <= 0:
raise ValueError
else:
n_day, word_count, indices = occurrences[int(ch) - 1]
date = start + timedelta(n_day)
(data, top, bottom) = Story(session, date).view(return_text = True)
sys.stdout.set_mode(3)
print top, mark_text(data, indices, jump, 'skyblue')[0], bottom
except (ValueError, IndexError):
print ERROR, 'Oops! Bad input! Try again...'
|
import datetime
class Stat:
""" VFS stat() result
This differs from the builtin os.stat_result in that:
* It doesn't include less commonly used attributes like inode number
* It guesses fallbacks, like if mtime is null use ctime, etc.
* Timestamps are datetime objects so they can be timezone aware
* Includes the kind if available (one of "directory", "file", "symlink", "device",
"other", None)
"""
__slots__ = "url", "kind", "size", "atime", "mtime", "ctime", "birthtime", "unix_permissions"
def __init__(
self,
url,
kind=None,
size=None,
atime=None,
mtime=None,
ctime=None,
birthtime=None,
unix_permissions=None
):
self.url = url
kind = kind and kind.lower()
self.kind = kind if kind in ["directory", "file", "symlink", "device"] else None
self.size = size
[atime, mtime, ctime, birthtime] = [
datetime.datetime.fromtimestamp(x) if type(x) in [float, int] else x
for x in [atime, mtime, ctime, birthtime]
]
self.atime = atime or mtime or ctime or birthtime
self.mtime = mtime or ctime or birthtime or atime
self.ctime = ctime or birthtime or mtime or atime
self.birthtime = birthtime or ctime or mtime or atime
self.unix_permissions = unix_permissions
def __repr__(self):
""" Represent a stat() in a more user-friendly text format """
kvstr = ", ".join(f"{k}={repr(getattr(self, k))}" for k in self.__slots__)
return f"Stat({kvstr})"
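if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # with just an mtime supplied, atime/ctime/birthtime all fall back to it, as
    # described in the class docstring.
    example = Stat("vfs://example/file.txt", kind="file", size=1024, mtime=1577836800.0)
    print(example)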
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import numpy as np
import time
import sys
# This cube was taken from
# http://www.opengl-tutorial.org/beginners-tutorials/tutorial-4-a-colored-cube/
CUBE = [
    -1.0, -1.0, -1.0,  # triangle 1 : begin
    -1.0, -1.0, 1.0,
    -1.0, 1.0, 1.0,  # triangle 1 : end
    1.0, 1.0, -1.0,  # triangle 2 : begin
    -1.0, -1.0, -1.0,
    -1.0, 1.0, -1.0,  # triangle 2 : end
    1.0, -1.0, 1.0,
    -1.0, -1.0, -1.0,
    1.0, -1.0, -1.0,
    1.0, 1.0, -1.0,
    1.0, -1.0, -1.0,
    -1.0, -1.0, -1.0,
    -1.0, -1.0, -1.0,
    -1.0, 1.0, 1.0,
    -1.0, 1.0, -1.0,
    1.0, -1.0, 1.0,
    -1.0, -1.0, 1.0,
    -1.0, -1.0, -1.0,
    -1.0, 1.0, 1.0,
    -1.0, -1.0, 1.0,
    1.0, -1.0, 1.0,
    1.0, 1.0, 1.0,
    1.0, -1.0, -1.0,
    1.0, 1.0, -1.0,
    1.0, -1.0, -1.0,
    1.0, 1.0, 1.0,
    1.0, -1.0, 1.0,
    1.0, 1.0, 1.0,
    1.0, 1.0, -1.0,
    -1.0, 1.0, -1.0,
    1.0, 1.0, 1.0,
    -1.0, 1.0, -1.0,
    -1.0, 1.0, 1.0,
    1.0, 1.0, 1.0,
    -1.0, 1.0, 1.0,
    1.0, -1.0, 1.0,
]
def drawCube(xpt, ypt, zpt, size):
# Old method of drawing, each call is executed on the CPU -> slow
glPushMatrix()
glTranslatef(xpt, ypt, zpt)
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glVertex3f(size, size, -size)
# Top Right Of The Quad (Top)
glVertex3f(-size, size, -size)
# Top Left Of The Quad (Top)
glVertex3f(-size, size, size)
# Bottom Left Of The Quad (Top)
glVertex3f(size, size, size)
# Bottom Right Of The Quad (Top)
glVertex3f(size, -size, size)
# Top Right Of The Quad (Bottom)
glVertex3f(-size, -size, size)
# Top Left Of The Quad (Bottom)
glVertex3f(-size, -size, -size)
# Bottom Left Of The Quad (Bottom)
glVertex3f(size, -size, -size)
# Bottom Right Of The Quad (Bottom)
glVertex3f(size, size, size)
# Top Right Of The Quad (Front)
glVertex3f(-size, size, size)
# Top Left Of The Quad (Front)
glVertex3f(-size, -size, size)
# Bottom Left Of The Quad (Front)
glVertex3f(size, -size, size)
# Bottom Right Of The Quad (Front)
glVertex3f(size, -size, -size)
# Bottom Left Of The Quad (Back)
glVertex3f(-size, -size, -size)
# Bottom Right Of The Quad (Back)
glVertex3f(-size, size, -size)
# Top Right Of The Quad (Back)
glVertex3f(size, size, -size)
# Top Left Of The Quad (Back)
glVertex3f(-size, size, size)
# Top Right Of The Quad (Left)
glVertex3f(-size, size, -size)
# Top Left Of The Quad (Left)
glVertex3f(-size, -size, -size)
# Bottom Left Of The Quad (Left)
glVertex3f(-size, -size, size)
# Bottom Right Of The Quad (Left)
glVertex3f(size, size, -size)
# Top Right Of The Quad (Right)
glVertex3f(size, size, size)
# Top Left Of The Quad (Right)
glVertex3f(size, -size, size)
# Bottom Left Of The Quad (Right)
glVertex3f(size, -size, -size)
# Bottom Right Of The Quad (Right)
glEnd()
glPopMatrix()
def getCubeArray(xpt, ypt, zpt, size):
# scale by size
arr = size * np.array(list(CUBE))
# translate by coordinate
for i in range(0, len(CUBE), 3):
arr[0 + i] += xpt
arr[1 + i] += ypt
arr[2 + i] += zpt
return np.array(arr)
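# Hypothetical follow-up (not part of the original demo): the flat array returned
# by getCubeArray() is the kind of data you would upload once into a vertex buffer
# instead of issuing the per-vertex calls used in drawCube() above. A sketch,
# assuming a current GL context already exists:
#   vertices = getCubeArray(0.0, 0.0, 0.0, 1.0).astype(np.float32)
#   vbo = glGenBuffers(1)
#   glBindBuffer(GL_ARRAY_BUFFER, vbo)
#   glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)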
class GLBase(object):
# GLUT delivers keystrokes as byte strings rather than integer codes,
# so instead of saying ESCAPE = 27 we compare against the escape byte below.
ESCAPE = b"\x1b"
def __init__(self, title, width, height):
# Number of the glut window.
self.title = title
self.width = width
self.height = height
self.window = 0
self.init = False
self.t0 = time.time()
self.frames = 0
# A general OpenGL initialization function. Sets all of the initial parameters.
def initGL(self): # We call this right after our OpenGL window is created.
glClearColor(
0.0, 0.0, 0.0, 0.0
) # This Will Clear The Background Color To Black
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glDepthFunc(GL_LESS) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glMatrixMode(GL_PROJECTION)
glLoadIdentity()  # Reset The Projection Matrix
# Calculate The Aspect Ratio Of The Window
gluPerspective(45.0, float(self.width) / float(self.height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()  # Reset The Modelview Matrix
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def reSizeGLScene(self, width, height):
self.width = width
self.height = height
if self.height == 0: # Prevent A Divide By Zero If The Window Is Too Small
self.height = 1
glViewport(
0, 0, self.width, self.height
) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(self.width) / float(self.height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The main drawing function.
# -----------OVERWRITE-----------
def drawGLScene(self):
if not self.init:
self.rtri = 0
self.rquad = 0
self.init = True
# Clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity() # Reset The View
# Move Left 1.5 units and into the screen 6.0 units.
glTranslatef(-1.5, 0.0, -6.0)
# We have smooth color mode on, this will blend across the vertices.
# Draw a triangle rotated on the Y axis.
glRotatef(self.rtri, 0.0, 1.0, 0.0) # Rotate
glBegin(GL_POLYGON) # Start drawing a polygon
glColor3f(1.0, 0.0, 0.0) # Red
glVertex3f(0.0, 1.0, 0.0) # Top
glColor3f(0.0, 1.0, 0.0) # Green
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glColor3f(0.0, 0.0, 1.0) # Blue
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# We are "undoing" the rotation so that we may rotate the quad on its own axis.
# We also "undo" the prior translate. This could also have been done using the
# matrix stack.
glLoadIdentity()
# Move Right 1.5 units and into the screen 6.0 units.
glTranslatef(1.5, 0.0, -6.0)
# Draw a square (quadrilateral) rotated on the X axis.
glRotatef(self.rquad, 1.0, 0.0, 0.0) # Rotate
glColor3f(0.3, 0.5, 1.0) # Bluish shade
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glVertex3f(-1.0, 1.0, 0.0) # Top Left
glVertex3f(1.0, 1.0, 0.0) # Top Right
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# What values to use? Well, if you have a FAST machine and a FAST 3D Card, then
# large values make an unpleasant display with flickering and tearing. I found that
# smaller values work better, but this was based on my experience.
self.rtri += 1.0 # Increase The Rotation Variable For The Triangle
self.rquad -= 1.0 # Decrease The Rotation Variable For The Quad
# since this is double buffered, swap the buffers to display what just got drawn.
glutSwapBuffers()
self.framerate()
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
# print(args[1])
# If escape is pressed, kill everything.
if args[1] == GLBase.ESCAPE:
glutLeaveMainLoop()
sys.exit(0)
def framerate(self):
t = time.time()
self.frames += 1
if t - self.t0 >= 5.0:
seconds = t - self.t0
fps = self.frames / seconds
print(f"{self.frames} frames in {seconds:3.1f} seconds = {fps:6.3f} FPS")
self.t0 = t
self.frames = 0
def run(self):
glutInit(sys.argv)
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# get a width x height window
glutInitWindowSize(self.width, self.height)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# As in the C version, we retain the window id so it can be referenced when
# closing the window; here it is simply stored on the instance.
self.window = glutCreateWindow(self.title)
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(self.drawGLScene)
# Uncomment this line to get full screen.
# glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(self.drawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(self.reSizeGLScene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(self.keyPressed)
# Initialize our window.
self.initGL()
# Start Event Processing Engine
glutMainLoop()
def main():
glBaseObj = GLBase("test", 640, 480)
glBaseObj.run()
if __name__ == "__main__":
main()
|
# Copyright 2005 Divmod, Inc. See LICENSE file for details
from axiom.test.historic import stubloader
from xmantissa.ixmantissa import IFulltextIndexer
from xmantissa.fulltext import PyLuceneIndexer
class PyLuceneIndexerTestCase(stubloader.StubbedTest):
def testUpgrade(self):
"""
The PyLuceneIndexer should be findable by its interface now. It also
should have been reset since it was most likely slightly corrupt, with
respect to deleted documents.
"""
index = IFulltextIndexer(self.store)
self.failUnless(isinstance(index, PyLuceneIndexer))
self.assertEqual(index.indexDirectory, 'foo.index')
# we called reset(), and there are no indexed items
self.assertEqual(index.indexCount, 0)
self.assertEqual(index.installedOn, self.store)
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from rdkit import Chem
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
homedir = os.path.dirname(os.path.realpath('__file__'))
homedir = homedir+"/data/"
# In[4]:
jobname = "tox_niehs"
smiles_cutoff = 250
# In[5]:
for taskname in ["verytoxic", "nontoxic", "epa", "ghs", "logld50"]:
filename=homedir+jobname+"_int_"+taskname+".csv"
fileout=homedir+jobname+"_int_"+taskname+"_smiles.csv"
data = pd.read_csv(filename)
X_cut = data[data['smiles'].map(len) < smiles_cutoff]
print("Database reduced to SMILES length under "+str(smiles_cutoff)+" from "+str(data.shape)+" to "+str(X_cut.shape))
X = X_cut[["smiles",taskname]]
X.to_csv(fileout, index=False)
# In[6]:
for taskname in ["verytoxic", "nontoxic", "epa", "ghs", "logld50"]:
filename=homedir+jobname+"_tv_"+taskname+".csv"
fileout=homedir+jobname+"_tv_"+taskname+"_smiles.csv"
data = pd.read_csv(filename)
X_cut = data[data['smiles'].map(len) < smiles_cutoff]
print("Database reduced to SMILES length under "+str(smiles_cutoff)+" from "+str(data.shape)+" to "+str(X_cut.shape))
X = X_cut[["smiles",taskname]]
X.to_csv(fileout, index=False)
|
"""Set up for the UrFU LTI Customizations package."""
from setuptools import setup
setup(
name='urfu-lti',
version='0.2.0',
description='UrFU LTI Customizations package.',
packages=[
'urfu_lti',
],
)
|
from filemapper.check.checkcommonextension import CheckCommonExtension
check = CheckCommonExtension()
# Multimedia Check Tests
def test0_filemapper_check_multimedia():
assert check.check_multimedia('multimedia_video_sample.mkv') is True
def test1_filemapper_check_multimedia():
assert check.check_multimedia('multimedia_video_sample.mp4') is True
def test2_filemapper_check_multimedia():
assert check.check_multimedia('multimedia_video_sample.mp3') is False
# Multimedia Subtitle Directory Tests
def test0_filemapper_check_subtitles_directory():
assert check.check_subtitles_directory('multimedia_directory_sample (subs)') is True
def test1_filemapper_check_subtitles_directory():
assert check.check_subtitles_directory('multimedia_directory_sample (subtitles)') is True
# TODO
def test2_filemapper_check_subtitles_directory():
assert check.check_subtitles_directory('multimedia_directory_sample (sub)') is True
# TODO
def test3_filemapper_check_subtitles_directory():
assert check.check_subtitles_directory('multimedia_directory_sample (subs)') is True
# Multimedia Subtitle File Tests
def test0_filemapper_check_subtitle_file():
assert check.check_subtitles('multimedia_subtitle_sample.srt') is True
def test1_filemapper_check_subtitle_file():
assert check.check_subtitles('multimedia_subtitle_sample.ass') is True
def test2_filemapper_check_subtitle_file():
assert check.check_subtitles('multimedia_subtitle_sample.sub') is True
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsZonalStatistics.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alexander Bruy'
__date__ = '15/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
from qgis.PyQt.QtCore import QDir, QFile
from qgis.core import QgsVectorLayer, QgsRasterLayer, QgsFeature, QgsFeatureRequest
from qgis.analysis import QgsZonalStatistics
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
class TestQgsZonalStatistics(unittest.TestCase):
"""Tests for zonal stats class."""
def testStatistics(self):
"""Test zonal stats"""
TEST_DATA_DIR = unitTestDataPath() + "/zonalstatistics/"
myTempPath = QDir.tempPath() + "/"
testDir = QDir(TEST_DATA_DIR)
for f in testDir.entryList(QDir.Files):
QFile.remove(myTempPath + f)
QFile.copy(TEST_DATA_DIR + f, myTempPath + f)
myVector = QgsVectorLayer(myTempPath + "polys.shp", "poly", "ogr")
myRaster = QgsRasterLayer(myTempPath + "edge_problem.asc", "raster", "gdal")
zs = QgsZonalStatistics(myVector, myRaster, "", 1, QgsZonalStatistics.All)
zs.calculateStatistics(None)
feat = QgsFeature()
# validate statistics for each feature
request = QgsFeatureRequest().setFilterFid(0)
feat = next(myVector.getFeatures(request))
myMessage = ('Expected: %f\nGot: %f\n' % (12.0, feat[1]))
assert feat[1] == 12.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (8.0, feat[2]))
assert feat[2] == 8.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.666666666666667, feat[3]))
assert abs(feat[3] - 0.666666666666667) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[4]))
assert feat[4] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.47140452079103201, feat[5]))
assert abs(feat[5] - 0.47140452079103201) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[6]))
assert feat[6] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[7]))
assert feat[7] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[8]))
assert feat[8] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[9]))
assert feat[9] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[10]))
assert feat[10] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (2.0, feat[11]))
assert feat[11] == 2.0, myMessage
request.setFilterFid(1)
feat = next(myVector.getFeatures(request))
myMessage = ('Expected: %f\nGot: %f\n' % (9.0, feat[1]))
assert feat[1] == 9.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (5.0, feat[2]))
assert feat[2] == 5.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.555555555555556, feat[3]))
assert abs(feat[3] - 0.555555555555556) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[4]))
assert feat[4] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.49690399499995302, feat[5]))
assert abs(feat[5] - 0.49690399499995302) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[6]))
assert feat[6] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[7]))
assert feat[7] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[8]))
assert feat[8] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[9]))
assert feat[9] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[10]))
assert feat[10] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (2.0, feat[11]))
assert feat[11] == 2.0, myMessage
request.setFilterFid(2)
feat = next(myVector.getFeatures(request))
myMessage = ('Expected: %f\nGot: %f\n' % (6.0, feat[1]))
assert feat[1] == 6.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (5.0, feat[2]))
assert feat[2] == 5.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.833333333333333, feat[3]))
assert abs(feat[3] - 0.833333333333333) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[4]))
assert feat[4] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.372677996249965, feat[5]))
assert abs(feat[5] - 0.372677996249965) < 0.00001, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[6]))
assert feat[6] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[7]))
assert feat[7] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[8]))
assert feat[8] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (0.0, feat[9]))
assert feat[9] == 0.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (1.0, feat[10]))
assert feat[10] == 1.0, myMessage
myMessage = ('Expected: %f\nGot: %f\n' % (2.0, feat[11]))
assert feat[11] == 2.0, myMessage
if __name__ == '__main__':
unittest.main()
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
from typing import Any, Dict, Type
from .base import ModelTransformer
__all__ = ["get_model_transformer"]
_MODEL_TRANSFORMERS: Dict[type, Type[ModelTransformer]] = {}
def get_model_transformer(model: Any, **kwargs: Any) -> ModelTransformer:
"""Creates a ModelTransformer for a given model or raises an exception if one is not available"""
for model_type, transformer in _MODEL_TRANSFORMERS.items():
if isinstance(model, model_type):
# Filter out kwargs that aren't applicable to the specific 'ModelTransformer'
accepted_kwargs = {
param for param in inspect.signature(transformer.__init__).parameters
}
kwargs = {k: v for k, v in kwargs.items() if k in accepted_kwargs}
return transformer(model, **kwargs)
raise NotImplementedError(
f"Importing ML models of type {type(model)}, not currently implemented"
)
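# Hypothetical usage sketch (the model and the feature_names keyword shown here
# are assumptions for illustration; unknown kwargs are simply filtered out above):
#   from sklearn.tree import DecisionTreeClassifier
#   model = DecisionTreeClassifier().fit([[0.0], [1.0]], [0, 1])
#   transformer = get_model_transformer(model, feature_names=["f0"])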
try:
from .sklearn import _MODEL_TRANSFORMERS as _SKLEARN_MODEL_TRANSFORMERS
from .sklearn import (
SKLearnDecisionTreeTransformer,
SKLearnForestClassifierTransformer,
SKLearnForestRegressorTransformer,
SKLearnForestTransformer,
SKLearnTransformer,
)
__all__ += [
"SKLearnDecisionTreeTransformer",
"SKLearnForestClassifierTransformer",
"SKLearnForestRegressorTransformer",
"SKLearnForestTransformer",
"SKLearnTransformer",
]
_MODEL_TRANSFORMERS.update(_SKLEARN_MODEL_TRANSFORMERS)
except ImportError:
pass
try:
from .xgboost import _MODEL_TRANSFORMERS as _XGBOOST_MODEL_TRANSFORMERS
from .xgboost import (
XGBClassifier,
XGBoostClassifierTransformer,
XGBoostForestTransformer,
XGBoostRegressorTransformer,
XGBRegressor,
)
__all__ += [
"XGBoostClassifierTransformer",
"XGBClassifier",
"XGBoostForestTransformer",
"XGBoostRegressorTransformer",
"XGBRegressor",
]
_MODEL_TRANSFORMERS.update(_XGBOOST_MODEL_TRANSFORMERS)
except ImportError:
pass
try:
from .lightgbm import _MODEL_TRANSFORMERS as _LIGHTGBM_MODEL_TRANSFORMERS
from .lightgbm import (
LGBMClassifier,
LGBMClassifierTransformer,
LGBMForestTransformer,
LGBMRegressor,
LGBMRegressorTransformer,
)
__all__ += [
"LGBMRegressor",
"LGBMClassifier",
"LGBMForestTransformer",
"LGBMRegressorTransformer",
"LGBMClassifierTransformer",
]
_MODEL_TRANSFORMERS.update(_LIGHTGBM_MODEL_TRANSFORMERS)
except ImportError:
pass
|
import smart_imports
smart_imports.all()
class Mixin:
@contextlib.contextmanager
def check_discord_synced(self, accounts_ids):
with mock.patch('the_tale.portal.logic.sync_with_discord') as sync_with_discord:
yield
if not isinstance(accounts_ids, set):
accounts_ids = {accounts_ids}
self.assertEqual(sync_with_discord.call_count, len(accounts_ids))
self.assertEqual(accounts_ids, {call[0][0].id for call in sync_with_discord.call_args_list})
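# Hypothetical usage inside a test method (names below are illustrative only):
# the block asserts that exactly the given account ids were synced with Discord.
#   with self.check_discord_synced({account.id}):
#       trigger_some_change_that_requires_a_discord_sync(account)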
|
#!/usr/bin/env python
import sys
import os
from PySide import QtCore, QtGui
from PySide.QtGui import QFileDialog
from juma.core import app, signals
from juma.qt.TopEditorModule import TopEditorModule, QtMainWindow, SubEditorModule
##----------------------------------------------------------------##
class AssetEditor( TopEditorModule ):
_name = 'asset_editor'
_dependency = [ 'qt', 'moai' ]
def __init__(self):
super(AssetEditor, self).__init__()
self.runtime = None
def getWindowTitle( self ):
return 'Asset Editor'
def getRuntime(self):
if not self.runtime:
self.runtime = self.affirmModule('moai')
return self.runtime
def onLoad( self ):
self.mainWindow.setMenuWidget( self.getQtSupport().getSharedMenubar() )
self.findMenu( 'main/asset' ).addChild([
dict( name = 'refresh_assets', label = 'Refresh Assets', shortcut = 'ctrl+G' ),
], self )
self.findMenu('main/window').addChild([
'Mesh Exporter',
'Mesh Preview',
'----',
],
self )
return True
##----------------------------------------------------------------##
def openFile( self, fileformat, title, folder = None ):
if folder is None:
if self.getProject().getPath():
folder = self.getProject().getPath()
else:
folder = '~'
return QFileDialog.getOpenFileName(self.getMainWindow(), title, folder, fileformat)
##----------------------------------------------------------------##
def onMenu(self, node):
name = node.name
if name == 'mesh_exporter':
self.getModule('mesh_exporter').show()
elif name == 'mesh_preview':
self.getModule('mesh_preview').show()
elif name == 'refresh_assets':
self.getProject().assetLibrary.clearAssets()
runtime = self.getRuntime()
runtime.refreshAssets()
##----------------------------------------------------------------##
class AssetEditorModule( SubEditorModule ):
def getParentModuleId( self ):
return 'asset_editor'
def getSceneEditor( self ):
return self.getParentModule()
##----------------------------------------------------------------##
AssetEditor().register()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 12 13:34:33 2022
@author: oronald
"""
#Import required libraries
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import colors
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import xarray as xr
#Input data
model = np.load('C:/python_work/phd/paper2/new_data/no2/no2_model_train.npy')
model_val = np.load('C:/python_work/phd/paper2/new_data/no2/no2_model_validate.npy')
model_test = np.load('C:/python_work/phd/paper2/new_data/no2/no2_model_test.npy')
obs = np.load('C:/python_work/phd/paper2/new_data/no2/no2_obs_train.npy')
obs_val = np.load('C:/python_work/phd/paper2/new_data/no2/no2_obs_validate.npy')
obs_test = np.load('C:/python_work/phd/paper2/new_data/no2/no2_obs_test.npy')
#Replace NaN values in data with the mean value
col_mean_model = np.nanmean(model)
model[np.isnan(model)] = col_mean_model
col_mean_obs = np.nanmean(obs)
obs[np.isnan(obs)] = col_mean_obs
col_mean_model_val = np.nanmean(model_val)
model_val[np.isnan(model_val)] = col_mean_model_val
col_mean_obs_val = np.nanmean(obs_val)
obs_val[np.isnan(obs_val)] = col_mean_obs_val
col_mean_model_test = np.nanmean(model_test)
model_test[np.isnan(model_test)] = col_mean_model_test
col_mean_obs_test = np.nanmean(obs_test)
obs_test[np.isnan(obs_test)] = col_mean_obs_test
#Restrict the lower limit of the observations to zero (0): the sigmoid output layer of the
#network cannot reproduce negative values, so negative retrievals are clipped here
obs = obs.clip(0)
obs_val = obs_val.clip(0)
obs_test = obs_test.clip(0)
"Scaling Data"
#Setting rescale conditions
scale_model = 1./ np.max(model)
scale_obs = 1./ np.max(obs)
norm_model = tf.keras.layers.Rescaling(scale_model, offset=0.0)
norm_obs = tf.keras.layers.Rescaling(scale_obs, offset=0.0)
scale_model_val = 1./ np.max(model_val)
scale_obs_val = 1./ np.max(obs_val)
norm_model_val = tf.keras.layers.Rescaling(scale_model_val, offset=0.0)
norm_obs_val = tf.keras.layers.Rescaling(scale_obs_val, offset=0.0)
scale_model_test = 1./ np.max(model_test)
norm_model_test = tf.keras.layers.Rescaling(scale_model_test, offset=0.0)
#There is no need to scale the test observations
#because they will not be processed in the neural network
#Rescaling the data
model = norm_model(model)
obs = norm_obs(obs)
model_val = norm_model_val(model_val)
obs_val = norm_obs_val(obs_val)
model_test = norm_model_test(model_test)
#Add a trailing channel dimension so the arrays match the (height, width, channels) input
#expected by the convolutional layers.
model = tf.expand_dims(model, axis=-1)
obs = tf.expand_dims(obs, axis=-1)
model_val = tf.expand_dims(model_val, axis=-1)
obs_val = tf.expand_dims(obs_val, axis=-1)
model_test = tf.expand_dims(model_test, axis=-1)
" Building CNN Autoencoder "
# Encoder (named input_img so the builtin input() is not shadowed)
input_img = layers.Input(shape=(96, 80, 1))
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input_img)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(256, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
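# At this point the 96x80 input has been halved four times by the pooling layers, leaving
# a 6x5 bottleneck with 256 feature maps that the decoder upsamples back to 96x80.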
# Decoder
x = layers.Conv2DTranspose(256, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(128, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(64, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)
# Autoencoder
autoencoder = Model(input_img, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy", metrics="mse")
autoencoder.summary()
history = autoencoder.fit(x=model, y=obs, batch_size=1, epochs=250,
validation_data=(model_val, obs_val))
#Saving autoencoder
autoencoder.save('C:/python_work/phd/paper2/new_data/no2/ai_model/no2_ai_dca')
mse_history = history.history['mse']
val_mse_history = history.history['val_mse']
np.save('C:/python_work/phd/paper2/new_data/no2/ai_model/mse_history', mse_history)
np.save('C:/python_work/phd/paper2/new_data/no2/ai_model/val_mse_history', val_mse_history)
#Plotting Metrics
fig = plt.subplots(figsize=(8, 4), dpi = 500)
mpl.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 16
plt.rcParams['axes.linewidth'] = 1
plt.plot(history.history['mse'], label='Training', color='black', linewidth=2)
plt.plot(history.history['val_mse'], label = 'Validation', color='orange', linewidth=2)
plt.xlabel('Epochs')
plt.ylabel('Mean Squared Error')
plt.legend()
#Load saved autoencoder if need arises
autoencoder_from_saved = tf.keras.models.load_model('C:/python_work/phd/paper2/new_data/no2/ai_model/no2_ai_dca')
" Making Predictions "
prediction = autoencoder_from_saved.predict(model_test)
sn1_p = (prediction[0,:,:,0] + prediction[1,:,:,0] + prediction[2,:,:,0])/3
sn2_p = (prediction[3,:,:,0] + prediction[4,:,:,0] + prediction[5,:,:,0])/3
sn3_p = (prediction[6,:,:,0] + prediction[7,:,:,0] + prediction[8,:,:,0])/3
sn4_p = (prediction[9,:,:,0] + prediction[10,:,:,0] + prediction[11,:,:,0])/3
sn1_m = (model_test[0,:,:,0] + model_test[1,:,:,0] + model_test[2,:,:,0])/3
sn2_m = (model_test[3,:,:,0] + model_test[4,:,:,0] + model_test[5,:,:,0])/3
sn3_m = (model_test[6,:,:,0] + model_test[7,:,:,0] + model_test[8,:,:,0])/3
sn4_m = (model_test[9,:,:,0] + model_test[10,:,:,0] + model_test[11,:,:,0])/3
sn1_o = (obs_test[0,:,:] + obs_test[1,:,:] + obs_test[2,:,:])/3
sn2_o = (obs_test[3,:,:] + obs_test[4,:,:] + obs_test[5,:,:])/3
sn3_o = (obs_test[6,:,:] + obs_test[7,:,:] + obs_test[8,:,:])/3
sn4_o = (obs_test[9,:,:] + obs_test[10,:,:] + obs_test[11,:,:])/3
# Do inverse scaling to restore the original magnitudes
sn1_p = sn1_p/scale_model_val
sn2_p = sn2_p/scale_model_val
sn3_p = sn3_p/scale_model_val
sn4_p = sn4_p/scale_model_val
sn1_m = sn1_m/scale_model_test
sn2_m = sn2_m/scale_model_test
sn3_m = sn3_m/scale_model_test
sn4_m = sn4_m/scale_model_test
# Divide by 1e15
sn1_p_e = sn1_p/1e15
sn2_p_e = sn2_p/1e15
sn3_p_e = sn3_p/1e15
sn4_p_e = sn4_p/1e15
sn1_m_e = sn1_m/1e15
sn2_m_e = sn2_m/1e15
sn3_m_e = sn3_m/1e15
sn4_m_e = sn4_m/1e15
sn1_o_e = sn1_o/1e15
sn2_o_e = sn2_o/1e15
sn3_o_e = sn3_o/1e15
sn4_o_e = sn4_o/1e15
# Calculate differences
diff_sn1 = sn1_m_e - sn1_o_e
diff_sn2 = sn2_m_e - sn2_o_e
diff_sn3 = sn3_m_e - sn3_o_e
diff_sn4 = sn4_m_e - sn4_o_e
diff_sn1_dca = sn1_p_e - sn1_o_e
diff_sn2_dca = sn2_p_e - sn2_o_e
diff_sn3_dca = sn3_p_e - sn3_o_e
diff_sn4_dca = sn4_p_e - sn4_o_e
" Applying Linear Scaling "
# Get mean values
mean_o = np.mean((obs/scale_obs)[:,:,:,0], axis=0)
mean_m = np.mean((model/scale_model)[:,:,:,0], axis=0)
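#Linear scaling (LS) here adds the mean training difference between observations and the
#model (mean_o - mean_m) to each seasonal model field, then converts the result to
#1e15 molecules/cm^2 like the other maps.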
sn1_ls = (sn1_m + (mean_o - mean_m))/1e15
sn2_ls = (sn2_m + (mean_o - mean_m))/1e15
sn3_ls = (sn3_m + (mean_o - mean_m))/1e15
sn4_ls = (sn4_m + (mean_o - mean_m))/1e15
#Differences between LS and observations
diff_sn1_ls = sn1_ls - sn1_o_e
diff_sn2_ls = sn2_ls - sn2_o_e
diff_sn3_ls = sn3_ls - sn3_o_e
diff_sn4_ls = sn4_ls - sn4_o_e
" Making Plots"
coord = xr.Dataset({'lat': (['lat'], np.arange(-13.000000,7.000000, 0.21)),
'lon': (['lon'], np.arange(27.500000,43.500000, 0.20))})
fig=plt.figure(figsize=(16, 28), dpi=500)
mpl.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 22
plt.rcParams['axes.linewidth'] = 1
plt.gcf().subplots_adjust(hspace=0.08, wspace=0)
ax = plt.subplot(7,4,1, projection=ccrs.PlateCarree())
plt_sn1_o = plt.pcolormesh(coord['lon'], coord['lat'], sn1_o_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.title('DJF')
plt.text(0.13, 0.82, 'OMI', rotation='vertical',
transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,2, projection=ccrs.PlateCarree())
plt_sn2_o = plt.pcolormesh(coord['lon'], coord['lat'], sn2_o_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.title('MAM')
ax = plt.subplot(7,4,3, projection=ccrs.PlateCarree())
plt_sn3_o = plt.pcolormesh(coord['lon'], coord['lat'], sn3_o_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.title('JJA')
ax = plt.subplot(7,4,4, projection=ccrs.PlateCarree())
plt_sn4_o = plt.pcolormesh(coord['lon'], coord['lat'], sn4_o_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.title('SON')
ax = plt.subplot(7,4,5, projection=ccrs.PlateCarree())
plt_sn1_m = plt.pcolormesh(coord['lon'], coord['lat'], sn1_m_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.7, 'WRF_Chem', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,6, projection=ccrs.PlateCarree())
plt_sn2_m = plt.pcolormesh(coord['lon'], coord['lat'], sn2_m_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,7, projection=ccrs.PlateCarree())
plt_sn3_m = plt.pcolormesh(coord['lon'], coord['lat'], sn3_m_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,8, projection=ccrs.PlateCarree())
plt_sn4_m = plt.pcolormesh(coord['lon'], coord['lat'], sn4_m_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,9, projection=ccrs.PlateCarree())
plt_sn1_lr = plt.pcolormesh(coord['lon'], coord['lat'], sn1_ls, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.588, 'WRF_LS', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,10, projection=ccrs.PlateCarree())
plt_sn2_lr = plt.pcolormesh(coord['lon'], coord['lat'], sn2_ls, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,11, projection=ccrs.PlateCarree())
plt_sn3_lr = plt.pcolormesh(coord['lon'], coord['lat'], sn3_ls, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,12, projection=ccrs.PlateCarree())
plt_sn4_lr = plt.pcolormesh(coord['lon'], coord['lat'], sn4_ls, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,13, projection=ccrs.PlateCarree())
plt_sn1_p = plt.pcolormesh(coord['lon'], coord['lat'], sn1_p_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.48, 'WRF_DCA', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,14, projection=ccrs.PlateCarree())
plt_sn2_p = plt.pcolormesh(coord['lon'], coord['lat'], sn2_p_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,15, projection=ccrs.PlateCarree())
plt_sn3_p = plt.pcolormesh(coord['lon'], coord['lat'], sn3_p_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,16, projection=ccrs.PlateCarree())
plt_sn4_p = plt.pcolormesh(coord['lon'], coord['lat'], sn4_p_e, cmap='jet', vmin=-0.4, vmax=3.7)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
cb3_axes = plt.gcf().add_axes([0.89, 0.472, 0.013, 0.38])
cb3 = plt.colorbar(plt_sn4_p, cb3_axes,
label='$\mathregular{NO_2}$ VCD ($\mathregular{x10^{15}}$ molecules / $\mathregular{cm^2}$)',
orientation='vertical')
divnorm = colors.TwoSlopeNorm(vmin=-3.4, vcenter=0, vmax=3.4)
ax = plt.subplot(7,4,17, projection=ccrs.PlateCarree())
plt_sn1_df = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn1, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.35, 'WRF_Chem - OMI', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,18, projection=ccrs.PlateCarree())
plt_sn2_df = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn2, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,19, projection=ccrs.PlateCarree())
plt_sn3_df = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn3, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,20, projection=ccrs.PlateCarree())
plt_sn4_df = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn4, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,21, projection=ccrs.PlateCarree())
plt_sn1_dflr = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn1_ls, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.248, 'WRF_LS - OMI', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,22, projection=ccrs.PlateCarree())
plt_sn2_dflr = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn2_ls, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,23, projection=ccrs.PlateCarree())
plt_sn3_dfls = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn3_ls, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,24, projection=ccrs.PlateCarree())
plt_sn4_dfls = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn4_ls, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,25, projection=ccrs.PlateCarree())
plt_sn1_dfca = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn1_dca, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
plt.text(0.13, 0.134, 'WRF_DCA - OMI', rotation='vertical', transform=plt.gcf().transFigure)
ax = plt.subplot(7,4,26, projection=ccrs.PlateCarree())
plt_sn2_dfca = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn2_dca, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,27, projection=ccrs.PlateCarree())
plt_sn3_dfca = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn3_dca, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
ax = plt.subplot(7,4,28, projection=ccrs.PlateCarree())
plt_sn4_dfca = plt.pcolormesh(coord['lon'], coord['lat'], diff_sn4_dca, cmap='bwr', norm=divnorm)
ax.coastlines(resolution='10m', color='black', linewidth=0.9)
lakes_10m = cfeature.NaturalEarthFeature('physical','lakes','10m')
ax.add_feature(cfeature.BORDERS, linewidth=1.2)
ax.add_feature(lakes_10m, facecolor='none', edgecolor='k')
plt.ylim(-12.8, 6.8)
plt.xlim(28, 43)
cb4_axes = plt.gcf().add_axes([0.89, 0.155, 0.013, 0.255])
cb4 = plt.colorbar(plt_sn4_dfca, cb4_axes,
label='$\mathregular{NO_2}$ VCD ($\mathregular{x10^{15}}$ molecules / $\mathregular{cm^2}$)',
orientation='vertical')
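# Each of the 28 panels above repeats the same coastline/border/lake/extent decoration.
# The helper below is only a sketch of how one panel could be drawn (it is not called
# anywhere in this script); it assumes the globals coord, ccrs, cfeature and plt from above.
def _draw_panel(index, data, cmap='jet', vmin=-0.4, vmax=3.7, norm=None, title=None):
    """Draw one PlateCarree panel of the 7x4 grid with the shared map decorations."""
    ax = plt.subplot(7, 4, index, projection=ccrs.PlateCarree())
    mesh = plt.pcolormesh(coord['lon'], coord['lat'], data, cmap=cmap, norm=norm,
                          vmin=None if norm else vmin, vmax=None if norm else vmax)
    ax.coastlines(resolution='10m', color='black', linewidth=0.9)
    ax.add_feature(cfeature.BORDERS, linewidth=1.2)
    ax.add_feature(cfeature.NaturalEarthFeature('physical', 'lakes', '10m'),
                   facecolor='none', edgecolor='k')
    plt.ylim(-12.8, 6.8)
    plt.xlim(28, 43)
    if title:
        plt.title(title)
    return mesh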
" Making calculations for RMSE and NMB by month and plotting them "
#Monthly domain statistics (test-set index 0 = December, ..., 11 = November)
#Observation means
dec_obs = np.mean(obs_test[0,:,:]/1e15)
jan_obs = np.mean(obs_test[1,:,:]/1e15)
feb_obs = np.mean(obs_test[2,:,:]/1e15)
mar_obs = np.mean(obs_test[3,:,:]/1e15)
apr_obs = np.mean(obs_test[4,:,:]/1e15)
may_obs = np.mean(obs_test[5,:,:]/1e15)
june_obs = np.mean(obs_test[6,:,:]/1e15)
july_obs = np.mean(obs_test[7,:,:]/1e15)
aug_obs = np.mean(obs_test[8,:,:]/1e15)
sep_obs = np.mean(obs_test[9,:,:]/1e15)
oct_obs = np.mean(obs_test[10,:,:]/1e15)
nov_obs = np.mean(obs_test[11,:,:]/1e15)
#Observation standard deviations
dec_obs_std = np.std(obs_test[0,:,:]/1e15)
jan_obs_std = np.std(obs_test[1,:,:]/1e15)
feb_obs_std = np.std(obs_test[2,:,:]/1e15)
mar_obs_std = np.std(obs_test[3,:,:]/1e15)
apr_obs_std = np.std(obs_test[4,:,:]/1e15)
may_obs_std = np.std(obs_test[5,:,:]/1e15)
june_obs_std = np.std(obs_test[6,:,:]/1e15)
july_obs_std = np.std(obs_test[7,:,:]/1e15)
aug_obs_std = np.std(obs_test[8,:,:]/1e15)
sep_obs_std = np.std(obs_test[9,:,:]/1e15)
oct_obs_std = np.std(obs_test[10,:,:]/1e15)
nov_obs_std = np.std(obs_test[11,:,:]/1e15)
#WRF-chem model means
dec_model = np.mean(model_test[0,:,:,0]/scale_model_test/1e15)
jan_model = np.mean(model_test[1,:,:,0]/scale_model_test/1e15)
feb_model = np.mean(model_test[2,:,:,0]/scale_model_test/1e15)
mar_model = np.mean(model_test[3,:,:,0]/scale_model_test/1e15)
apr_model = np.mean(model_test[4,:,:,0]/scale_model_test/1e15)
may_model = np.mean(model_test[5,:,:,0]/scale_model_test/1e15)
june_model = np.mean(model_test[6,:,:,0]/scale_model_test/1e15)
july_model = np.mean(model_test[7,:,:,0]/scale_model_test/1e15)
aug_model = np.mean(model_test[8,:,:,0]/scale_model_test/1e15)
sep_model = np.mean(model_test[9,:,:,0]/scale_model_test/1e15)
oct_model = np.mean(model_test[10,:,:,0]/scale_model_test/1e15)
nov_model = np.mean(model_test[11,:,:,0]/scale_model_test/1e15)
#WRF-chem model standard deviations
dec_model_std = np.std(model_test[0,:,:,0]/scale_model_test/1e15)
jan_model_std = np.std(model_test[1,:,:,0]/scale_model_test/1e15)
feb_model_std = np.std(model_test[2,:,:,0]/scale_model_test/1e15)
mar_model_std = np.std(model_test[3,:,:,0]/scale_model_test/1e15)
apr_model_std = np.std(model_test[4,:,:,0]/scale_model_test/1e15)
may_model_std = np.std(model_test[5,:,:,0]/scale_model_test/1e15)
june_model_std = np.std(model_test[6,:,:,0]/scale_model_test/1e15)
july_model_std = np.std(model_test[7,:,:,0]/scale_model_test/1e15)
aug_model_std = np.std(model_test[8,:,:,0]/scale_model_test/1e15)
sep_model_std = np.std(model_test[9,:,:,0]/scale_model_test/1e15)
oct_model_std = np.std(model_test[10,:,:,0]/scale_model_test/1e15)
nov_model_std = np.std(model_test[11,:,:,0]/scale_model_test/1e15)
#WRF-DCA prediction means
dec_dca = np.mean(prediction[0,:,:,0]/scale_model_val/1e15)
jan_dca = np.mean(prediction[1,:,:,0]/scale_model_val/1e15)
feb_dca = np.mean(prediction[2,:,:,0]/scale_model_val/1e15)
mar_dca = np.mean(prediction[3,:,:,0]/scale_model_val/1e15)
apr_dca = np.mean(prediction[4,:,:,0]/scale_model_val/1e15)
may_dca = np.mean(prediction[5,:,:,0]/scale_model_val/1e15)
june_dca = np.mean(prediction[6,:,:,0]/scale_model_val/1e15)
july_dca = np.mean(prediction[7,:,:,0]/scale_model_val/1e15)
aug_dca = np.mean(prediction[8,:,:,0]/scale_model_val/1e15)
sep_dca = np.mean(prediction[9,:,:,0]/scale_model_val/1e15)
oct_dca = np.mean(prediction[10,:,:,0]/scale_model_val/1e15)
nov_dca = np.mean(prediction[11,:,:,0]/scale_model_val/1e15)
#WRF-DCA prediction standard deviations
dec_dca_std = np.std(prediction[0,:,:,0]/scale_model_val/1e15)
jan_dca_std = np.std(prediction[1,:,:,0]/scale_model_val/1e15)
feb_dca_std = np.std(prediction[2,:,:,0]/scale_model_val/1e15)
mar_dca_std = np.std(prediction[3,:,:,0]/scale_model_val/1e15)
apr_dca_std = np.std(prediction[4,:,:,0]/scale_model_val/1e15)
may_dca_std = np.std(prediction[5,:,:,0]/scale_model_val/1e15)
june_dca_std = np.std(prediction[6,:,:,0]/scale_model_val/1e15)
july_dca_std = np.std(prediction[7,:,:,0]/scale_model_val/1e15)
aug_dca_std = np.std(prediction[8,:,:,0]/scale_model_val/1e15)
sep_dca_std = np.std(prediction[9,:,:,0]/scale_model_val/1e15)
oct_dca_std = np.std(prediction[10,:,:,0]/scale_model_val/1e15)
nov_dca_std = np.std(prediction[11,:,:,0]/scale_model_val/1e15)
#WRF-LS model means
dec_ls= np.mean(((model_test[0,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
jan_ls= np.mean(((model_test[1,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
feb_ls= np.mean(((model_test[2,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
mar_ls= np.mean(((model_test[3,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
apr_ls= np.mean(((model_test[4,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
may_ls= np.mean(((model_test[5,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
june_ls= np.mean(((model_test[6,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
july_ls= np.mean(((model_test[7,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
aug_ls= np.mean(((model_test[8,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
sep_ls= np.mean(((model_test[9,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
oct_ls= np.mean(((model_test[10,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
nov_ls= np.mean(((model_test[11,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
#WRF-LS standard deviations
dec_ls_std = np.std(((model_test[0,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
jan_ls_std = np.std(((model_test[1,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
feb_ls_std = np.std(((model_test[2,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
mar_ls_std = np.std(((model_test[3,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
apr_ls_std = np.std(((model_test[4,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
may_ls_std = np.std(((model_test[5,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
june_ls_std = np.std(((model_test[6,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
july_ls_std = np.std(((model_test[7,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
aug_ls_std = np.std(((model_test[8,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
sep_ls_std = np.std(((model_test[9,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
oct_ls_std = np.std(((model_test[10,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
nov_ls_std = np.std(((model_test[11,:,:,0]/scale_model_test)+(mean_o - mean_m))/1e15)
#Calculate Normalized Mean Bias (NMB) for WRF-chem
NMB_dec = np.mean((model_test[0,:,:,0]/scale_model_test/1e15) - (obs_test[0,:,:]/1e15))/np.mean(obs_test[0,:,:]/1e15)
NMB_jan = np.mean((model_test[1,:,:,0]/scale_model_test/1e15) - (obs_test[1,:,:]/1e15))/np.mean(obs_test[1,:,:]/1e15)
NMB_feb = np.mean((model_test[2,:,:,0]/scale_model_test/1e15) - (obs_test[2,:,:]/1e15))/np.mean(obs_test[2,:,:]/1e15)
NMB_mar = np.mean((model_test[3,:,:,0]/scale_model_test/1e15) - (obs_test[3,:,:]/1e15))/np.mean(obs_test[3,:,:]/1e15)
NMB_apr = np.mean((model_test[4,:,:,0]/scale_model_test/1e15) - (obs_test[4,:,:]/1e15))/np.mean(obs_test[4,:,:]/1e15)
NMB_may = np.mean((model_test[5,:,:,0]/scale_model_test/1e15) - (obs_test[5,:,:]/1e15))/np.mean(obs_test[5,:,:]/1e15)
NMB_june = np.mean((model_test[6,:,:,0]/scale_model_test/1e15) - (obs_test[6,:,:]/1e15))/np.mean(obs_test[6,:,:]/1e15)
NMB_july = np.mean((model_test[7,:,:,0]/scale_model_test/1e15) - (obs_test[7,:,:]/1e15))/np.mean(obs_test[7,:,:]/1e15)
NMB_aug = np.mean((model_test[8,:,:,0]/scale_model_test/1e15) - (obs_test[8,:,:]/1e15))/np.mean(obs_test[8,:,:]/1e15)
NMB_sep = np.mean((model_test[9,:,:,0]/scale_model_test/1e15) - (obs_test[9,:,:]/1e15))/np.mean(obs_test[9,:,:]/1e15)
NMB_oct = np.mean((model_test[10,:,:,0]/scale_model_test/1e15) - (obs_test[10,:,:]/1e15))/np.mean(obs_test[10,:,:]/1e15)
NMB_nov = np.mean((model_test[11,:,:,0]/scale_model_test/1e15) - (obs_test[11,:,:]/1e15))/np.mean(obs_test[11,:,:]/1e15)
#Calculate Normalized Mean Bias (NMB) for WRF-DCA
NMB_dca_dec = np.mean((prediction[0,:,:,0]/scale_model_val/1e15) - (obs_test[0,:,:]/1e15))/np.mean(obs_test[0,:,:]/1e15)
NMB_dca_jan = np.mean((prediction[1,:,:,0]/scale_model_val/1e15) - (obs_test[1,:,:]/1e15))/np.mean(obs_test[1,:,:]/1e15)
NMB_dca_feb = np.mean((prediction[2,:,:,0]/scale_model_val/1e15) - (obs_test[2,:,:]/1e15))/np.mean(obs_test[2,:,:]/1e15)
NMB_dca_mar = np.mean((prediction[3,:,:,0]/scale_model_val/1e15) - (obs_test[3,:,:]/1e15))/np.mean(obs_test[3,:,:]/1e15)
NMB_dca_apr = np.mean((prediction[4,:,:,0]/scale_model_val/1e15) - (obs_test[4,:,:]/1e15))/np.mean(obs_test[4,:,:]/1e15)
NMB_dca_may = np.mean((prediction[5,:,:,0]/scale_model_val/1e15) - (obs_test[5,:,:]/1e15))/np.mean(obs_test[5,:,:]/1e15)
NMB_dca_june = np.mean((prediction[6,:,:,0]/scale_model_val/1e15) - (obs_test[6,:,:]/1e15))/np.mean(obs_test[6,:,:]/1e15)
NMB_dca_july = np.mean((prediction[7,:,:,0]/scale_model_val/1e15) - (obs_test[7,:,:]/1e15))/np.mean(obs_test[7,:,:]/1e15)
NMB_dca_aug = np.mean((prediction[8,:,:,0]/scale_model_val/1e15) - (obs_test[8,:,:]/1e15))/np.mean(obs_test[8,:,:]/1e15)
NMB_dca_sep = np.mean((prediction[9,:,:,0]/scale_model_val/1e15) - (obs_test[9,:,:]/1e15))/np.mean(obs_test[9,:,:]/1e15)
NMB_dca_oct = np.mean((prediction[10,:,:,0]/scale_model_val/1e15) - (obs_test[10,:,:]/1e15))/np.mean(obs_test[10,:,:]/1e15)
NMB_dca_nov = np.mean((prediction[11,:,:,0]/scale_model_val/1e15) - (obs_test[11,:,:]/1e15))/np.mean(obs_test[11,:,:]/1e15)
#Calculate Normalized Mean Bias (NMB) for WRF-LS. One may choose not to divide by 1e15, because it will be cancelled out
# in the calculation
NMB_ls_dec = np.mean(((model_test[0,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[0,:,:])/np.mean(obs_test[0,:,:])
NMB_ls_jan = np.mean(((model_test[1,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[1,:,:])/np.mean(obs_test[1,:,:])
NMB_ls_feb = np.mean(((model_test[2,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[2,:,:])/np.mean(obs_test[2,:,:])
NMB_ls_mar = np.mean(((model_test[3,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[3,:,:])/np.mean(obs_test[3,:,:])
NMB_ls_apr = np.mean(((model_test[4,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[4,:,:])/np.mean(obs_test[4,:,:])
NMB_ls_may = np.mean(((model_test[5,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[5,:,:])/np.mean(obs_test[5,:,:])
NMB_ls_june = np.mean(((model_test[6,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[6,:,:])/np.mean(obs_test[6,:,:])
NMB_ls_july = np.mean(((model_test[7,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[7,:,:])/np.mean(obs_test[7,:,:])
NMB_ls_aug = np.mean(((model_test[8,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[8,:,:])/np.mean(obs_test[8,:,:])
NMB_ls_sep = np.mean(((model_test[9,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[9,:,:])/np.mean(obs_test[9,:,:])
NMB_ls_oct = np.mean(((model_test[10,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[10,:,:])/np.mean(obs_test[10,:,:])
NMB_ls_nov = np.mean(((model_test[11,:,:,0]/scale_model_test)+(mean_o - mean_m)) - obs_test[11,:,:])/np.mean(obs_test[11,:,:])
#Calculate Root Mean Square Error (RMSE) for WRF-Chem
RMSE_dec = np.sqrt(np.mean(((model_test[0,:,:,0]/scale_model_test/1e15) - (obs_test[0,:,:]/1e15))**2))
RMSE_jan = np.sqrt(np.mean(((model_test[1,:,:,0]/scale_model_test/1e15) - (obs_test[1,:,:]/1e15))**2))
RMSE_feb = np.sqrt(np.mean(((model_test[2,:,:,0]/scale_model_test/1e15) - (obs_test[2,:,:]/1e15))**2))
RMSE_mar = np.sqrt(np.mean(((model_test[3,:,:,0]/scale_model_test/1e15) - (obs_test[3,:,:]/1e15))**2))
RMSE_apr = np.sqrt(np.mean(((model_test[4,:,:,0]/scale_model_test/1e15) - (obs_test[4,:,:]/1e15))**2))
RMSE_may = np.sqrt(np.mean(((model_test[5,:,:,0]/scale_model_test/1e15) - (obs_test[5,:,:]/1e15))**2))
RMSE_june = np.sqrt(np.mean(((model_test[6,:,:,0]/scale_model_test/1e15) - (obs_test[6,:,:]/1e15))**2))
RMSE_july = np.sqrt(np.mean(((model_test[7,:,:,0]/scale_model_test/1e15) - (obs_test[7,:,:]/1e15))**2))
RMSE_aug = np.sqrt(np.mean(((model_test[8,:,:,0]/scale_model_test/1e15) - (obs_test[8,:,:]/1e15))**2))
RMSE_sep = np.sqrt(np.mean(((model_test[9,:,:,0]/scale_model_test/1e15) - (obs_test[9,:,:]/1e15))**2))
RMSE_oct = np.sqrt(np.mean(((model_test[10,:,:,0]/scale_model_test/1e15) - (obs_test[10,:,:]/1e15))**2))
RMSE_nov = np.sqrt(np.mean(((model_test[11,:,:,0]/scale_model_test/1e15) - (obs_test[11,:,:]/1e15))**2))
#Calculate Root Mean Square Error (RMSE) for WRF-DCA
RMSE_dca_dec = np.sqrt(np.mean(((prediction[0,:,:,0]/scale_model_val/1e15) - (obs_test[0,:,:]/1e15))**2))
RMSE_dca_jan = np.sqrt(np.mean(((prediction[1,:,:,0]/scale_model_val/1e15) - (obs_test[1,:,:]/1e15))**2))
RMSE_dca_feb = np.sqrt(np.mean(((prediction[2,:,:,0]/scale_model_val/1e15) - (obs_test[2,:,:]/1e15))**2))
RMSE_dca_mar = np.sqrt(np.mean(((prediction[3,:,:,0]/scale_model_val/1e15) - (obs_test[3,:,:]/1e15))**2))
RMSE_dca_apr = np.sqrt(np.mean(((prediction[4,:,:,0]/scale_model_val/1e15) - (obs_test[4,:,:]/1e15))**2))
RMSE_dca_may = np.sqrt(np.mean(((prediction[5,:,:,0]/scale_model_val/1e15) - (obs_test[5,:,:]/1e15))**2))
RMSE_dca_june = np.sqrt(np.mean(((prediction[6,:,:,0]/scale_model_val/1e15) - (obs_test[6,:,:]/1e15))**2))
RMSE_dca_july = np.sqrt(np.mean(((prediction[7,:,:,0]/scale_model_val/1e15) - (obs_test[7,:,:]/1e15))**2))
RMSE_dca_aug = np.sqrt(np.mean(((prediction[8,:,:,0]/scale_model_val/1e15) - (obs_test[8,:,:]/1e15))**2))
RMSE_dca_sep = np.sqrt(np.mean(((prediction[9,:,:,0]/scale_model_val/1e15) - (obs_test[9,:,:]/1e15))**2))
RMSE_dca_oct = np.sqrt(np.mean(((prediction[10,:,:,0]/scale_model_val/1e15) - (obs_test[10,:,:]/1e15))**2))
RMSE_dca_nov = np.sqrt(np.mean(((prediction[11,:,:,0]/scale_model_val/1e15) - (obs_test[11,:,:]/1e15))**2))
#Calculate Root Mean Square Error (RMSE) for WRF_LS
RMSE_ls_dec = np.sqrt(np.mean(((model_test[0,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[0,:,:]))/1e15)**2))
RMSE_ls_jan = np.sqrt(np.mean(((model_test[1,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[1,:,:]))/1e15)**2))
RMSE_ls_feb = np.sqrt(np.mean(((model_test[2,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[2,:,:]))/1e15)**2))
RMSE_ls_mar = np.sqrt(np.mean(((model_test[3,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[3,:,:]))/1e15)**2))
RMSE_ls_apr = np.sqrt(np.mean(((model_test[4,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[4,:,:]))/1e15)**2))
RMSE_ls_may = np.sqrt(np.mean(((model_test[5,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[5,:,:]))/1e15)**2))
RMSE_ls_june = np.sqrt(np.mean(((model_test[6,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[6,:,:]))/1e15)**2))
RMSE_ls_july = np.sqrt(np.mean(((model_test[7,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[7,:,:]))/1e15)**2))
RMSE_ls_aug = np.sqrt(np.mean(((model_test[8,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[8,:,:]))/1e15)**2))
RMSE_ls_sep = np.sqrt(np.mean(((model_test[9,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[9,:,:]))/1e15)**2))
RMSE_ls_oct = np.sqrt(np.mean(((model_test[10,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[10,:,:]))/1e15)**2))
RMSE_ls_nov = np.sqrt(np.mean(((model_test[11,:,:,0]/scale_model_test)/1e15+((mean_o - mean_m) - (obs_test[11,:,:]))/1e15)**2))
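#The per-month blocks above can also be generated in a loop; the sketch below collects the
#same WRF-Chem NMB and RMSE values into lists, assuming test index 0 is December and
#index 11 is November as implied by the variable names above.
nmb_wrf_loop, rmse_wrf_loop = [], []
for k in range(12):
    o_k = obs_test[k,:,:]/1e15
    m_k = model_test[k,:,:,0]/scale_model_test/1e15
    nmb_wrf_loop.append(np.mean(m_k - o_k)/np.mean(o_k))
    rmse_wrf_loop.append(np.sqrt(np.mean((m_k - o_k)**2)))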
#Plotting Statistics
import pandas as pd
from matplotlib.transforms import Affine2D
d_no2 = pd.read_csv('C:/python_work/phd/paper2/new_data/no2/no2_phase2/no2_amount_phase2.csv')
d_rmse = pd.read_csv('C:/python_work/phd/paper2/new_data/no2/no2_phase2/no2_rmse_phase2.csv')
d_bias = pd.read_csv('C:/python_work/phd/paper2/new_data/no2/no2_phase2/no2_bias_phase2.csv')
fig = plt.subplots(figsize=(14, 8), dpi = 500)
mpl.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 16
plt.rcParams['axes.linewidth'] = 1
plt.gcf().subplots_adjust(hspace=0.3, wspace=0.25)
ax1 = plt.subplot(2,2,1)
trans1 = Affine2D().translate(-0.1, 0.0) + ax1.transData
trans2 = Affine2D().translate(+0.1, 0.0) + ax1.transData
trans3 = Affine2D().translate(+0.15, 0.0) + ax1.transData
p1=plt.plot('month', 'OMI', data=d_no2, color='brown', linewidth=2)
plt.errorbar('month', 'OMI', 'OMI_std', data=d_no2, color='brown', linestyle='None',
marker='o', label=None, elinewidth=0.5, transform=trans1)
p2=plt.plot('month', 'WRF-Chem', data=d_no2, color='black', linewidth=2)
plt.errorbar('month', 'WRF-Chem', 'WRF-Chem_std', data=d_no2, color='black', linestyle='None',
marker='o', label=None, elinewidth=0.5)
p3=plt.plot('month', 'WRF-LS', data=d_no2, color='limegreen', linewidth=2)
plt.errorbar('month', 'WRF-LS', 'WRF-LS_std', data=d_no2, color='limegreen', linestyle='None',
marker='o', label=None, elinewidth=0.5, transform=trans3)
p4=plt.plot('month', 'WRF-DCA', data=d_no2, color='orange', linewidth=2)
plt.errorbar('month', 'WRF-DCA', 'WRF-DCA_std', data=d_no2, color='orange', linestyle='None',
marker='o', label=None, elinewidth=0.5, transform=trans2)
plt.ylabel('$\mathregular{NO_2}$ VCD ($\mathregular{x10^{15}}$ molecules / $\mathregular{cm^2}$)')
plt.text(0.9, 0.89, '(a)', transform=ax1.transAxes)
plt.ylim([0, 10])
labels =['OMI','WRF-Chem', 'WRF-LS', 'WRF-DCA']
plt.legend([p1, p2, p3, p4], labels=labels, loc='upper left',
bbox_to_anchor=(1.3, -0.3), edgecolor='none')
ax1 = plt.subplot(2,2,2)
p5=plt.plot('month', 'WRF-Chem', data=d_rmse, color='black', linestyle='None', marker='o')
p6=plt.plot('month', 'WRF-LS', data=d_rmse, color='limegreen', linestyle='None', marker='o')
p7=plt.plot('month', 'WRF-DCA', data=d_rmse, color='orange', linestyle='None', marker='o')
p8=plt.ylabel('RMSE ($\mathregular{x10^{15}}$ molecules / $\mathregular{cm^2}$)')
plt.text(0.9, 0.9, '(b)', transform=ax1.transAxes)
plt.ylim([0, 8])
#plt.legend()
ax1 = plt.subplot(2,2,3)
p9=plt.plot('month', 'WRF-Chem', data=d_bias, color='black', linestyle='None', marker='o')
p10=plt.plot('month', 'WRF-LS', data=d_bias, color='limegreen', linestyle='None', marker='o')
p11=plt.plot('month', 'WRF-DCA', data=d_bias, color='orange', linestyle='None', marker='o')
plt.ylabel('NMB')
plt.axhline(0, color='black', linestyle='--')
plt.text(0.9, 0.89, '(c)', transform=ax1.transAxes)
plt.ylim([-2, 6])
labels_d =['WRF-Chem', 'WRF-LS', 'WRF-DCA']
plt.legend([p9, p10, p11], labels=labels_d, loc='upper left',
bbox_to_anchor=(1.3, 0.5), edgecolor='none')
" Correlations and Scatter Plots "
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
import pandas as pd
from scipy.stats import gaussian_kde
# Scatter Plots
fig=plt.figure(figsize=(16, 14), dpi=500)
mpl.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 1
plt.gcf().subplots_adjust(hspace=0.1, wspace=0.1)
sc_norm = colors.TwoSlopeNorm(6, 1, 13)
#WRF-Chem model
ax = plt.subplot(3,4,1)
x1 = sn1_o_e.flatten()
y1 = (np.asarray(sn1_m_e)).flatten()
x1y1 = np.vstack([x1,y1])
z1 = gaussian_kde(x1y1)(x1y1)
idx1 = z1.argsort()
x1, y1, z1 = x1[idx1], y1[idx1], z1[idx1]
plt.scatter(x1, y1, c=z1, marker='.', norm=sc_norm, cmap='gnuplot')
plt.ylabel('WRF-Chem $\mathregular{NO_2}$')
plt.title('DJF (R=0.15)')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
ax = plt.subplot(3,4,2)
x2 = sn2_o_e.flatten()
y2 = (np.asarray(sn2_m_e)).flatten()
x2y2 = np.vstack([x2,y2])
z2 = gaussian_kde(x2y2)(x2y2)
idx2 = z2.argsort()
x2, y2, z2 = x2[idx2], y2[idx2], z2[idx2]
plt.scatter(x2, y2, c=z2, marker='.', norm=sc_norm, cmap='gnuplot')
plt.title('MAM (R=0.07)')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
ax = plt.subplot(3,4,3)
x3 = sn3_o_e.flatten()
y3 = (np.asarray(sn3_m_e)).flatten()
x3y3 = np.vstack([x3,y3])
z3 = gaussian_kde(x3y3)(x3y3)
idx3 = z3.argsort()
x3, y3, z3 = x3[idx3], y3[idx3], z3[idx3]
plt.scatter(x3, y3, c=z3, marker='.', norm=sc_norm, cmap='gnuplot')
plt.title('JJA (R=0.64)')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
ax = plt.subplot(3,4,4)
x4 = sn4_o_e.flatten()
y4 = (np.asarray(sn4_m_e)).flatten()
x4y4 = np.vstack([x4,y4])
z4 = gaussian_kde(x4y4)(x4y4)
idx4 = z4.argsort()
x4, y4, z4 = x4[idx4], y4[idx4], z4[idx4]
plt.scatter(x4, y4, c=z4, marker='.', norm=sc_norm, cmap='gnuplot')
plt.title('SON (R=0.53)')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
#WRF-LS
ax = plt.subplot(3,4,5)
x5 = sn1_o_e.flatten()
y5 = (np.asarray(sn1_ls)).flatten()
x5y5 = np.vstack([x5,y5])
z5 = gaussian_kde(x5y5)(x5y5)
idx5 = z5.argsort()
x5, y5, z5 = x5[idx5], y5[idx5], z5[idx5]
plt.scatter(x5, y5, c=z5, marker='.', norm=sc_norm, cmap='gnuplot')
plt.ylabel('WRF-LS $\mathregular{NO_2}$')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('DJF (R=0.15)')
ax = plt.subplot(3,4,6)
x6 = sn2_o_e.flatten()
y6 = (np.asarray(sn2_ls)).flatten()
x6y6 = np.vstack([x6,y6])
z6 = gaussian_kde(x6y6)(x6y6)
idx6 = z6.argsort()
x6, y6, z6 = x6[idx6], y6[idx6], z6[idx6]
plt.scatter(x6, y6, c=z6, marker='.', norm=sc_norm, cmap='gnuplot')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('MAM (R=0.1)')
ax = plt.subplot(3,4,7)
x7 = sn3_o_e.flatten()
y7 = (np.asarray(sn3_ls)).flatten()
x7y7 = np.vstack([x7,y7])
z7 = gaussian_kde(x7y7)(x7y7)
idx7 = z7.argsort()
x7, y7, z7 = x7[idx7], y7[idx7], z7[idx7]
plt.scatter(x7, y7, c=z7, marker='.', norm=sc_norm, cmap='gnuplot')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('JJA (R=0.67)')
ax = plt.subplot(3,4,8)
x8 = sn4_o_e.flatten()
y8 = (np.asarray(sn4_ls)).flatten()
x8y8 = np.vstack([x8,y8])
z8 = gaussian_kde(x8y8)(x8y8)
idx8 = z8.argsort()
x8, y8, z8 = x8[idx8], y8[idx8], z8[idx8]
plot8= plt.scatter(x8, y8, c=z8, marker='.', norm=sc_norm, cmap='gnuplot')
plt.title('SON (R=0.47)')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
#WRF-DCA
ax = plt.subplot(3,4,9)
x9 = sn1_o_e.flatten()
y9 = (np.asarray(sn1_p_e)).flatten()
x9y9 = np.vstack([x9,y9])
z9 = gaussian_kde(x9y9)(x9y9)
idx9 = z9.argsort()
x9, y9, z9 = x9[idx9], y9[idx9], z9[idx9]
plt.scatter(x9, y9, c=z9, marker='.', norm=sc_norm, cmap='gnuplot')
plt.ylabel('WRF-DCA $\mathregular{NO_2}$')
plt.xlabel('OMI $\mathregular{NO_2}$')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('DJF (R=0.75)')
ax = plt.subplot(3,4,10)
x10 = sn2_o_e.flatten()
y10 = (np.asarray(sn2_p_e)).flatten()
x10y10 = np.vstack([x10,y10])
z10 = gaussian_kde(x10y10)(x10y10)
idx10 = z10.argsort()
x10, y10, z10 = x10[idx10], y10[idx10], z10[idx10]
plt.scatter(x10, y10, c=z10, marker='.', norm=sc_norm, cmap='gnuplot')
plt.xlabel('OMI $\mathregular{NO_2}$')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('MAM (R=0.59)')
ax = plt.subplot(3,4,11)
x11 = sn3_o_e.flatten()
y11 = (np.asarray(sn3_p_e)).flatten()
x11y11 = np.vstack([x11,y11])
z11 = gaussian_kde(x11y11)(x11y11)
idx11 = z11.argsort()
x11, y11, z11 = x11[idx11], y11[idx11], z11[idx11]
plt.scatter(x11, y11, c=z11, marker='.', norm=sc_norm, cmap='gnuplot')
plt.xlabel('OMI $\mathregular{NO_2}$')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
plt.title('JJA (R=0.91)')
ax = plt.subplot(3,4,12)
x12 = sn4_o_e.flatten()
y12 = (np.asarray(sn4_p_e)).flatten()
x12y12 = np.vstack([x12,y12])
z12 = gaussian_kde(x12y12)(x12y12)
idx12 = z12.argsort()
x12, y12, z12 = x12[idx12], y12[idx12], z12[idx12]
plot8= plt.scatter(x12, y12, c=z12, marker='.', norm=sc_norm, cmap='gnuplot')
plt.xlabel('OMI $\mathregular{NO_2}$')
plt.title('SON (R=0.91)')
sc_axes = plt.gcf().add_axes([1, 0.155, 0.013, 0.7])
plt.colorbar(plot8, sc_axes, label='Density')
line = mlines.Line2D([0, 1], [0, 1], color='red', linestyle='--', linewidth=2.5)
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
fig.tight_layout()
" Appendix - For plotting histograms and calculating correlations"
# Histograms of the pixel-wise differences (flattened so each 2-D field is treated as one sample)
bins = np.linspace(-4, 2.5, 10)
plt.hist(np.asarray(diff_sn1).flatten(), bins, facecolor='k', label='WRF-Chem')
plt.hist(np.asarray(diff_sn1_dca).flatten(), bins, facecolor='orange', alpha=0.7, label='WRF-DCA')
plt.legend()
# Calculate Correlation
corr_wrf = np.corrcoef(x1, y1)
corr_dca = np.corrcoef(x12, y12)
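#The remaining correlation coefficients can be computed the same way; an optional compact
#form using some of the flattened season/dataset pairs defined above:
for label, (xs, ys) in [('DJF WRF-Chem', (x1, y1)), ('SON WRF-Chem', (x4, y4)),
                        ('DJF WRF-DCA', (x9, y9)), ('SON WRF-DCA', (x12, y12))]:
    print(label, 'R = {:.2f}'.format(np.corrcoef(xs, ys)[0, 1]))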
|
"""
[Weekly #20] Paradigms
https://www.reddit.com/r/dailyprogrammer/comments/2sx7nn/weekly_20_paradigms/
So recently there has been a massive surge in the interest of functional programming, but let's not forget the other
paradigms too!
* Object oriented
* Imperative
* Logic (Prolog)
There are more than I have listed above, but how do you feel about these paradigms?
What's a paradigm you've had interest in but not the time to explore?
What are the advantages and disadvantages of these in both development and in the real-world?
Slightly off-topic but I would love to hear of anyone that started programming functionally versus the usual
imperative/OOP route.
"""
def main():
pass
if __name__ == "__main__":
main()
|
############################################################
#
# functions relative to subdomains
#
############################################################
try:
    from mpi4py import MPI
    #print('- mpi4py : found')
except ImportError:
    print('- mpi4py : not found, please install it')
    exit(0)
from numpy import zeros,arange,ones,cumsum,sqrt,array,meshgrid
from gmg.fortran_multigrid import buffertodomain
from time import time
try:
    from plotutils import plot2d
except ImportError:
    plot2d = None  # optional plotting helper, used only in the __main__ test below
def set_family(myrank,np,mp,ix,iy):
procs=arange(np*mp)
i = procs%np
j = procs//np
col=((i//ix)%2)+2*((j//iy)%2)
if col[myrank]==0:
rank0=myrank
if col[myrank]==1:
rank0=myrank-ix
if col[myrank]==2:
rank0=myrank-iy*np
if col[myrank]==3:
rank0=myrank-ix-iy*np
# print(col[myrank])
if (ix<np) and (iy<mp):
family=array([rank0,rank0+ix,rank0+iy*np,rank0+ix+iy*np])
family=family.reshape((2,2))
elif (ix<np):
if col[myrank] in (0,1):
family=array([rank0,rank0+ix])
else:
family=array([rank0+iy*np,rank0+ix+iy*np])
family=family.reshape((1,2))
elif (iy<mp):
if col[myrank] in (0,2):
family=array([rank0,rank0+iy*np])
else:
family=array([rank0+ix,rank0+iy*np+ix])
family=family.reshape((2,1))
else:
if myrank==0:
print('pb with family')
print(ix,iy,np,mp)
print(col)
exit(0)
# if myrank==0:
# print('defined a new family shape %s / ix,iy=%i,%i / np,mp=%i,%i'%(family.shape,ix,iy,np,mp))
return family
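# Note (inferred from the code above): for the calling rank, set_family returns the ranks of
# its partners spaced ix columns and iy rows apart on the np x mp process grid, i.e. the
# (up to 2x2) group of subdomains that will be gathered together, laid out like that patch
# of the grid.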
class Subdomains(object):
def __init__(self,nh,n,m,comm,family,method=2):
""" (n,m) is the shape of the small subdomain before gathering """
# print('family shape=',family)
np = family.shape[1]
mp = family.shape[0]
sizes = ones(np*mp)*(n*m)
offsets = zeros(np*mp)
offsets[1:] = cumsum(sizes)[:-1]
self.nh = nh
self.n = n
self.m = m
self.family = family
self.np = np
self.mp = mp
self.n1 = 2*nh+(n-2*nh)*np
self.m1 = 2*nh+(m-2*nh)*mp
self.method = method
self.nbtimes = 1 # redundancy factor for timing purpose (should be 1 except for timing)
myrank = comm.Get_rank()
self.myrank = myrank
j1,i1=(family==myrank).nonzero()
self.i1=i1[0]*(n-2*nh)
self.j1=j1[0]*(m-2*nh)
# if self.myrank==0:
# print("define buffers for family",family,"np*mp=",np*mp,"n,m=",n,m)
self.localcomm = MPI.COMM_WORLD.Split(family[0,0],0)
self.sbuff = zeros(n*m)
self.rbuff = zeros(n*m*np*mp)
self.sizes=sizes
self.offsets=offsets
def gather(self,x,y):
# if self.myrank==0:
# print("gather",x.shape,self.sbuff.shape,self.rbuff.shape,self.np,self.mp,self.n1,self.m1)
for k in range(self.nbtimes):
self.localcomm.Allgatherv(x.ravel(),
[self.rbuff,self.sizes,self.offsets,MPI.DOUBLE])
b = self.rbuff.reshape( (self.mp,self.np,self.m,self.n))
buffertodomain(b,y,self.nh,self.m1,self.n1)
def split(self,x,y):
        # it's important to *copy* the subarray into y to ensure
        # contiguity of the data in y
        # this instruction might work best in Fortran
#x=zeros((self.m,self.n))
y[:,:]=x[self.j1:self.j1+self.m,self.i1:self.i1+self.n]
#print(self.m1,self.n1)
# return x
def timeit(self):
nt = 100
        x=zeros( (self.m,self.n))+self.myrank
y=zeros( (self.m1,self.n1))
z=zeros( (self.m,self.n))
t0 = time()
        for kt in range(nt):
self.gather(x,y)
t1 = time()
dta=t1-t0
t0 = time()
        for kt in range(nt):
            self.split(y,z)
t1 = time()
dtb=t1-t0
return dta,dtb
if __name__ =='__main__':
comm=MPI.COMM_WORLD
myrank = comm.Get_rank()
nbofproc = MPI.COMM_WORLD.Get_size()
nh=3
n=40
m=80
method = 2
ix=1
iy=1
np = 2#int(sqrt(nbofproc))
mp = nbofproc//np
n1 = 2*nh+(n-2*nh)*np
m1 = 2*nh+(m-2*nh)*mp
# if np*mp!=nbofproc:
# if myrank==0:
# print('Need exactly 4 processes to run the test, use')
# print('mpirun -np 4 python subdomains.py')
# exit()
# family = arange(4).reshape((2,2),order='F')
family = set_family(myrank,np,mp,ix,iy)
if myrank==0:
print('----------------------------------------')
print('Family:')
print(family)
subdomains = Subdomains(nh,n,m,comm,family,method)
if myrank==0:
print('----------------------------------------')
print('Time:')
# dta,dtb=subdomains.timeit()
# if myrank==0:
# print('gather : %f'%(dta))
# print('split : %f'%(dtb))
# comm.Barrier()
x=zeros( (m,n))+myrank
xp,yp=meshgrid(arange(n)*1.,arange(m)*1.)
if myrank==0:
x=xp.astype(float)#xp.swapaxes(0,1)
else:
x=x*0.
x=yp.astype(float)
z=zeros( (m,n))
y=zeros((m1,n1))
subdomains.gather(x,y)
# subdomains.split(y,z)
if myrank==0:
print('----------------------------------------')
print('y:')
# print(y)
print('----------------------------------------')
print('z:')
# print(z)
        if plot2d is not None:
            plot2d(y,'y')
|
__version__ = '0.1.4.2'
|
from tk_builder.image_readers.image_reader import ImageReader
import numpy
class NumpyImageReader(ImageReader):
fname = None
full_image_nx = int
full_image_ny = int
numpy_image_data = None # type: numpy.ndarray
def __init__(self, numpy_image_data):
"""
Parameters
----------
numpy_image_data : numpy.ndarray
"""
self.numpy_image_data = numpy_image_data
self.full_image_ny, self.full_image_nx = numpy_image_data.shape
def __getitem__(self, key):
return self.numpy_image_data[key]
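# Minimal usage sketch (hypothetical array; it only relies on __getitem__ as defined above):
#
# reader = NumpyImageReader(numpy.zeros((512, 512)))
# chip = reader[0:64, 0:64]   # -> 64x64 numpy array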
|
from threading import Timer
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class Ticker:
def __init__(self, name, duration):
self.name = name
self.duration = duration * 60
def display(self):
print("{} - {}:{:02d}".format(self.name, minutes(self.duration), seconds(self.duration)))
def tick(self):
self.duration -= 1
if __name__ == "__main__":
def say_hello():
print("hello!")
rt = RepeatedTimer(1, say_hello)
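    # A Ticker can be driven by the same RepeatedTimer; sketch only, with illustrative values:
    # pomodoro = Ticker("work", 25)            # 25 minutes
    # rt2 = RepeatedTimer(1, pomodoro.tick)    # decrement once per second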
|
import sys
import jinja2
from devices import DEVICES
from scrapli_cfg import ScrapliCfg
from scrapli import Scrapli
# we can just search at / because the "base_config" is already fully qualified
JINJA_LOADER = jinja2.FileSystemLoader(searchpath="/")
JINJA_ENV = jinja2.Environment(
loader=JINJA_LOADER, trim_blocks=True, undefined=jinja2.StrictUndefined
)
def prepare_device(test_devices):
# push base config via scrapli-cfg to ensure consistent testing experience
for device in test_devices:
base_config = DEVICES[device]["base_config"]
conn_dict = {
"host": DEVICES[device]["host"],
"port": DEVICES[device]["port"],
"auth_username": DEVICES[device]["auth_username"],
"auth_password": DEVICES[device]["auth_password"],
"auth_secondary": DEVICES[device]["auth_secondary"],
"auth_strict_key": DEVICES[device]["auth_strict_key"],
"platform": device,
# nxos on macos w/out acceleration is... slooooooooooow
"timeout_ops": 120,
}
with Scrapli(**conn_dict) as conn:
if device == "cisco_iosxe":
                # fetch the existing crypto "stuff" from the device and stuff it into the config
                # template to avoid netconf complaints -- replacing the crypto strings caused keys
                # to not get registered in the keychain, so netconf-yang connections would get denied
crypto_pki = conn.send_command(command="show run | section crypto")
template = JINJA_ENV.get_template(f"{base_config}.j2")
loaded_base_config = template.render(crypto_pki=crypto_pki.result)
else:
with open(base_config, "r") as f:
loaded_base_config = f.read()
with ScrapliCfg(conn=conn) as cfg_conn:
cfg_conn.load_config(config=loaded_base_config, replace=True)
cfg_conn.commit_config()
if device == "cisco_iosxe":
conn.send_config(config="no file prompt quiet")
def main():
test_devices = sys.argv[1].split(",")
prepare_device(test_devices)
if __name__ == "__main__":
main()
|
"""
Tests for IRC bot logging.
"""
from datetime import datetime, timedelta
from json import dumps, loads
from pycon_bot import log
from treq import post
from twisted.internet import defer, task
from twisted.trial import unittest
from zope.interface import verify
class PyConSiteLogTargetTests(unittest.TestCase):
"""
Tests for a log target that targets the PyCon site.
"""
def setUp(self):
self.target = log.PyConSiteLogTarget("host", "key")
self.target._utcnow = self._utcnow
self._dates = dates()
self.target._post = self._post
self.request_body = None
self.post_deferred = None
def _utcnow(self):
"""A mock utcnow implementation for testing.
"""
return next(self._dates)
def _post(self, url, body):
"""A mock post implementation for testing.
Asserts that the URL is the target's URL. Keeps track of the
request body under ``self.request_body``. Sets ``self.post_deferred``
to a new Deferred, and returns it. (The caller is expected to fire
this at some point.)
"""
self.assertEqual(url, self.target._url)
self.request_body = body
self.post_deferred = d = defer.Deferred()
return d
def test_default_implementations(self):
"""Default implementations of stub targets are what they should be.
"""
self.assertEqual(log.PyConSiteLogTarget._utcnow, datetime.utcnow)
self.assertIdentical(log.PyConSiteLogTarget._post, post)
def test_url(self):
"""The log target determines the correct URL.
"""
expected = "https://host/pycon_api/set_irc_logs/key/"
self.assertEqual(self.target._url, expected)
def test_interface(self):
"""The log target implements the log target interface.
"""
verify.verifyObject(log.ILogTarget, self.target)
def test_flush_empty(self):
"""Flushing works when the buffer is empty.
Flushing with no buffered messages returns a deferred that is
already fired, the buffer is unmodified, no request is made.
"""
old_buffer = self.target._buffer
d = self.target.flush()
self.successResultOf(d)
self.assertIdentical(self.target._buffer, old_buffer)
def test_flush(self):
"""Flushing works when the buffer isn't empty.
Flushing with buffered messages returns a deferred that fires
when upload completes. A POST request is made to the API URL.
The buffer is emptied synchronously when the flushing starts.
"""
self.target.log(1, "user1", "message")
self.target.log(1, "user2", "another message")
d = self.target.flush()
self.assertEqual(d.called, False)
expected_body = [
{
u'proposal': 1,
u'user': u'user1',
u'line': u'message',
u'timestamp': u'1989-02-07 00:30:00.000000'
},
{
u'proposal': 1,
u'user': u'user2',
u'line': u'another message',
u'timestamp': u'1989-02-07 00:30:01.000000'
}
]
self.assertEqual(loads(self.request_body), expected_body)
self.assertEqual(self.target._buffer, [])
self.post_deferred.callback(None)
self.assertEqual(self.successResultOf(d), None)
EPOCH = datetime(1989, 2, 7, 00, 30)
ENCODED_EPOCH = u"1989-02-07 00:30:00.000000"
ONE_SECOND = timedelta(seconds=1)
def dates():
"""Generator that produces test dates.
Starts at ``EPOCH``, adds one second each iteration.
"""
date = EPOCH
while True:
yield date
date += ONE_SECOND
class JSONDateTimeEncoderTests(unittest.TestCase):
"""Tests for datetime-aware JSON encoder.
"""
def setUp(self):
self.encoder = log.JSONDateTimeEncoder()
def test_encode(self):
encoded = self.encoder.encode({"datetime": EPOCH})
expected = dumps({"datetime": ENCODED_EPOCH})
self.assertEqual(encoded, expected)
class AutoFlushingLogTargetTests(unittest.TestCase):
def setUp(self):
self.wrapped_target = w = FakeLogTarget()
self.clock = task.Clock()
self.target = log.AutoFlushingLogTarget(w, _clock=self.clock)
def test_log(self):
"""The log method is dispatched to the wrapped log target.
"""
self.assertEqual(self.wrapped_target.logged_messages, [])
args = 1, "nickname", "message"
self.target.log(*args)
self.assertEqual(self.wrapped_target.logged_messages, [args])
def test_flush(self):
"""The flush method is dispatched to the wrapped log target.
"""
self.assertEqual(self.wrapped_target.flushes, 0)
d = self.target.flush()
self.assertEqual(self.successResultOf(d), None)
self.assertEqual(self.wrapped_target.flushes, 1)
def test_autoflush(self):
"""The wrapped target is flushed automatically every 10 seconds.
"""
self.assertEqual(self.wrapped_target.flushes, 0)
self.clock.advance(10)
self.assertEqual(self.wrapped_target.flushes, 1)
self.clock.advance(5)
self.assertEqual(self.wrapped_target.flushes, 1)
self.clock.advance(5)
self.assertEqual(self.wrapped_target.flushes, 2)
class FakeLogTarget(object):
def __init__(self):
self.logged_messages = []
self.flushes = 0
def log(self, proposal, nickname, message):
self.logged_messages.append((proposal, nickname, message))
def flush(self):
self.flushes += 1
return defer.succeed(None)
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import NearestNDInterpolator
from scipy import optimize
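# Visualise the square root of the residual on the fine (omega_1, omega_2) grid
# as a filled contour on log-log axes, and overlay the basis points from basis.dat.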
plt.gca().set_aspect('equal')
plt.style.use('science')
grid = np.loadtxt("basis.dat")
finegrid = np.loadtxt("finegrid.dat")
Nfine = len(finegrid)
residual = np.loadtxt("residual.dat")
print(len(residual))
print(len([residual[i]
for i in range(len(residual)) if np.sqrt(residual[i]) > 0.2e-9]))
# print("grid: ", list(zip(finegrid, finegrid)))
test = [np.array([x, y])
for x in finegrid for y in finegrid]
interp = NearestNDInterpolator(test, -residual)
def interplate(x0):
# print("inter ", x0)
# print("got ", interp([np.array([x0[0], x0[1]])]))
return interp(x0[0], x0[1])
residual = np.sqrt(np.reshape(residual, (Nfine, Nfine)))
xv, yv = np.meshgrid(finegrid, finegrid)
# plt.imshow(xv, yv, residual)
plt.contourf(xv, yv, residual, 16)
plt.colorbar()
# tri = Delaunay(np.log(grid), qhull_options="QJ")
# plt.triplot(grid[:, 0], grid[:, 1], tri.simplices)
# print(len(grid))
# print(len(tri.simplices))
# print(tri.simplices[0])
# coordx = []
# coordy = []
# for sim in tri.simplices:
#     initial = [grid[s] for s in sim]
#     x = (initial[0][0]+initial[1][0]+initial[2][0])/3
#     y = (initial[0][1]+initial[1][1]+initial[2][1])/3
#     # initial.append([x, y])
#     x0 = np.array([x, y])
#     initial = np.array(initial)
#     # print(initial.shape)
#     mimum = optimize.fmin(interplate, x0,
#                           xtol=0.05, ftol=1e-10, initial_simplex=initial)
#     coordx.append(mimum[0])
#     coordy.append(mimum[1])
#     # print(initial)
plt.scatter(grid[:, 0], grid[:, 1], c="yellow", alpha=0.5, s=6)
# plt.scatter(coordx, coordy, c="red", alpha=0.5, s=8)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("$\\omega_1$")
plt.ylabel("$\\omega_2$")
plt.savefig("residual.pdf")
plt.show()
|
""" Pipelines expressed as dask graphs
"""
from dask import delayed
from arl.data.parameters import get_parameter
from arl.image.deconvolution import restore_cube
from arl.graphs.graphs import create_deconvolve_graph, create_invert_graph, create_residual_graph, \
create_selfcal_graph_list, create_predict_graph
def create_continuum_imaging_pipeline_graph(vis_graph_list, model_graph: delayed,
c_deconvolve_graph=create_deconvolve_graph,
c_invert_graph=create_invert_graph,
c_predict_graph=create_predict_graph,
c_residual_graph=create_residual_graph,
**kwargs) -> delayed:
""" Create graph for the continuum imaging pipeline.
Same as ICAL but with no selfcal.
:param vis_graph_list:
:param model_graph:
:param c_deconvolve_graph: Default: create_deconvolve_graph
:param c_invert_graph: Default: create_invert_graph
    :param c_residual_graph: Default: create_residual_graph
:param kwargs: Parameters for functions in graphs
:return:
"""
return create_ical_pipeline_graph(vis_graph_list, model_graph,
c_deconvolve_graph=c_deconvolve_graph,
c_invert_graph=c_invert_graph,
c_predict_graph=c_predict_graph,
c_residual_graph=c_residual_graph,
first_selfcal=None,
**kwargs)
def create_spectral_line_imaging_pipeline_graph(vis_graph_list, model_graph: delayed,
continuum_model_graph=None,
c_deconvolve_graph=create_deconvolve_graph,
c_invert_graph=create_invert_graph,
c_predict_graph=create_predict_graph,
c_residual_graph=create_residual_graph,
**kwargs) -> delayed:
"""Create graph for spectral line imaging pipeline
Uses the ical pipeline after subtraction of a continuum model
:param vis_graph_list: List of visibility graphs
:param model_graph: Spectral line model graph
:param continuum_model_graph: Continuum model graph
:param c_deconvolve_graph: Default: create_deconvolve_graph
    :param c_invert_graph: Default: create_invert_graph
    :param c_residual_graph: Default: create_residual_graph
:param kwargs: Parameters for functions in graphs
:return: graphs of (deconvolved model, residual, restored)
"""
if continuum_model_graph is not None:
vis_graph_list = c_predict_graph(vis_graph_list, continuum_model_graph, **kwargs)
return create_ical_pipeline_graph(vis_graph_list, model_graph,
c_deconvolve_graph=c_deconvolve_graph,
c_predict_graph=c_predict_graph,
c_invert_graph=c_invert_graph,
c_residual_graph=c_residual_graph,
first_selfcal=None,
**kwargs)
def create_ical_pipeline_graph(vis_graph_list, model_graph: delayed,
c_deconvolve_graph=create_deconvolve_graph,
c_invert_graph=create_invert_graph,
c_residual_graph=create_residual_graph,
c_predict_graph=create_predict_graph,
c_selfcal_graph=create_selfcal_graph_list,
first_selfcal=None, **kwargs) -> delayed:
"""Create graph for ICAL pipeline
:param vis_graph_list:
:param model_graph:
:param c_deconvolve_graph: Default: create_deconvolve_graph
    :param c_invert_graph: Default: create_invert_graph
    :param c_residual_graph: Default: create_residual_graph
:param kwargs: Parameters for functions in graphs
:return:
"""
psf_graph = c_invert_graph(vis_graph_list, model_graph, dopsf=True, **kwargs)
if first_selfcal is not None and first_selfcal == 0:
vis_graph_list = c_selfcal_graph(vis_graph_list, model_graph, c_predict_graph, **kwargs)
residual_graph = c_residual_graph(vis_graph_list, model_graph, **kwargs)
deconvolve_model_graph = c_deconvolve_graph(residual_graph, psf_graph, model_graph, **kwargs)
nmajor = get_parameter(kwargs, "nmajor", 5)
if nmajor > 1:
for cycle in range(nmajor):
if first_selfcal is not None and cycle >= first_selfcal:
vis_graph_list = c_selfcal_graph(vis_graph_list, deconvolve_model_graph,
c_predict_graph, **kwargs)
residual_graph = c_residual_graph(vis_graph_list, deconvolve_model_graph, **kwargs)
deconvolve_model_graph = c_deconvolve_graph(residual_graph, psf_graph,
deconvolve_model_graph, **kwargs)
residual_graph = c_residual_graph(vis_graph_list, deconvolve_model_graph, **kwargs)
restore_graph = delayed(restore_cube, pure=True, nout=1)(deconvolve_model_graph,
psf_graph[0], residual_graph[0],
**kwargs)
return delayed((deconvolve_model_graph, residual_graph, restore_graph))
|
"""
Sheepdog example demonstrating what happens when errors occur.
Our simple function raises an error for one of the arguments, and we'll see
that it gives us a None in the results and we can fetch more error details too.
"""
import sheepdog
def f(a, b):
if a == 2:
raise ValueError("a cannot be 2!!!")
return a + b
args = [(1, 1), (1, 2), (2, 2)]
print("Running f(a,b) for arguments:")
print(args)
config = {
"host": "fear",
}
request_id = sheepdog.map_async(f, args, config)
results = sheepdog.get_results(request_id, './sheepdog.sqlite', verbose=True)
results = [r[1] for r in results]
print("\nReceived results:")
print(results)
errors = sheepdog.get_errors(request_id, './sheepdog.sqlite')
print("\nReceived errors:")
for error in errors:
print("Argument: {}".format(error[0]))
print("Error: =====================================")
print(error[1])
print("============================================")
|
#!/usr/bin/env python3
"""tests for gashlycrumb.py"""
import os
import re
import random
import string
from subprocess import getstatusoutput
prg = "./gashlycrumb.py"
# --------------------------------------------------
def file_flag():
"""Either -f or --file"""
return "-f" if random.randint(0, 1) else "--file"
# --------------------------------------------------
def test_exists():
"""exists"""
assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
"""usage"""
for flag in ["-h", "--help"]:
rv, out = getstatusoutput(f"{prg} {flag}")
assert rv == 0
assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def test_bad_file():
"""Test for bad --file"""
bad = random_string()
letter = random.choice(string.ascii_lowercase)
rv, out = getstatusoutput(f"{prg} {letter} -f {bad}")
assert rv != 0
expected = f"No such file or directory: '{bad}'"
assert re.search(expected, out)
# --------------------------------------------------
def test_a():
"""Test for 'a'"""
rv, out = getstatusoutput(f"{prg} a")
assert rv == 0
expected = "A is for Amy who fell down the stairs."
assert out.strip() == expected
# --------------------------------------------------
def test_b_c():
"""Test for 'b c'"""
rv, out = getstatusoutput(f"{prg} b c")
assert rv == 0
expected = "B is for Basil assaulted by bears.\n" "C is for Clara who wasted away."
assert out.strip() == expected
# --------------------------------------------------
def test_y():
"""Test for 'y'"""
rv, out = getstatusoutput(f"{prg} Y")
assert rv == 0
expected = "Y is for Yorick whose head was bashed in."
assert out.strip() == expected
# --------------------------------------------------
def test_o_alternate():
""" Test for 'o' from 'alternate.txt' """
rv, out = getstatusoutput(f"{prg} o P q -f alternate.txt")
assert rv == 0
expected = (
"O is for Orville, who fell in a canyon.\n"
"P is for Paul, strangled by his banyan.\n"
"Q is for Quintanna, flayed in the night."
)
assert out.strip() == expected
# --------------------------------------------------
def test_bad_letter():
"""Test for bad input"""
rv, out = getstatusoutput(f"{prg} 5 CH")
assert rv == 0
expected = 'I do not know "5".\n' 'I do not know "CH".'
assert out.strip() == expected
# --------------------------------------------------
def random_string():
"""generate a random string"""
k = random.randint(5, 10)
return "".join(random.choices(string.ascii_letters + string.digits, k=k))
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import torch
import popart
import onnx
from bert_model import ExecutionMode
from tests.utils import run_py, copy_weights_to_torch, run_fwd_model, check_tensors, check_model
from tests import torch_lamb
def get_mapping(config, init=None):
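    """Map PyTorch BERT parameter names to the corresponding PopART/ONNX tensor
    names. The naming scheme depends on the execution mode (DEFAULT vs. PHASED)
    and on whether the QKV weights are split into separate tensors."""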
if init is None:
init = {}
if config.execution_mode == ExecutionMode.DEFAULT:
embedding_proj = {
"bert.embeddings.word_embeddings.weight": "Embedding/Embedding_Dict",
"bert.embeddings.position_embeddings.weight": "Embedding/Positional_Dict",
"bert.embeddings.token_type_embeddings.weight": "Embedding/Segment_Dict",
"bert.embeddings.LayerNorm.weight": "Embedding/Gamma",
"bert.embeddings.LayerNorm.bias": "Embedding/Beta",
}
init.update(**embedding_proj)
if config.split_qkv:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": f"Layer{i}/Attention/Q",
f"bert.encoder.layer.{i}.attention.self.key.weight": f"Layer{i}/Attention/K",
f"bert.encoder.layer.{i}.attention.self.value.weight": f"Layer{i}/Attention/V",
f"bert.encoder.layer.{i}.attention.output.dense.weight": f"Layer{i}/Attention/Out",
f"bert.encoder.layer.{i}.attention.output.LayerNorm.weight": f"Layer{i}/Attention/Gamma",
f"bert.encoder.layer.{i}.attention.output.LayerNorm.bias": f"Layer{i}/Attention/Beta",
f"bert.encoder.layer.{i}.intermediate.dense.weight": f"Layer{i}/FF/1/W",
f"bert.encoder.layer.{i}.intermediate.dense.bias": f"Layer{i}/FF/1/B",
f"bert.encoder.layer.{i}.output.dense.weight": f"Layer{i}/FF/2/W",
f"bert.encoder.layer.{i}.output.dense.bias": f"Layer{i}/FF/2/B",
f"bert.encoder.layer.{i}.output.LayerNorm.weight": f"Layer{i}/FF/Gamma",
f"bert.encoder.layer.{i}.output.LayerNorm.bias": f"Layer{i}/FF/Beta",
}
init.update(**layer)
else:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": f"Layer{i}/Attention/QKV",
f"bert.encoder.layer.{i}.attention.self.key.weight": f"Layer{i}/Attention/QKV",
f"bert.encoder.layer.{i}.attention.self.value.weight": f"Layer{i}/Attention/QKV",
f"bert.encoder.layer.{i}.attention.output.dense.weight": f"Layer{i}/Attention/Out",
f"bert.encoder.layer.{i}.attention.output.LayerNorm.weight": f"Layer{i}/Attention/Gamma",
f"bert.encoder.layer.{i}.attention.output.LayerNorm.bias": f"Layer{i}/Attention/Beta",
f"bert.encoder.layer.{i}.intermediate.dense.weight": f"Layer{i}/FF/1/W",
f"bert.encoder.layer.{i}.intermediate.dense.bias": f"Layer{i}/FF/1/B",
f"bert.encoder.layer.{i}.output.dense.weight": f"Layer{i}/FF/2/W",
f"bert.encoder.layer.{i}.output.dense.bias": f"Layer{i}/FF/2/B",
f"bert.encoder.layer.{i}.output.LayerNorm.weight": f"Layer{i}/FF/Gamma",
f"bert.encoder.layer.{i}.output.LayerNorm.bias": f"Layer{i}/FF/Beta",
}
init.update(**layer)
else:
embedding_proj = {
"bert.embeddings.word_embeddings.weight": "BertModel/Encoder/Embeddings/Token/weight",
"bert.embeddings.position_embeddings.weight": "BertModel/Encoder/Embeddings/Position/weight",
"bert.embeddings.token_type_embeddings.weight": "BertModel/Encoder/Embeddings/Segment/weight",
"bert.embeddings.LayerNorm.weight": "BertModel/Encoder/Embeddings/Norm/Gamma",
"bert.embeddings.LayerNorm.bias": "BertModel/Encoder/Embeddings/Norm/Beta",
}
init.update(**embedding_proj)
if config.split_qkv:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": f'BertModel/Encoder/Layer{i}/Attention/Q',
f"bert.encoder.layer.{i}.attention.self.key.weight": f'BertModel/Encoder/Layer{i}/Attention/K',
f"bert.encoder.layer.{i}.attention.self.value.weight": f'BertModel/Encoder/Layer{i}/Attention/V',
f"bert.encoder.layer.{i}.attention.output.dense.weight": f'BertModel/Encoder/Layer{i}/Attention/Out',
f"bert.encoder.layer.{i}.attention.output.LayerNorm.weight": f'BertModel/Encoder/Layer{i}/Attention/Norm/Gamma',
f"bert.encoder.layer.{i}.attention.output.LayerNorm.bias": f'BertModel/Encoder/Layer{i}/Attention/Norm/Beta',
f"bert.encoder.layer.{i}.intermediate.dense.weight": f'BertModel/Encoder/Layer{i}/FF/1/Dense/Weight',
f"bert.encoder.layer.{i}.intermediate.dense.bias": f'BertModel/Encoder/Layer{i}/FF/1/Dense/Bias',
f"bert.encoder.layer.{i}.output.dense.weight": f'BertModel/Encoder/Layer{i}/FF/2/Dense/Weight',
f"bert.encoder.layer.{i}.output.dense.bias": f'BertModel/Encoder/Layer{i}/FF/2/Dense/Bias',
f"bert.encoder.layer.{i}.output.LayerNorm.weight": f'BertModel/Encoder/Layer{i}/FF/Norm/Gamma',
f"bert.encoder.layer.{i}.output.LayerNorm.bias": f'BertModel/Encoder/Layer{i}/FF/Norm/Beta',
}
init.update(**layer)
else:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": f'BertModel/Encoder/Layer{i}/Attention/QKV',
f"bert.encoder.layer.{i}.attention.self.key.weight": f'BertModel/Encoder/Layer{i}/Attention/QKV',
f"bert.encoder.layer.{i}.attention.self.value.weight": f'BertModel/Encoder/Layer{i}/Attention/QKV',
f"bert.encoder.layer.{i}.attention.output.dense.weight": f'BertModel/Encoder/Layer{i}/Attention/Out',
f"bert.encoder.layer.{i}.attention.output.LayerNorm.weight": f'BertModel/Encoder/Layer{i}/Attention/Norm/Gamma',
f"bert.encoder.layer.{i}.attention.output.LayerNorm.bias": f'BertModel/Encoder/Layer{i}/Attention/Norm/Beta',
f"bert.encoder.layer.{i}.intermediate.dense.weight": f'BertModel/Encoder/Layer{i}/FF/1/Dense/Weight',
f"bert.encoder.layer.{i}.intermediate.dense.bias": f'BertModel/Encoder/Layer{i}/FF/1/Dense/Bias',
f"bert.encoder.layer.{i}.output.dense.weight": f'BertModel/Encoder/Layer{i}/FF/2/Dense/Weight',
f"bert.encoder.layer.{i}.output.dense.bias": f'BertModel/Encoder/Layer{i}/FF/2/Dense/Bias',
f"bert.encoder.layer.{i}.output.LayerNorm.weight": f'BertModel/Encoder/Layer{i}/FF/Norm/Gamma',
f"bert.encoder.layer.{i}.output.LayerNorm.bias": f'BertModel/Encoder/Layer{i}/FF/Norm/Beta',
}
init.update(**layer)
return init
def get_transform(config, init=None):
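    """Per-parameter transforms applied to the weight arrays when copying
    between the PopART/ONNX and PyTorch models: dense weights are transposed,
    and when QKV is stored as one fused tensor the Q/K/V slice is extracted
    before transposing."""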
if init is None:
init = {}
def q_transform(arr):
return arr[:, 0:config.hidden_size].T
def k_transform(arr):
return arr[:, config.hidden_size:config.hidden_size * 2].T
def v_transform(arr):
return arr[:, config.hidden_size * 2:config.hidden_size * 3].T
if config.split_qkv:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": np.transpose,
f"bert.encoder.layer.{i}.attention.self.key.weight": np.transpose,
f"bert.encoder.layer.{i}.attention.self.value.weight": np.transpose,
f"bert.encoder.layer.{i}.attention.output.dense.weight": np.transpose,
f"bert.encoder.layer.{i}.intermediate.dense.weight": np.transpose,
f"bert.encoder.layer.{i}.output.dense.weight": np.transpose,
}
init.update(**layer)
else:
for i in range(config.num_layers):
layer = {
f"bert.encoder.layer.{i}.attention.self.query.weight": q_transform,
f"bert.encoder.layer.{i}.attention.self.key.weight": k_transform,
f"bert.encoder.layer.{i}.attention.self.value.weight": v_transform,
f"bert.encoder.layer.{i}.attention.output.dense.weight": np.transpose,
f"bert.encoder.layer.{i}.intermediate.dense.weight": np.transpose,
f"bert.encoder.layer.{i}.output.dense.weight": np.transpose,
}
init.update(**layer)
return init
def fwd_graph(popart_model, torch_model, mode, mapping=None, transform=None, replication_factor=1, replicated_tensor_sharding = False):
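    """Build and run the PopART forward graph on random input data, copy the
    resulting weights into the equivalent PyTorch model, and check that both
    models produce matching outputs."""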
# ------------------- PopART --------------------
config = popart_model.config
builder = popart_model.builder
sequence_info = popart.TensorInfo(
"UINT32", [config.micro_batch_size * config.sequence_length])
indices = builder.addInputTensor(sequence_info)
positions = builder.addInputTensor(sequence_info)
segments = builder.addInputTensor(sequence_info)
data = {
indices: np.random.randint(
0, config.vocab_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32),
positions: np.random.randint(
0, config.sequence_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32),
segments: np.random.randint(
0, 2, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32)
}
user_options = {}
if mode == ExecutionMode.PHASED:
user_options = {
"batchSerializationFactor": 1,
"executionPhases": popart_model.total_execution_phases
}
output = popart_model(indices, positions, segments)
ipus = 2
else:
output = popart_model.build_graph(indices, positions, segments)
ipus = popart_model.total_ipus
proto = builder.getModelProto()
outputs, _ = run_py(proto,
data,
output,
user_options=user_options,
execution_mode=mode,
replication_factor=replication_factor,
replicated_tensor_sharding=replicated_tensor_sharding,
ipus=ipus)
# ----------------- PopART -> PyTorch ----------------
proto = onnx.load_model_from_string(proto)
inputs = {
"input_ids": data[indices].reshape(replication_factor * config.micro_batch_size,
config.sequence_length).astype(np.int32),
"position_ids": data[positions].reshape(replication_factor * config.micro_batch_size,
config.sequence_length).astype(np.int32),
"token_type_ids": data[segments].reshape(replication_factor * config.micro_batch_size,
config.sequence_length).astype(np.int32)
}
torch_to_onnx = get_mapping(config, init=mapping)
transform_weights = get_transform(config, init=transform)
# ------------------- PyTorch -------------------------
# Turn off dropout
torch_model.eval()
copy_weights_to_torch(torch_model, proto,
torch_to_onnx, transform_weights)
torch_outputs = run_fwd_model(inputs, torch_model)
check_tensors(torch_outputs, outputs)
def bwd_graph(popart_model,
torch_model,
mode,
popart_loss_fn,
torch_loss_fn,
mapping=None,
transform=None,
replication_factor=1,
replicated_tensor_sharding=False,
opt_type="SGD"):
np.random.seed(1984)
random.seed(1984)
torch.manual_seed(1984)
# ------------------- PopART --------------------
config = popart_model.config
builder = popart_model.builder
sequence_info = popart.TensorInfo(
"UINT32", [config.micro_batch_size * config.sequence_length])
indices = builder.addInputTensor(sequence_info)
positions = builder.addInputTensor(sequence_info)
segments = builder.addInputTensor(sequence_info)
data = {
indices: np.random.randint(
0, config.vocab_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32),
positions: np.random.randint(
0, config.sequence_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32),
segments: np.random.randint(
0, 2, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32)
}
num_reps = 5
user_options = {}
if mode == ExecutionMode.PHASED:
user_options = {
"batchSerializationFactor": 1,
"executionPhases": popart_model.total_execution_phases
}
output = popart_model(indices, positions, segments)
ipus = 2
else:
output = popart_model.build_graph(indices, positions, segments)
ipus = popart_model.total_ipus
loss = popart_loss_fn(output)
proto = builder.getModelProto()
if opt_type == "SGD":
optimizer = popart.ConstSGD(1e-3)
elif opt_type == "LAMB":
optMap = {
"defaultLearningRate": (1e-3, True),
"defaultBeta1": (0.9, True),
"defaultBeta2": (0.999, True),
"defaultWeightDecay": (0.0, True),
"maxWeightNorm": (10.0, True),
"defaultEps": (1e-8, True),
"lossScaling": (1.0, True),
}
optimizer = popart.Adam(optMap,
mode=popart.AdamMode.Lamb)
elif opt_type == "LAMB_NO_BIAS":
optMap = {
"defaultLearningRate": (1, False),
"defaultBeta1": (0, False),
"defaultBeta2": (0, False),
"defaultWeightDecay": (0.0, False),
"defaultEps": (1e-8, False),
"lossScaling": (1.0, False),
}
optimizer = popart.Adam(optMap,
mode=popart.AdamMode.LambNoBias)
else:
raise ValueError(f"Unknown opt_type={opt_type}")
patterns = popart.Patterns()
if mode == ExecutionMode.PHASED:
patterns.enablePattern("TiedGatherPattern", False)
patterns.enablePattern("SparseAccumulatePattern", False)
outputs, post_proto = run_py(proto,
data,
output,
loss=loss,
optimizer=optimizer,
user_options=user_options,
execution_mode=mode,
patterns=patterns,
replication_factor=replication_factor,
replicated_tensor_sharding=replicated_tensor_sharding,
ipus=ipus,
num_reps=num_reps)
# ----------------- PopART -> PyTorch ----------------
proto = onnx.load_model_from_string(proto)
inputs = {
"input_ids": data[indices].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32),
"position_ids": data[positions].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32),
"token_type_ids": data[segments].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32)
}
torch_to_onnx = get_mapping(config, init=mapping)
transform_weights = get_transform(config, init=transform)
# ------------------- PyTorch -------------------------
# Turn off dropout
torch_model.eval()
copy_weights_to_torch(torch_model, proto,
torch_to_onnx, transform_weights)
if opt_type == "SGD":
optim = torch.optim.SGD(torch_model.parameters(), 1e-3,
weight_decay=0.0, momentum=0.0)
elif opt_type == "LAMB":
optim = torch_lamb.Lamb(torch_model.parameters(),
lr=1e-3, weight_decay=0.0, biasCorrection=True)
for _ in range(num_reps):
torch_outputs = torch_model(
**{k: torch.from_numpy(t).long() for k, t in inputs.items()})
torch_loss = torch_loss_fn(torch_outputs)
torch_loss.backward()
optim.step()
optim.zero_grad()
check_tensors([output.detach().numpy()
for output in torch_outputs], outputs, margin=1.5e-06)
check_model(torch_model, post_proto,
torch_to_onnx, transform_weights,
margin=5e-5)
|
import logging
import argparse
import math
import os
import sys
from time import strftime, localtime
import random
import numpy
from pytorch_pretrained_bert import BertModel
from sklearn import metrics
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from data_utils import build_tokenizer, build_embedding_matrix, Tokenizer4Bert, ABSADataset
from models import LSTM, IAN, MemNet, RAM, TD_LSTM, Cabasc, ATAE_LSTM, TNet_LF, AOA, MGAN
from models.aen import CrossEntropyLoss_LSR, AEN_BERT
from models.bert_spc import BERT_SPC
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Inferer:
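    """Loads a trained ABSA model from a saved state dict, evaluates
    accuracy/F1 on the validation set and writes test-set predictions to a
    CSV file."""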
def __init__(self, opt):
self.opt = opt
if 'bert' in opt.model_name:
tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
bert = BertModel.from_pretrained(opt.pretrained_bert_name)
self.model = opt.model_class(bert, opt).to(opt.device)
self.model.load_state_dict(torch.load(opt.state_dict_path))
logger.info(f"Loaded model {opt.model_name} from {opt.state_dict_path}")
else:
tokenizer = build_tokenizer(
fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
max_seq_len=opt.max_seq_len,
dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
embedding_matrix = build_embedding_matrix(
word2idx=tokenizer.word2idx,
embed_dim=opt.embed_dim,
dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(opt.embed_dim), opt.dataset))
self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
self.model.load_state_dict(torch.load(opt.state_dict_path))
self.valset = ABSADataset(opt.dataset_file['val'], tokenizer)
self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
if opt.device.type == 'cuda':
logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))
def _evaluate_acc_f1(self, data_loader):
n_correct, n_total = 0, 0
t_targets_all, t_outputs_all = None, None
# switch model to evaluation mode
self.model.eval()
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(data_loader):
t_inputs = [t_sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
t_targets = t_sample_batched['polarity'].to(self.opt.device)
t_outputs = self.model(t_inputs)
n_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
n_total += len(t_outputs)
if t_targets_all is None:
t_targets_all = t_targets
t_outputs_all = t_outputs
else:
t_targets_all = torch.cat((t_targets_all, t_targets), dim=0)
t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
acc = n_correct / n_total
f1 = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 1, 2], average='macro')
return acc, f1
def _predict(self,data_loader):
import pandas as pd
pred_df = pd.DataFrame(columns=['unique_hash','sentiment'])
idx = 0
# switch model to evaluation mode
self.model.eval()
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(data_loader):
t_input = [t_sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
t_hash = t_sample_batched['hash']
t_outputs = self.model(t_input)
pred = (torch.argmax(t_outputs, -1))
pred_df.loc[idx] = [t_hash,pred]
idx += 1
pred_df.to_csv(f"{self.opt.model_name}_preds.csv",index=False)
return f"{self.opt.model_name}_preds.csv"
def run(self):
# Loss and Optimizer
_params = filter(lambda p: p.requires_grad, self.model.parameters())
test_data_loader = DataLoader(dataset=self.testset, batch_size=1, shuffle=False)
val_data_loader = DataLoader(dataset=self.valset, batch_size=32, shuffle=False)
self.model.eval()
val_acc, val_f1 = self._evaluate_acc_f1(val_data_loader)
        logger.info('>> val_acc: {:.4f}, val_f1: {:.4f}'.format(val_acc, val_f1))
self.model.eval()
pred_csv = self._predict(test_data_loader)
logger.info(f'Predictions saved to {pred_csv}')
if __name__ == "__main__":
model_classes = {
'lstm': LSTM,
'td_lstm': TD_LSTM,
'atae_lstm': ATAE_LSTM,
'ian': IAN,
'memnet': MemNet,
'ram': RAM,
'cabasc': Cabasc,
'tnet_lf': TNet_LF,
'aoa': AOA,
'mgan': MGAN,
'bert_spc': BERT_SPC,
'aen_bert': AEN_BERT,
}
class Option(object): pass
opt = Option()
opt.model_name = 'aen_bert'
opt.model_class = model_classes[opt.model_name]
opt.dataset = 'twitter'
opt.dataset_file = {
'test': sys.argv[2],
'val': './datasets/acl-14-short-data/test.raw'
}
opt.state_dict_path = sys.argv[1]
opt.embed_dim = 300
opt.hidden_dim = 300
opt.max_seq_len = 80
opt.polarities_dim = 3
opt.hops = 3
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
opt.pretrained_bert_name = "bert-base-uncased"
opt.dropout = 0.1
opt.l2reg = 0.01
opt.inputs_cols = ['text_raw_bert_indices', 'aspect_bert_indices']
opt.bert_dim = 768
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
if opt.device is None else torch.device(opt.device)
inf = Inferer(opt)
inf.run()
|
import logging.handlers
import basebot
class LogBot(basebot.Bot):
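    """IRC bot that writes each joined room's messages, joins and parts
    (including its own messages) to a per-room rotating log file, and announces
    on join that the conversation is being recorded."""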
filename_format = "%s.log"
message_format = "[%(asctime)s] %(message)s"
def __init__(self, *args, **kwargs):
super(LogBot, self).__init__(*args, **kwargs)
self.logs = {}
def join_room(self, room, password=None):
super(LogBot, self).join_room(room, password)
handler = logging.handlers.RotatingFileHandler(
self.filename_format % room, 'a', 6291456, 5)
handler.setFormatter(logging.Formatter(self.message_format))
logger = logging.Logger(room)
logger.addHandler(handler)
self.logs[room] = logger
self.message(room, "(this conversation is being recorded)")
def on_privmsg(self, cmd, args, prefix):
parent = super(LogBot, self)
if hasattr(parent, "on_privmsg"):
parent.on_privmsg(cmd, args, prefix)
sender = prefix.split("!", 1)[0]
to, msg = args
if to in self.logs:
self.logs[to].info("<%s> %s" % (sender, msg))
def on_join(self, cmd, args, prefix):
parent = super(LogBot, self)
if hasattr(parent, "on_join"):
parent.on_join(cmd, args, prefix)
sender = prefix.split("!", 1)[0]
room = args[0]
if room in self.logs:
self.logs[room].info("<%s> joined %s" % (sender, room))
def on_part(self, cmd, args, prefix):
parent = super(LogBot, self)
if hasattr(parent, "on_part"):
parent.on_part(cmd, args, prefix)
sender = prefix.split("!", 1)[0]
room = args[0]
if room in self.logs:
self.logs[room].info("<%s> left %s" % (sender, room))
def cmd(self, cmd, *args):
super(LogBot, self).cmd(cmd, *args)
if cmd.lower() == "privmsg":
target, message = args
if target in self.logs:
self.logs[target].info("<%s> %s" % (self.nick, message))
|
import logging
def get_config(name, src):
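    """Return the dataset configuration (currently just the number of classes)
    for the given target dataset name and source dataset."""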
config = {}
if name.upper() == 'HMDB-ARID':
config['num_classes'] = 10
elif name.upper() == 'UCF-HMDB':
config['num_classes'] = 14
elif name.upper() == 'MK200-UCF':
config['num_classes'] = 45
else:
logging.error("Configs for dataset '{}' with source dataset {} not found".format(name, src))
        raise NotImplementedError("Configs for dataset '{}' with source dataset {} not found".format(name, src))
logging.debug("Dataset: '{}' with src '{}', configs: {}".format(name.upper(), src.upper(), config))
return config
if __name__ == "__main__":
logging.getLogger().setLevel(logging.DEBUG)
logging.info(get_config("HMDB-ARID", "HMDB51"))
|
import json
mimetype = 'application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
def test_create_success(app, client):
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Fast Lane",
"duration":240
}
}
response = client.post('/', data=json.dumps(data), headers=headers)
assert response.status_code == 200
def test_create_failure_400(app, client):
"""duration missing in the data/payload"""
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Fast Lane"
}
}
response = client.post('/', data=json.dumps(data), headers=headers)
assert response.status_code == 400
def test_create_failure_500(app, client):
"""adding new field (Rating) to payload gives 500"""
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Fast Lane",
"Rating": 4.7
}
}
response = client.post('/', data=json.dumps(data), headers=headers)
assert response.status_code == 500
def test_get_one_success(app, client):
audioFileType = 'song'
audioFileID = "6043b4f86910bbcc3fb43e09"
response = client.get('/media/{}/{}'.format(audioFileType, audioFileID))
assert response.content_type == mimetype
assert response.status_code == 200
def test_get_one_failure(app, client):
"""wrong audioFileType gives 500"""
audioFileID = "604330705ac133f1fb5159"
response = client.get('/media/{}/{}'.format('invalid_type', audioFileID))
assert response.status_code == 500
def test_get_all_success(app, client):
audioFileType = 'song'
response = client.get('/media/{}/'.format(audioFileType))
assert response.content_type == mimetype
assert response.status_code == 200
def test_put_success(app, client):
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Self Made",
"duration":248
}
}
audioFileMetadata = data['audioFileMetadata']
audioFileType = data['audioFileType']
audioFileID = "6043b4f86910bbcc3fb43e09"
response = client.put('/media/{}/{}'.format(audioFileType, audioFileID), data = json.dumps(data), headers=headers)
assert response.content_type == mimetype
assert response.status_code == 200
def test_put_failure_400(app, client):
"""invalid audioFileID gives 400"""
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Bad Made"
}
}
audioFileMetadata = data['audioFileMetadata']
audioFileType = data['audioFileType']
audioFileID = "604330705c133f1fb5159"
response = client.put('/media/{}/{}'.format(audioFileType, audioFileID), data = json.dumps(data), headers=headers)
assert response.status_code == 400
def test_put_failure_500(app, client):
"""Adding new field (Author) in payload gives 400"""
data = {
"audioFileType":"song",
"audioFileMetadata":{
"name":"Bad Made",
"Author":"Totti"
}
}
audioFileMetadata = data['audioFileMetadata']
audioFileType = data['audioFileType']
audioFileID = "6043b4f86910bbcc3fb43e09"
response = client.put('/media/{}/{}'.format(audioFileType, audioFileID), data = json.dumps(data), headers=headers)
assert response.status_code == 500
def test_delete_success(app, client):
audioFileType = 'song'
audioFileID = "6043b4f86910bbcc3fb43e09"
response = client.delete('/media/{}/{}'.format(audioFileType, audioFileID))
assert response.content_type == mimetype
assert response.status_code == 200
def test_delete_failure(app, client):
"""invalid audioFileID gives 400"""
audioFileType = 'song'
audioFileID = "6043b809d97d9ceab834"
response = client.delete('/media/{}/{}'.format(audioFileType, audioFileID))
assert response.content_type == mimetype
assert response.status_code == 400
|
from infi.pyutils.lazy import cached_method, clear_cache
from ..base import partition
# pylint: disable=W0212
class LinuxPartition(partition.Partition):
def __init__(self, containing_disk, parted_partition):
super(LinuxPartition, self).__init__()
self._parted_partition = parted_partition
self._containing_disk = containing_disk
@cached_method
def get_size_in_bytes(self):
return self._parted_partition.get_size_in_bytes()
@cached_method
def get_block_access_path(self):
return self._parted_partition.get_access_path()
@cached_method
def get_containing_disk(self):
return self._containing_disk
@cached_method
def get_current_filesystem(self):
from .filesystem import LinuxFileSystem
filesystem_type = self._parted_partition.get_filesystem_name()
return LinuxFileSystem(filesystem_type)
class LinuxPrimaryPartition(LinuxPartition, partition.PrimaryPartition):
# pylint: disable=W0223
    # The methods below are overridden by platform-specific implementations
pass
class LinuxExtendedPartition(LinuxPartition, partition.ExtendedPartition):
# pylint: disable=W0223
    # The methods below are overridden by platform-specific implementations
pass
class LinuxLogicalPartition(LinuxPartition, partition.LogicalPartition):
# pylint: disable=W0223
    # The methods below are overridden by platform-specific implementations
pass
class LinuxGUIDPartition(LinuxPartition, partition.GUIDPartition):
# pylint: disable=W0223
    # The methods below are overridden by platform-specific implementations
pass
class LinuxPartitionTable(object):
def __init__(self, disk_drive):
super(LinuxPartitionTable, self).__init__()
self._disk_drive = disk_drive
def _translate_partition_object(self, parted_partition):
from infi.parted import GUIDPartition
if isinstance(parted_partition, GUIDPartition):
return LinuxGUIDPartition(self._disk_drive, parted_partition)
if parted_partition.get_type() == "Primary":
return LinuxPrimaryPartition(self._disk_drive, parted_partition)
if parted_partition.get_type() == "Extended":
return LinuxExtendedPartition(self._disk_drive, parted_partition)
if parted_partition.get_type() == "Logical":
return LinuxLogicalPartition(self._disk_drive, parted_partition)
# If there is only primary, then the type is empty
return LinuxPrimaryPartition(self._disk_drive, parted_partition)
@cached_method
def get_partitions(self):
parted_disk = self._disk_drive._get_parted_disk_drive()
return [self._translate_partition_object(parted_partition)
for parted_partition in parted_disk.get_partitions()]
@cached_method
def get_disk_drive(self):
return self._disk_drive
def create_partition_for_whole_table(self, file_system_object, alignment_in_bytes=None):
self._disk_drive._get_parted_disk_drive().create_partition_for_whole_drive(file_system_object.get_name(), alignment_in_bytes)
clear_cache(self)
return self.get_partitions()[0]
class LinuxMBRPartitionTable(LinuxPartitionTable, partition.MBRPartitionTable):
@classmethod
def create_partition_table(cls, disk_drive, alignment_in_bytes=None):
disk_drive._get_parted_disk_drive().create_a_new_partition_table("msdos", alignment_in_bytes)
return cls(disk_drive)
class LinuxGUIDPartitionTable(LinuxPartitionTable, partition.GUIDPartitionTable):
@classmethod
def create_partition_table(cls, disk_drive, alignment_in_bytes=None):
disk_drive._get_parted_disk_drive().create_a_new_partition_table("gpt", alignment_in_bytes)
return cls(disk_drive)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 10:59:57 2019
@author: kyle
"""
# Internal modules
from bayesian_methods import lambdas_grid, interpolators
import numpy as np
import Tasmanian
from scipy.interpolate import RegularGridInterpolator
from time import time
import matplotlib.pyplot as plt
def time_grid_interpolators():
"""
"""
dim_lmd = 3
dim_out = 10
    fig, ax = plt.subplots()
#
# Hypercube [-1,1]^dim_lmd
#
x_bnd = np.array([-np.ones(dim_lmd), np.ones(dim_lmd)]).T
#
# Vary the mesh resolution
#
for pts_per_dim in [2**i for i in np.arange(1,9)]:
# Specify resolution
resolution = [pts_per_dim for dummy in range(dim_lmd)]
# Form the lambda grid
X_1D, x_ravel = lambdas_grid(x_bnd, resolution)
# Generate output data (random)
grid_vals = [np.random.rand(*resolution) for dummy in range(dim_out)]
# Form Interpolators
interpolators = []
for j in range(dim_out):
interpolator = RegularGridInterpolator(X_1D, grid_vals[j],
bounds_error=False)
interpolators.append(interpolator)
# Evaluate all interpolators at n points
n = 10000
x_eval = np.random.rand(n, dim_lmd)
tic = time()
for interpolator in interpolators:
interpolator(x_eval)
toc = time() - tic
print(toc)
tic = time()
for i in range(n):
for interpolator in interpolators:
interpolator(x_eval[i])
toc = time()-tic
print(' ', toc)
def time_sparse_grid_interpolators():
"""
"""
print(dir(Tasmanian.SparseGrid))
dim_lmd = 2
dim_out = 10
order = 1 # Piecewise linear
depth = 10 #
grid = Tasmanian.SparseGrid()
grid.makeGlobalGrid(dim_lmd,dim_out, depth, 'iptotal', 'leja')
x = grid.getNeededPoints()
n = grid.getNumPoints()
print(x.shape)
y = np.random.rand(n,dim_out)
grid.loadNeededPoints(y)
n = 100000
x_eval = np.random.rand(n,dim_lmd)
tic = time()
for i in range(n):
grid.evaluate(np.array(x_eval[i,:]))
toc = time()-tic
print(toc)
if __name__=='__main__':
#time_grid_interpolators()
time_sparse_grid_interpolators()
|
import argparse
import json
import os
import pickle
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import chain
from tqdm import trange, tqdm
import numpy as np
from multiprocessing import get_context
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from attacks.synonyms_utils import CachedSynonyms
from common.utils import clean_sent_for_syns, get_spacy_pos
from attacks.glue_datasets import get_dataset
from attacks.text_fooler_filters import get_filter
NEIGHBORS_FILE = '../data/text_fooler_synonyms.json'
GLUE_DATA_DIR = os.environ.get('GLUE_DIR', '/media/disk2/maori/data')
def analyze_dataset_chunk(inputs):
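    """Compute and cache verified synonym substitutions for one chunk of the
    dataset. inputs is a tuple (start, end, args, cuda_device); when end is
    None, start is an iterable of specific sentence indices to (re)compute.
    Progress is checkpointed to a pickle every args.checkpoint_every sentences,
    and a CachedSynonyms object is returned."""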
# init analysis configurations
start, end, args, device = inputs
dataset = _load_dataset(args)
with open(args.neighbors_path) as f:
neighbors = json.load(f)
cached_synonyms = CachedSynonyms()
is_two_inputs = dataset.is_two_inputs_dataset()
if end is not None:
updater = start == args.first_sample
temp_name = args.out_path + f'_chunk_{start}_{end}.pkl'
if os.path.isfile(temp_name):
# continue from checkpoint
cached_synonyms = CachedSynonyms.from_pkl(temp_name)
print(f'Loading saved checkpoint to compute chunk {start + len(cached_synonyms)}-{end} instead of starting at {start}')
start = start + len(cached_synonyms)
range_gen = trange(start, end) if updater else range(start, end)
else:
updater = start[0] == -1
if updater:
start = start[1:]
range_gen = tqdm(start) if updater else start # start is an iterable with specific indices to re-compute
temp_name = args.out_path + f'_fix_chunk_{start[0]}_{start[-1]}.pkl'
if os.path.isfile(temp_name):
# continue from checkpoint
cached_synonyms = CachedSynonyms.from_pkl(temp_name)
print(f'Loading saved checkpoint to compute fixed chunk of {len(start)-len(cached_synonyms)} instead of {len(start)}')
start = start[len(cached_synonyms):]
# init counters
total_words_capitalized = total_cands_due_to_captialize = total_words_punctuation = total_cands_due_to_punctuation = 0
total_cands = total_words = total_syns = total_syns_due_to_captialize = total_syns_due_to_punctuation = 0
total_blacklisted_words = total_cands_lost = 0
# build synonym cache for each sentence
checkpoint_counter = args.checkpoint_every
for i in range_gen:
if checkpoint_counter == 0 and temp_name is not None:
checkpoint_counter = args.checkpoint_every
with open(temp_name, 'wb') as f:
pickle.dump(cached_synonyms, f)
checkpoint_counter -= 1
sents = []
inds = []
sentence = dataset.get_unsplitted_sentence_to_perturb(i)
curr_size = len(cached_synonyms)
if cached_synonyms.is_sig_already_exists(sentence, i):
assert len(cached_synonyms) == curr_size+1
continue
split_sentence = sentence.split(' ')
perts_filter = get_filter(filter_by_tf_pos=args.perform_tf_pos_filtering, filter_by_sem=args.perform_use_semantic_filtering,
orig_sent=split_sentence, return_mask=True, tfhub_cache_path=args.tfhub_cache_path,
sim_score_window=args.sim_score_window, sim_score_threshold=args.sim_score_threshold,
filter_spacy_pos=args.perform_spacy_pos_filtering, spacy_pos_window=args.spacy_pos_score_window,
filter_by_gpt2_ppl=args.perform_gp2_ppl_filtering, ppl_score_window=args.ppl_score_window,
ppl_score_ratio_threshold=args.ppl_score_ratio_threshold, device=f'cuda:{device}')
orig_split_sent = deepcopy(split_sentence)
clean_split_sentence, punct, capitalized = clean_sent_for_syns(split_sentence)
total_words_punctuation += len(punct)
total_words_capitalized += len(capitalized)
# Preparing candidates thus need to capitalize and punctuate to match the original
bbi = set()
if is_two_inputs:
bbi = get_blacklisted_perturbations_indices_in_two_inputs_tasks(clean_split_sentence, dataset.get_second_input(i))
total_blacklisted_words += len(bbi)
counts = prepare_pertrubed_sentences(
bbi, capitalized, clean_split_sentence, inds, is_two_inputs, neighbors, orig_split_sent, punct, sents, total_cands,
total_cands_due_to_captialize, total_cands_due_to_punctuation, total_cands_lost, total_words, args.clip_tokens)
total_cands, total_cands_due_to_captialize, total_cands_due_to_punctuation, total_cands_lost, total_words = counts
# Filtering invalid candidates
mask = perts_filter(sents, inds)
# add syns that were not filtered out
syns_for_i = defaultdict(list)
for m, j, sent in zip(mask, inds, sents):
if m:
total_syns += 1
total_syns_due_to_captialize += int(j in capitalized)
w = sent[j] # clean it back to have the correct code for it
if j in punct:
total_syns_due_to_punctuation += 1
# w = w[:-1]
syns_for_i[j].append(w)
cached_synonyms.add_new_sent(sentence, syns_for_i, i)
assert len(cached_synonyms) == curr_size + 1
if temp_name is not None:
with open(temp_name, 'wb') as f:
pickle.dump(cached_synonyms, f)
if updater:
print(f'Analyzed {(end - start) if end is not None else len(start)} sentences with {total_words} words.\n'
f'Found a total of {total_cands} candidates of which {total_syns} synonyms were verified')
print(f'The number of capitalized words were {total_words_capitalized} which accounted for {total_cands_due_to_captialize} '
f'candidates and {total_syns_due_to_captialize} synonyms')
print(f'The number of punctuated words were {total_words_punctuation} which accounted for {total_cands_due_to_punctuation} '
f'candidates and {total_syns_due_to_punctuation} synonyms')
if is_two_inputs:
print(f'The data contained two inputs which caused a total of {total_blacklisted_words} to be blacklisted which '
f'forfeited {total_cands_lost} candidates')
return cached_synonyms
def prepare_pertrubed_sentences(bbi, capitalized, clean_split_sentence, inds, is_two_inputs, neighbors, orig_split_sent, punct, sents,
total_cands, total_cands_due_to_captialize, total_cands_due_to_punctuation, total_cands_lost, total_words,
clip_tokens):
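    """For every word up to clip_tokens, build one perturbed copy of the
    original sentence per synonym candidate (restoring the original
    capitalisation and punctuation), append them to sents/inds, and return the
    updated bookkeeping counters."""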
for j, w in enumerate(clean_split_sentence):
if j >= clip_tokens:
# no need to compute tokens for later words
break
total_words += 1
syns = [syn for syn in neighbors.get(w, []) if syn != w]
if is_two_inputs and j in bbi:
# can count how many perturbations were skipped because of it, or at least how many words were skipped
total_cands_lost += len(syns)
continue
total_cands += len(syns)
if len(syns) == 0:
continue
if j in capitalized:
total_cands_due_to_captialize += len(syns)
syns = [syn.capitalize() for syn in syns]
if j in punct:
total_cands_due_to_punctuation += len(syns)
syns = [punct[j][0] + syn + punct[j][1] for syn in syns]
for syn in syns:
sents.append(deepcopy(orig_split_sent))
sents[-1][j] = syn
inds.append(j)
return total_cands, total_cands_due_to_captialize, total_cands_due_to_punctuation, total_cands_lost, total_words
def _load_dataset(args):
return get_dataset(args.dataset.lower().replace('-', ''), args.data_dir, train=args.train)
BLACK_LIST_POSES = {'PROPN', 'NOUN', 'ADJ', 'VERB'}
def get_blacklisted_perturbations_indices_in_two_inputs_tasks(split_sentence1, sentence2):
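    """Indices of words in the first input that also occur in the second input
    with a content part of speech (PROPN/NOUN/ADJ/VERB); these words are
    excluded from perturbation."""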
s2_poses = get_spacy_pos(sentence2)
black_list_words = {w for w, p in zip(sentence2.split(), s2_poses) if p in BLACK_LIST_POSES}
return {i for i, w in enumerate(split_sentence1) if w in black_list_words}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--neighbors_path', default=NEIGHBORS_FILE)
parser.add_argument('--data_dir', default=GLUE_DATA_DIR)
parser.add_argument('--dataset', choices=('SST-2', 'BoolQ', 'IMDB'), default='SST-2')
parser.add_argument('--tfhub_cache_path', type=str, default='/media/disk1/maori/cache')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_processes', type=int, default=8)
parser.add_argument('--perform_tf_pos_filtering', action='store_true')
parser.add_argument('--perform_use_semantic_filtering', action='store_true')
parser.add_argument('--sim_score_window', type=int, default=15)
parser.add_argument('--sim_score_threshold', type=float, default=0.7)
parser.add_argument('--perform_spacy_pos_filtering', action='store_true')
parser.add_argument('--spacy_pos_score_window', type=int, default=15)
parser.add_argument('--perform_gp2_ppl_filtering', action='store_true')
parser.add_argument('--ppl_score_window', type=int, default=101)
parser.add_argument('--ppl_score_ratio_threshold', type=float, default=0.8)
parser.add_argument('--skip_train', action='store_true')
parser.add_argument('--skip_dev', action='store_true')
parser.add_argument('--do_hashing', action='store_true')
parser.add_argument('--first_sample', type=int, default=0)
parser.add_argument('--debug_samples', type=int, default=-1)
parser.add_argument('--checkpoint_every', type=int, default=50)
parser.add_argument('--clip_tokens', type=int, default=480, help='Will only compute synonyms to words numbered under that value')
args = parser.parse_args()
assert not (args.perform_spacy_pos_filtering and args.perform_tf_pos_filtering)
# assert not (args.perform_use_semantic_filtering and args.perform_gp2_ppl_filtering)
os.environ['TFHUB_CACHE_DIR'] = args.tfhub_cache_path
np.random.seed(args.seed)
ctx = get_context('spawn')
# create a vocabulary of all words in the synonyms candidates
args.vocab_path = os.path.splitext(args.neighbors_path)[0] + '_vocab.json'
if not os.path.exists(args.vocab_path):
print('Creating vocabulary...', end=' ')
with open(args.neighbors_path) as f:
neighbors = json.load(f)
vocabulary = {w: i for i, w in enumerate(set(neighbors.keys()).union(chain.from_iterable(neighbors.values())))}
with open(args.vocab_path, 'w') as f:
json.dump(vocabulary, f)
print('Done')
for i, is_train in enumerate((False, True)):
dataset = None
args.train = is_train
if args.do_hashing:
dataset = _load_dataset(args)
print(f'Creating hashes for train={is_train}')
ordered_ds = {(' '.join(dataset.get_sentence_to_perturb(i)[0])).lower(): i for i in trange(len(dataset))}
args.out_path = os.path.join(os.path.dirname(args.neighbors_path), args.dataset,
'hashes_' + ('train' if is_train else 'dev') + '.json')
dnmae = os.path.dirname(args.out_path)
if not os.path.isdir(dnmae):
os.makedirs(dnmae)
with open(args.out_path, 'w') as f:
json.dump(ordered_ds, f)
if is_train and args.skip_train:
print("skipping train set")
continue
if not is_train and args.skip_dev:
print("skipping dev set")
continue
if dataset is None:
dataset = _load_dataset(args)
args.base_progress = 0.5 * i
print(f'Starting on train={is_train}')
args.out_path = os.path.join(os.path.dirname(args.neighbors_path), args.dataset,
'synonyms_' + ('train' if is_train else 'dev'))
dnmae = os.path.dirname(args.out_path + '.pkl')
if not os.path.isdir(dnmae):
os.makedirs(dnmae)
n_samples = len(dataset)
start = args.first_sample
assert 0 <= start < n_samples
n_samples -= start
if args.debug_samples > 0:
n_samples = min(n_samples, args.debug_samples)
end = start + n_samples
if args.first_sample > 0 or args.debug_samples > 0:
args.out_path = args.out_path + f'_{start}_{end}'
print('total samples: ', n_samples)
if args.n_processes > 1:
splits = np.linspace(start, end, num=args.n_processes + 1).astype(int)
import torch
devices = [i % torch.cuda.device_count() for i in range(args.n_processes)]
with ctx.Pool(args.n_processes) as executor:
# args, starting_sample_num=0, last_sample_num=np.inf, cuda_device=0, partial_proc_num=None, return_dataset=True
outs = list(executor.map(analyze_dataset_chunk, zip(splits[:-1], splits[1:], [args]*args.n_processes, devices)))
cached_synonyms = CachedSynonyms.merge_multiple_cached_synonyms(*outs)
else:
cached_synonyms = analyze_dataset_chunk((start, end, args, 0))
with open(args.out_path + '.pkl', 'wb') as f:
pickle.dump(cached_synonyms, f)
# make sure we prepared all sentences
assert len(cached_synonyms) == n_samples
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# inv.inv data plugin
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from noc.inv.models.object import Object
from noc.sa.interfaces.base import ObjectIdParameter, IntParameter
from .base import InvPlugin
class RackPlugin(InvPlugin):
name = "rack"
js = "NOC.inv.inv.plugins.rack.RackPanel"
def init_plugin(self):
super(RackPlugin, self).init_plugin()
self.add_view(
"api_plugin_%s_set_rackload" % self.name,
self.api_set_rack_load,
url="^(?P<id>[0-9a-f]{24})/plugin/%s/rackload/$" % self.name,
method=["POST"],
validate={
"cid": ObjectIdParameter(),
"position_front": IntParameter(),
"position_rear": IntParameter(),
"shift": IntParameter(),
},
)
def get_data(self, request, o):
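        """Return the rack geometry and its children: "load" lists every child
        with its front/rear position and shift, while "content" lists only the
        items that actually occupy a rack position."""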
r = {
"id": str(o.id),
"rack": dict((k, o.get_data("rack", k)) for k in ("units", "width", "depth")),
"content": [],
"load": [],
}
r["rack"]["label"] = o.name
# Fill content
for c in o.get_content():
units = c.get_data("rackmount", "units")
pos = c.get_data("rackmount", "position")
side = c.get_data("rackmount", "side") or "f"
r["load"] += [
{
"id": str(c.id),
"name": c.name,
"model": c.model.name,
"units": units,
"position_front": pos if units and side == "f" else None,
"position_rear": pos if units and side == "r" else None,
"shift": c.get_data("rackmount", "shift") or 0,
}
]
if units and pos:
if c.get_data("management", "managed"):
mo = c.get_data("management", "managed_object")
else:
mo = None
r["content"] += [
{
"id": str(c.id),
"units": units,
"pos": pos,
"name": c.name,
"managed_object_id": mo,
"side": side,
"shift": c.get_data("rackmount", "shift") or 0,
}
]
return r
def api_set_rack_load(self, request, id, cid, position_front, position_rear, shift):
o = self.app.get_object_or_404(Object, id=id)
co = self.app.get_object_or_404(Object, id=cid)
if co.container != o.id:
return self.app.response_not_found()
if position_front:
co.set_data("rackmount", "position", position_front)
co.set_data("rackmount", "side", "f")
co.save()
co.log(
"Set rack position to front #%d" % position_front,
user=request.user.username,
system="WEB",
op="CHANGE",
)
elif position_rear:
co.set_data("rackmount", "position", position_rear)
co.set_data("rackmount", "side", "r")
co.save()
co.log(
"Set rack position to rear #%d" % position_rear,
user=request.user.username,
system="WEB",
op="CHANGE",
)
else:
co.reset_data("rackmount", "position")
co.reset_data("rackmount", "side")
co.save()
co.log("Reset rack position", user=request.user.username, system="WEB", op="CHANGE")
if shift < 0 or shift > 2:
shift = 0
if co.get_data("rackmount", "shift") != shift:
co.set_data("rackmount", "shift", shift)
co.save()
co.log(
"Set position shift to %d holes" % shift,
user=request.user.username,
system="WEB",
op="CHANGE",
)
|
from collections import deque
class Parser:
@staticmethod
def isValid(data):
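        # A frame is considered valid if it starts with "%xt%" and splits into
        # at least three "%"-separated fields.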
parsed = Parser.parseRaw(data)
if data.startswith("%xt%") and len(parsed) >= 3:
return True
else:
return False
@staticmethod
def parseRaw(data):
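        # Split on "%" and drop the empty first and last elements produced by
        # the leading and trailing "%" delimiters of the frame.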
parsed = deque(data.split("%"))
parsed.popleft()
parsed.pop()
return parsed
@staticmethod
def parseVertical(data):
parsed = data.split("|")
return parsed
|
import tkinter
import PIL
from PIL import ImageTk
class Top_Menu:
def __init__(self,root, gridx, gridy, open_wld, map_update, map_list, save_wld,
zoom_in, zoom_out, add_object, toggle_grid):
frame = tkinter.Frame(root, bd=2, relief=tkinter.SUNKEN)
frame.grid(row=gridx, column=gridy, sticky=tkinter.N+tkinter.E+tkinter.W)
frame.grid_rowconfigure(0, weight=0)
frame.grid_columnconfigure(0, weight=0)
open_wld_button = tkinter.Button(frame, text="Open Wld Folder", command=open_wld)
save_wld_button = tkinter.Button(frame, text="Save Wld", command=save_wld)
self.map_names = [ m.data['file_name'] for m in map_list ]
self.selected_map = tkinter.StringVar(frame)
self.selected_map.set(self.map_names[0])
self.selected_map_menu = tkinter.OptionMenu(frame, self.selected_map, *self.map_names)
self.selected_map.trace('w', map_update)
toggle_grid_button = tkinter.Button(frame, text="Toggle Grid", command=toggle_grid)
add_object_button = tkinter.Button(frame, text="Add Object", command=add_object)
zoom_in_button = tkinter.Button(frame, text="Zoom In", command=zoom_in)
zoom_out_button = tkinter.Button(frame, text="Zoom Out", command=zoom_out)
open_wld_button.grid(row=0, column=0)
self.selected_map_menu.grid(row=0, column=1)
save_wld_button.grid(row=0, column=2)
add_object_button.grid(row=0, column=3)
toggle_grid_button.grid(row=0, column=4)
zoom_in_button.grid(row=0, column=5)
zoom_out_button.grid(row=0, column=6)
def get_selected_map(self):
if self.selected_map.get() in self.map_names:
return self.map_names.index(self.selected_map.get())
else:
return -1
|
#!/usr/bin/python
"""
This is a python implementation of the core
MixText functionality. This is originally intended
to be used with BinStore to allow any machine to configure
BinStore enabled implants.
"""
import sys
MIX_TEXT_KEY_BYTE=0x47
def mix(src, rand):
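    """Obfuscate src: emit a header byte derived from rand, then XOR every
    byte with a value built from its index, MIX_TEXT_KEY_BYTE and a running
    sum (mod 256) of the previously emitted bytes. The result is one byte
    longer than src."""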
global MIX_TEXT_KEY_BYTE
prev = ""
retval = ""
i = 0
rand &= 0xff
prev = (i ^ rand ^ MIX_TEXT_KEY_BYTE)
retval += chr(prev)
i += 1
for char in src:
c = ord(char)
value = (c ^ (i ^ prev ^ MIX_TEXT_KEY_BYTE)) & 0xff
retval += chr(value)
prev += value
prev &= 0xff
i += 1
return retval
def unmix(src):
global MIX_TEXT_KEY_BYTE
i = 0
retval = ""
prev = ord(src[i])
i += 1
for char in src[i:]:
c = ord(char)
value = (c ^ MIX_TEXT_KEY_BYTE ^ prev ^ i) & 0xff
retval += chr(value)
prev += c
prev &= 0xff
i += 1
return retval
def printBytes(string):
for c in string:
sys.stdout.write("%02x " % ord(c))
sys.stdout.write("\n")
if __name__ == "__main__":
# some real basic "unit testing"
string = "\xff\xfe\x43\x00"
print "original string:"
printBytes(string)
result = mix(string, 0x24)
if len(result) != len(string) + 1:
raise Exception, "mix'd strings should be one byte bigger"
print "mix'd string: "
printBytes(result)
result2 = unmix(result)
print "unmix'd string: "
printBytes(result2)
if result2 != string:
raise Exception, "unmixing did not return original input"
|
illness_list=[ "syöpä",
"syövän",
"syöpiä",
"kasvain",
"tuumori",
"kasvaimen",
"kasvaimia",
"tauti"]
treatment_list=["hoito",
"hoidon",
"hoidot",
"hoitoa",
"tutkimus",
"leikkaus",
"lääkitys",
"säteily",
"tarkastus",
"tarkastuksen"]
death_list=[ "kuolla",
"kuollut",
"kuolen",
"kuoli"]
social_list=[ "ystävä",
"kaveri",
"rakastettu",
"läheinen",
"rakas",
"äiti",
"isä"]
financial_list=["raha",
"hinta",
"hintava",
"arvo",
"maksaa",
"maksoi",
"määrä"]
|
valid_speed_modes =\
{
"normal": 1.0,
"slow": 2.0,
"turbo": 0.1
}
teams_search_key = 'teams'
bs_generator_api = 'https://corporatebs-generator.sameerkumar.website/'
waiting_intervals = 2
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get(isamAppliance, resource_id, check_mode=False, force=False):
"""
Retrieving a list of stanzas - Runtime Environment
"""
return isamAppliance.invoke_get("Retrieving a list of stanzas - Runtime Environment",
"/isam/runtime/{0}/configuration/stanza".format(resource_id))
def add(isamAppliance, resource_id, stanza_id, check_mode=False, force=False):
"""
Adding a configuration stanza name - Runtime Environment
"""
if force is True or _check(isamAppliance, resource_id, stanza_id) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Adding a configuration stanza name - Runtime Environment",
"/isam/runtime/{0}/configuration/stanza/{1}".format(resource_id, stanza_id),
{})
return isamAppliance.create_return_object()
def delete(isamAppliance, resource_id, stanza_id, check_mode=False, force=False):
"""
Deleting a stanza - Runtime Environment
"""
if force is True or _check(isamAppliance, resource_id, stanza_id) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Deleting a stanza - Runtime Environment",
"/isam/runtime/{0}/configuration/stanza/{1}".format(resource_id, stanza_id))
return isamAppliance.create_return_object()
def _check(isamAppliance, resource_id, stanza_id):
"""
Check if entry exists
"""
ret_obj = get(isamAppliance, resource_id)
for stanza in ret_obj['data']:
if stanza == stanza_id:
logger.info("Stanza found in resource: " + resource_id)
return True
logger.info("Stanza *not* found in resource: " + resource_id)
return False
def compare(isamAppliance1, isamAppliance2, resource_id):
"""
Compare stanzas within resource between two appliances
"""
import ibmsecurity.isam.web.runtime.configuration.entry
# The following array contains entries that will be ignored (across all configuration files/stanzas)
ignore_entries = ['master-host', 'bind-pwd', 'listen-interface']
# Retrieve all stanzas and corresponding entries for comparison
ret_obj1 = get(isamAppliance1, resource_id)
entries = {}
for stanza in ret_obj1['data']:
entries[stanza] = {}
stanza_entries = ibmsecurity.isam.web.runtime.configuration.entry.get_all(isamAppliance1, resource_id, stanza)
        for k, v in stanza_entries['data'].items():
if k not in ignore_entries:
entries[stanza][str(k)] = v
ret_obj1['data'] = entries
ret_obj2 = get(isamAppliance2, resource_id)
entries = {}
for stanza in ret_obj2['data']:
entries[stanza] = {}
stanza_entries = ibmsecurity.isam.web.runtime.configuration.entry.get_all(isamAppliance2, resource_id, stanza)
        for k, v in stanza_entries['data'].items():
if k not in ignore_entries:
entries[stanza][str(k)] = v
ret_obj2['data'] = entries
return ibmsecurity.utilities.tools.json_compare(ret_obj1=ret_obj1, ret_obj2=ret_obj2, deleted_keys=[])
|
#!/usr/bin/env python3
# After this script: ./john --format=NETNTLM freeradius.john
#
# 12/19/2011 - Josh Kelley
###############################################################################
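# The loop below scans the log for blocks of the form (hypothetical excerpt;
# only the "mschap:", "username:", "challenge:" and "response:" prefixes matter):
#   mschap: ...
#   username: alice
#   challenge: 3a:2b:...
#   response: 1c:9f:...
# and writes one "username:$NETNTLM$<challenge>$<response>" line per block.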
import sys
OUTPUT = "freeradius.john"
if len(sys.argv) < 2:
    print(sys.argv[0] + " <freeradius log file>")
    exit()
LOG_FILE = sys.argv[1]
fileIn = open(LOG_FILE,'r')
fileOut = open(OUTPUT,'w')
i = 0
for line in fileIn:
lineClean = line.strip()
lineSplit = lineClean.split(':')
if lineSplit[0] == "mschap":
i = i + 1
if lineSplit[0] == "username":
username = lineSplit[1].strip()
i = i + 1
if lineSplit[0] == "challenge":
challenge = ""
for x in lineSplit[1:]:
challenge = challenge + x
challenge = challenge.strip()
i = i + 1
if lineSplit[0] == "response":
response = ""
for x in lineSplit[1:]:
response = response + x
response = response.strip()
i = i + 1
if i == 4:
        lineNew = "%s:$NETNTLM$%s$%s" % (username, challenge, response)
        fileOut.write(lineNew + "\n")
        i = 0
fileIn.close()
fileOut.close()
|
from . import *
import random
import networkx as nx
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# https://www.infobyip.com/detectmonitordpi.php
DPI = 96
LINE_WIDTH = 2.0
LABEL_FONT_SIZE = 'xx-large'
TICK_FONT_SIZE = 'x-large'
def plot_vec(data, ax, aspect=3, yticklabels=None, vmin=None, vmax=None, log=False, column_index=0, n_columns=1):
shape_tpl = data.shape
data = data.reshape((shape_tpl[0], 1))
if vmin is None:
vmin = np.min(data)
if vmax is None:
vmax = np.max(data)
# create labels centered at each heatmap block
row_range = np.arange(shape_tpl[0])
ax.get_xaxis().set_visible(False)
ax.set_yticks(row_range + 0.5)
if yticklabels is None:
yticklabels = map(lambda x: str(x), row_range)
ax.set_yticklabels(yticklabels, fontsize=TICK_FONT_SIZE)
if column_index == 0:
ax.set_ylabel("Node", fontsize=LABEL_FONT_SIZE)
ax.set_title("Scores", fontsize=LABEL_FONT_SIZE)
# only plot colorbar if column_index is the last column: n_columns - 1
colorbar = None
if log:
mappable = ax.pcolor(data, cmap=plt.cm.Reds, edgecolors='k', linewidth=LINE_WIDTH, norm=colors.LogNorm(vmin=vmin, vmax=vmax), vmin=vmin, vmax=vmax)
if column_index == n_columns - 1:
colorbar = plt.colorbar(mappable, ax=ax, orientation='horizontal', ticks=[vmin, vmax], pad=0.05, fraction=0.08, aspect=aspect, format=mpl.ticker.LogFormatter(), use_gridspec=True)
colorbar.set_ticks([vmin, vmax])
colorbar.set_ticklabels(['{:.0e}'.format(vmin), '{:.0e}'.format(vmax)])
else:
mappable = ax.pcolor(data, cmap=plt.cm.Reds, edgecolors='k', linewidth=LINE_WIDTH, vmin=vmin, vmax=vmax)
if column_index == n_columns - 1:
colorbar = plt.colorbar(mappable, ax=ax, orientation='horizontal', ticks=[vmin, vmax], pad=0.05, fraction=0.08, aspect=aspect, use_gridspec=True)
if column_index == n_columns - 1:
colorbar.ax.tick_params(labelsize=TICK_FONT_SIZE)
def plot_graph(G, ax, pos=None, colormap=None, title='', title_fontsize=LABEL_FONT_SIZE, title_y=1.0, vmin=None, vmax=None, **kwargs):
labels = {}
for node in G.nodes():
labels[node] = str(node)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title(title, fontsize=title_fontsize, y=title_y)
if pos is None:
pos = nx.spring_layout(G, k=1/np.sqrt(G.order()), iterations=200)
# draw scatter manually to add node borders
xs = []
ys = []
for node_id, center in pos.items():
xs.append(center[0])
ys.append(center[1])
nx.draw_networkx_edges(G, pos, ax=ax, width=LINE_WIDTH)
# choose a light red for contrast with black
rgba = plt.cm.Reds(0.4)
ax.scatter(xs, ys, s=600, c=rgba, marker='o', edgecolor='black', linewidth=2.0, alpha=1.0)
nx.draw_networkx_labels(G, pos, ax=ax, labels=labels, font_size=TICK_FONT_SIZE)
return pos
def plot_pathway_interactors(G, pathway_node, fig, ax, data, nodelist, title=None, title_fontsize=LABEL_FONT_SIZE, title_y=1.0, colormap=None, vmin=None, vmax=None, pos=None, **kwargs):
if pathway_node not in G:
raise FactorLibException("pathway_node must be a node in G")
if colormap is None:
colormap = plt.cm.Reds
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.axis('off')
if vmin is None:
vmin = np.min(data)
if vmax is None:
vmax = np.max(data)
x0, y0, x1, y1 = ax.get_position().get_points().flatten()
gs = mpl.gridspec.GridSpec(1, 2, width_ratios=[1,5], left=x0, bottom=y0, right=x1, top=y1)
ax_1 = plt.Subplot(fig, gs[0])
ax_2 = plt.Subplot(fig, gs[1])
fig.add_subplot(ax_1)
fig.add_subplot(ax_2)
# - {{ ax_1 heatmap
# use node identifiers as heatmap labels but use node indices as graph labels because they dont fit well
# associate node number with the identifier
node_to_label = {}
for i, node in enumerate(nodelist):
node_to_label[node] = str(i)
node_to_label[pathway_node] = pathway_node
G = nx.relabel_nodes(G, node_to_label)
label_list = []
for i, node in enumerate(nodelist):
label_list.append('{}: {}'.format(node, i))
plot_vec(data, ax_1, yticklabels=label_list, vmin=vmin, vmax=vmax, log=True)
# ax_1 heatmap }} -
# - {{ ax_2 graph
ax_2.get_xaxis().set_visible(False)
ax_2.get_yaxis().set_visible(False)
if pos is None:
pos = nx.spring_layout(G, k=1/np.sqrt(G.order()), iterations=200)
nx.draw_networkx_edges(G, pos, ax=ax_2, width=LINE_WIDTH)
# draw scatter manually to add node borders
xs = []
ys = []
for node_id, center in pos.items():
if node_id != pathway_node:
xs.append(center[0])
ys.append(center[1])
# choose a light red for contrast with black
rgba = plt.cm.Reds(0.4)
ax_2.scatter(xs, ys, s=600, c=rgba, marker='o', edgecolor='black', linewidth=LINE_WIDTH, alpha=1.0)
# label all nodes other than pathway_node
G.remove_node(pathway_node)
pathway_node_center = pos.pop(pathway_node)
nx.draw_networkx_labels(G, pos, ax=ax_2, font_size=TICK_FONT_SIZE)
# label pathway_node
ax_2.text(pathway_node_center[0], pathway_node_center[1], pathway_node, horizontalalignment='center', verticalalignment='center', fontsize=LABEL_FONT_SIZE, bbox=dict(facecolor=rgba, edgecolor='black', linewidth=LINE_WIDTH))
ax_2.set_title(title, fontsize=title_fontsize, y=title_y)
# ax_2 graph }} -
return pos
def plot_latent_and_graph(G, fig, ax, data=None, nodelist=None, nodelabels=None,
column_index=0, pos=None, colormap=None, max_col=30, log=True,
title='', title_y=1.0, title_fontsize=LABEL_FONT_SIZE,
vmin=None, vmax=None, **kwargs):
"""
Plot a vector and a graph
"""
if data is not None and nodelist is None:
raise FactorLibException("<data> and <nodelist> must be both set or both None")
if colormap is None:
colormap = plt.cm.Reds
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.axis('off')
if data is None:
# generate data to be smooth on graph
comps = sorted(nx.connected_components(G), key=lambda x: len(x), reverse=True)
comp = list(comps[0])
data = np.zeros((G.order(), 1))
for node in comp:
data[node] = 0.6 + random.uniform(0.1, 0.3)
node = random.choice(comp)
data[node] = 0.95
for comp in comps[1:]:
for node in comp:
data[node] = random.uniform(0.1, 0.3)
else:
# use provided data
data = data.reshape((len(nodelist), 1))
# split ax into 2+ subplots
# if there are too many nodes to plot, separate heatmap into multiple pieces
n_heatmaps = 1
width_ratios = [1,5]
if data.shape[0] > max_col:
n_heatmaps = 2
width_ratios = [1,1,1,4]
n_col = len(width_ratios)
x0, y0, x1, y1 = ax.get_position().get_points().flatten()
gs = mpl.gridspec.GridSpec(1, n_col, width_ratios=width_ratios, left=x0, bottom=y0, right=x1, top=y1)
axes = []
for i in range(n_heatmaps):
# skip every other ax in gridspec because it is used to make space for heatmap labels
j = i*2
ax = plt.Subplot(fig, gs[j])
fig.add_subplot(ax)
axes.append(ax)
# add ax for graph
ax = plt.Subplot(fig, gs[-1])
fig.add_subplot(ax)
axes.append(ax)
# use node identifiers as heatmap labels but use node indices as graph labels because they dont fit well
if nodelist is None:
nodelist = sorted(G.nodes())
node_to_ind = {}
for i, node in enumerate(nodelist):
node_to_ind[node] = i
G = nx.relabel_nodes(G, node_to_ind)
if vmin is None:
vmin = np.min(data)
if vmax is None:
vmax = np.max(data)
if nodelabels is None:
nodelabels = []
for i, node in enumerate(nodelist):
nodelabels.append('{}: {}'.format(node, i))
for i in range(n_heatmaps):
data_start = 0 + max_col * i
data_end = max_col * (i+1)
data_this = data[data_start:data_end,0]
yticklabels = nodelabels[data_start:data_end]
plot_vec(data_this, axes[i], yticklabels=yticklabels, vmin=vmin, vmax=vmax, log=log, column_index=i, n_columns=n_heatmaps)
ax = axes[-1]
    pos = plot_graph(G, ax, pos=pos, title=title, title_fontsize=title_fontsize, title_y=title_y)
return pos
def split_title(title, n_lines=2):
    """Insert line breaks so that the title wraps onto roughly n_lines lines."""
    title_len = len(title)
    cur_len = 0
    new_words = []
    words = title.split()
    for word in words:
        new_words.append(word)
        cur_len += len(word)
        # start a new line once the current line has used up its share of the title
        if cur_len >= title_len / float(n_lines):
            new_words.append('\n')
            cur_len = 0
    return ' '.join(new_words)
|
import numpy as np
from gym import utils
from gym.envs.dart import dart_env
class DartWalker2dEnv(dart_env.DartEnv, utils.EzPickle):
def __init__(self):
self.control_bounds = np.array([[1.0]*6,[-1.0]*6])
self.action_scale = np.array([100, 100, 20, 100, 100, 20])
obs_dim = 17
dart_env.DartEnv.__init__(self, 'walker2d.skel', 4, obs_dim, self.control_bounds, disableViewer=False)
utils.EzPickle.__init__(self)
def _step(self, a):
pre_state = [self.state_vector()]
clamped_control = np.array(a)
for i in range(len(clamped_control)):
if clamped_control[i] > self.control_bounds[0][i]:
clamped_control[i] = self.control_bounds[0][i]
if clamped_control[i] < self.control_bounds[1][i]:
clamped_control[i] = self.control_bounds[1][i]
tau = np.zeros(self.robot_skeleton.ndofs)
tau[3:] = clamped_control * self.action_scale
posbefore = self.robot_skeleton.q[0]
self.do_simulation(tau, self.frame_skip)
        posafter, ang = self.robot_skeleton.q[0], self.robot_skeleton.q[2]
height = self.robot_skeleton.bodynodes[2].com()[1]
contacts = self.dart_world.collision_result.contacts
total_force_mag = 0
for contact in contacts:
total_force_mag += np.square(contact.force).sum()
alive_bonus = 1.0
vel = (posafter - posbefore) / self.dt
reward = vel
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
# uncomment to enable knee joint limit penalty
'''joint_limit_penalty = 0
for j in [-2, -5]:
if (self.robot_skeleton.q_lower[j] - self.robot_skeleton.q[j]) > -0.05:
joint_limit_penalty += abs(1.5)
if (self.robot_skeleton.q_upper[j] - self.robot_skeleton.q[j]) < 0.05:
joint_limit_penalty += abs(1.5)
reward -= 5e-1 * joint_limit_penalty'''
s = self.state_vector()
done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and
(height > .8) and (height < 2.0) and (abs(ang) < 1.0))
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
state = np.concatenate([
self.robot_skeleton.q[1:],
np.clip(self.robot_skeleton.dq,-10,10)
])
state[0] = self.robot_skeleton.bodynodes[2].com()[1]
return state
def reset_model(self):
self.dart_world.reset()
qpos = self.robot_skeleton.q + self.np_random.uniform(low=-.005, high=.005, size=self.robot_skeleton.ndofs)
qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.005, high=.005, size=self.robot_skeleton.ndofs)
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self._get_viewer().scene.tb.trans[2] = -5.5
|
# -*- coding: utf-8 -*-
"""Conditional_SLE-GAN_template.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GADBacbjtpsiWWnlS_gs8KPgIzphobf3
# Overview
Add a conditional (class-controlled) image generation capability to SLE-GAN (Lightweight GAN)
# Select which GPU to use
"""
import os
# For a dual-GPU setup, use "0,1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
"""# 使用するGPUメモリの制限"""
import tensorflow as tf
# tf.compat.v1.disable_eager_execution()
tf_ver = tf.__version__
GPU_ID = 0
if tf_ver >= "2.1.0":
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.list_physical_devices('GPU')
tf.config.set_visible_devices(physical_devices[GPU_ID], 'GPU')
tf.config.experimental.set_memory_growth(physical_devices[GPU_ID], True)
elif tf_ver.startswith('1.'):
from tensorflow.keras.backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
tf.executing_eagerly()
"""# 実験用環境変数"""
RUN = 2
RESOLUTION = 256
BATCH_SIZE = 2
EPOCHS = 200
IMAGE_DIR = 'path to top directory of images'
OVERRIDE = True
ANNOTATION_FILE = 'path to annotation csv file'
generator_weights = None
discriminator_weights = None
G_learning_rate = 2e-4
D_learning_rate = 2e-4
FID = True
DIFF_AUGMENT = True
FID_FREQUENCY = 1
"""# 教師データ生成クラス"""
from PIL import Image, ImageOps
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
import tensorflow as tf
class ImageSequence(tf.keras.utils.Sequence):
def __init__(self, file_list, conditional_vectors, batch_size, resolution, shuffle=True, horizontal_flip=False):
self.file_list = list(file_list)
self.conditional_vectors = conditional_vectors
self.batch_size = batch_size
self.resolution = resolution
self.shuffle = shuffle
self.horizontal_flip = horizontal_flip
self.indexes = np.arange(len(self.file_list))
if self.shuffle:
random.shuffle(self.indexes)
def __getitem__(self, index):
x = []
y = []
idx = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
# print(len(self.conditional_vectors))
for i in idx:
f = self.file_list[i]
img = Image.open(f)
if img.mode != 'RGB':
img = img.convert('RGB')
# if self.horizontal_flip and random.random() > 0.5:
# img = ImageOps.mirror(img)
img = img.resize((self.resolution, self.resolution), Image.BILINEAR)
img = img_to_array(img).astype('float32')
img -= 127.5
img /= 127.5
x.append(np.expand_dims(img, axis=0))
y.append(np.expand_dims(self.conditional_vectors[i], axis=0))
return np.concatenate(x, axis=0), np.concatenate(y, axis=0)
def __len__(self):
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
if self.shuffle:
random.shuffle(self.indexes)
"""# アノテーションファイルを読み込む
* CSVフォーマット
* ヘッダなし
* 1列目:画像ファイルのパス。起点画像フォルダからの相対パス
* 2列目以降:条件ベクトル
"""
import os
def load_annotation(image_dir, file):
files = []
condition_vectors = []
with open(file, 'r', encoding='utf8') as f:
lines = f.readlines()
for l in lines:
ws = l.rstrip().split(',')
files.append(os.path.join(image_dir, ws[0]))
tmp = np.array([float(v) for v in ws[1:]]).astype('float32')
condition_vectors.append(np.expand_dims(tmp, axis=0))
return files, np.concatenate(condition_vectors, axis=0)
"""# 学習
## モジュールのインポート
"""
import shutil
from pathlib import Path
import tensorflow as tf
import sle_gan
import numpy as np
# args = sle_gan.get_args()
# print(args)
# For debugging:
# tf.config.experimental_run_functions_eagerly(True)
"""## 実験環境セットアップ"""
physical_devices = tf.config.list_physical_devices("GPU")
_ = [tf.config.experimental.set_memory_growth(x, True) for x in physical_devices]
experiments_folder = Path("results") / f'run{RUN}'
if experiments_folder.is_dir():
if OVERRIDE:
shutil.rmtree(experiments_folder)
else:
raise FileExistsError("Experiment already exists")
checkpoints_folder = experiments_folder / "checkpoints"
checkpoints_folder.mkdir(parents=True)
logs_folder = experiments_folder / "tensorboard_logs"
logs_folder.mkdir(parents=True)
"""## 教師データ生成"""
file_list, condition_vectors = load_annotation(IMAGE_DIR, ANNOTATION_FILE)
NUM_CLASSES = condition_vectors.shape[-1]
# Sequence feeding the training data
dataset = ImageSequence(file_list, condition_vectors, batch_size=BATCH_SIZE, resolution=RESOLUTION,
shuffle=True)
"""## Generator初期化"""
G = sle_gan.Generator(RESOLUTION)
sample_G_output = G.initialize(BATCH_SIZE, condition_vectors[:BATCH_SIZE].astype('float32'))
if generator_weights is not None:
G.load_weights(generator_weights)
print("Weights are loaded for G")
print(f"[Model G] output shape: {sample_G_output.shape}")
"""## Discriminator初期化"""
D = sle_gan.Discriminator(RESOLUTION)
sample_D_output = D.initialize(BATCH_SIZE, condition_vectors[:BATCH_SIZE].astype('float32'))
if discriminator_weights is not None:
D.load_weights(discriminator_weights)
print("Weights are loaded for D")
print(f"[Model D] real_fake output shape: {sample_D_output[0].shape}")
print(f"[Model D] image output shape{sample_D_output[1].shape}")
print(f"[Model D] image part output shape{sample_D_output[2].shape}")
"""## Fréchet Inception Distance評価用セットアップ"""
if FID:
# Model for the FID calculation
fid_inception_model = sle_gan.InceptionModel(height=RESOLUTION, width=RESOLUTION)
test_input_size = 25
test_dataset = ImageSequence(file_list, condition_vectors, batch_size=test_input_size, resolution=RESOLUTION,
shuffle=True)
FID_NUMBER_OF_IMAGES = min(len(file_list), 128)
test_image_src, test_conditional_vectors = test_dataset.__getitem__(0)
    # Input latent vectors for testing the generator
test_input_for_generation = sle_gan.data.create_latent_vectors(test_input_size, test_conditional_vectors)
    # Input images for testing the discriminator
test_images = sle_gan.data.create_discriminator_inputs(test_image_src, test_conditional_vectors)
tb_file_writer = tf.summary.create_file_writer(str(logs_folder))
tb_file_writer.set_as_default()
"""## 最適化アルゴリズム"""
G_optimizer = tf.optimizers.Adam(learning_rate=G_learning_rate)
D_optimizer = tf.optimizers.Adam(learning_rate=D_learning_rate)
"""## 損失"""
G_loss_metric = tf.keras.metrics.Mean()
D_loss_metric = tf.keras.metrics.Mean()
D_real_fake_loss_metric = tf.keras.metrics.Mean()
D_I_reconstruction_loss_metric = tf.keras.metrics.Mean()
D_I_part_reconstruction_loss_metric = tf.keras.metrics.Mean()
"""## データ拡張"""
diff_augment_policies = None
if DIFF_AUGMENT:
diff_augment_policies = "color,translation,cutout"
"""## 学習"""
for epoch in range(EPOCHS):
print(f"Epoch {epoch} -------------")
for step, image_batch in enumerate(dataset):
images, conditional_vectors = image_batch
G_loss, D_loss, D_real_fake_loss, D_I_reconstruction_loss, D_I_part_reconstruction_loss = sle_gan.train_step(
G=G,
D=D,
G_optimizer=G_optimizer,
D_optimizer=D_optimizer,
images=images,
conditional_vectors=conditional_vectors,
diff_augmenter_policies=diff_augment_policies)
G_loss_metric(G_loss)
D_loss_metric(D_loss)
D_real_fake_loss_metric(D_real_fake_loss)
D_I_reconstruction_loss_metric(D_I_reconstruction_loss)
D_I_part_reconstruction_loss_metric(D_I_part_reconstruction_loss)
if step % 100 == 0 and step != 0:
print(f"\tStep {step} - "
f"G loss {G_loss_metric.result():.4f} | "
f"D loss {D_loss_metric.result():.4f} | "
f"D realfake loss {D_real_fake_loss_metric.result():.4f} | "
f"D I recon loss {D_I_reconstruction_loss_metric.result():.4f} | "
f"D I part recon loss {D_I_part_reconstruction_loss_metric.result():.4f}")
    if FID:
if epoch % FID_FREQUENCY == 0:
fid_score = sle_gan.evaluation_step(inception_model=fid_inception_model,
dataset=test_dataset,
G=G,
batch_size=test_input_size,
image_height=RESOLUTION,
image_width=RESOLUTION,
nb_of_images_to_use=FID_NUMBER_OF_IMAGES)
print(f"[FID] {fid_score:.2f}")
tf.summary.scalar("FID_score", fid_score, epoch)
tf.summary.scalar("G_loss/G_loss", G_loss_metric.result(), epoch)
tf.summary.scalar("D_loss/D_loss", D_loss_metric.result(), epoch)
tf.summary.scalar("D_loss/D_real_fake_loss", D_real_fake_loss_metric.result(), epoch)
tf.summary.scalar("D_loss/D_I_reconstruction_loss", D_I_reconstruction_loss_metric.result(), epoch)
tf.summary.scalar("D_loss/D_I_part_reconstruction_loss", D_I_part_reconstruction_loss_metric.result(), epoch)
print(f"Epoch {epoch} - "
f"G loss {G_loss_metric.result():.4f} | "
f"D loss {D_loss_metric.result():.4f} | "
f"D realfake loss {D_real_fake_loss_metric.result():.4f} | "
f"D I recon loss {D_I_reconstruction_loss_metric.result():.4f} | "
f"D I part recon loss {D_I_part_reconstruction_loss_metric.result():.4f}")
G_loss_metric.reset_states()
D_loss_metric.reset_states()
D_real_fake_loss_metric.reset_states()
D_I_part_reconstruction_loss_metric.reset_states()
D_I_reconstruction_loss_metric.reset_states()
# TODO: save weights only when the FID score gets better
G.save_weights(str(checkpoints_folder / "G_checkpoint.h5"))
D.save_weights(str(checkpoints_folder / "D_checkpoint.h5"))
# Generate test images
generated_images = G(test_input_for_generation, training=False)
generated_images = sle_gan.postprocess_images(generated_images, dtype=tf.uint8).numpy()
sle_gan.visualize_images_on_grid_and_save(epoch, generated_images, experiments_folder / "generated_images",
5, 5)
# Generate reconstructions from Discriminator
_, decoded_images, decoded_part_images = D(test_images, training=False)
decoded_images = sle_gan.postprocess_images(decoded_images, dtype=tf.uint8).numpy()
decoded_part_images = sle_gan.postprocess_images(decoded_part_images, dtype=tf.uint8).numpy()
sle_gan.visualize_images_on_grid_and_save(epoch, decoded_images, experiments_folder / "reconstructed_whole_images",
5, 5)
sle_gan.visualize_images_on_grid_and_save(epoch, decoded_part_images,
experiments_folder / "reconstructed_part_images", 5, 5)
|
# https://github.com/pytorch/vision/blob/master/torchvision/models/__init__.py
import argparse
import os,sys
import shutil
import pdb, time
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import convert_secs2time, time_string, time_file_str
# from models import print_log
import models
import random
import numpy as np
import copy
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--save_dir', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', choices=model_names,
help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--print-freq', '-p', default=5, type=int, metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# compress rate
parser.add_argument('--rate', type=float, default=0.9, help='compress rate of model')
parser.add_argument('--layer_begin', type=int, default=3, help='compress layer of model')
parser.add_argument('--layer_end', type=int, default=3, help='compress layer of model')
parser.add_argument('--layer_inter', type=int, default=1, help='compress layer of model')
parser.add_argument('--epoch_prune', type=int, default=1, help='compress layer of model')
parser.add_argument('--skip_downsample', type=int, default=1, help='compress layer of model')
parser.add_argument('--get_small', dest='get_small', action='store_true', help='whether a big or small model')
args = parser.parse_args()
args.use_cuda = torch.cuda.is_available()
args.prefix = time_file_str()
def main():
best_prec1 = 0
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
log = open(os.path.join(args.save_dir, 'gpu-time.{}.{}.log'.format(args.arch, args.prefix)), 'w')
# create model
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](pretrained=False)
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
print_log("Compress Rate: {}".format(args.rate), log)
print_log("Layer Begin: {}".format(args.layer_begin), log)
print_log("Layer End: {}".format(args.layer_end), log)
print_log("Layer Inter: {}".format(args.layer_inter), log)
print_log("Epoch prune: {}".format(args.epoch_prune), log)
print_log("Skip downsample : {}".format(args.skip_downsample), log)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
state_dict = checkpoint['state_dict']
state_dict = remove_module_dict(state_dict)
model.load_state_dict(state_dict)
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
cudnn.benchmark = True
# Data loading code
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Scale(256),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
criterion = nn.CrossEntropyLoss().cuda()
if args.get_small:
big_path = os.path.join(args.save_dir, "big_model.pt")
torch.save(model, big_path)
small_model = get_small_model(model.cpu())
# small_model = torch.load('small_model.pt')
small_path = os.path.join(args.save_dir, "small_model.pt")
torch.save(small_model, small_path)
if args.use_cuda:
model = model.cuda()
small_model = small_model.cuda()
print('evaluate: big')
print('big model accu', validate(val_loader, model, criterion, log))
print('evaluate: small')
print('small model accu', validate(val_loader, small_model, criterion, log))
def validate(val_loader, model, criterion, log):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# target = target.cuda(async=True)
if args.use_cuda:
input, target = input.cuda(), target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg), log)
return top1.avg
def save_checkpoint(state, is_best, filename, bestname):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
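# e.g. prec1, prec5 = accuracy(logits, targets, topk=(1, 5)) returns the top-1
# and top-5 accuracy of the batch, expressed in percent.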
def remove_module_dict(state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
return new_state_dict
def import_sparse(model):
checkpoint = torch.load('/data/yahe/imagenet/resnet50-rate-0.7/checkpoint.resnet50.2018-01-07-9744.pth.tar')
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
print("sparse_model_loaded")
return model
def check_channel(tensor):
size_0 = tensor.size()[0]
size_1 = tensor.size()[1] * tensor.size()[2] * tensor.size()[3]
tensor_resize = tensor.view(size_0, -1)
# indicator: if the channel contain all zeros
channel_if_zero = np.zeros(size_0)
for x in range(0, size_0, 1):
channel_if_zero[x] = np.count_nonzero(tensor_resize[x].cpu().numpy()) != 0
# indices = (torch.LongTensor(channel_if_zero) != 0 ).nonzero().view(-1)
indices_nonzero = torch.LongTensor((channel_if_zero != 0).nonzero()[0])
# indices_nonzero = torch.LongTensor((channel_if_zero != 0).nonzero()[0])
zeros = (channel_if_zero == 0).nonzero()[0]
    indices_zero = torch.LongTensor(zeros) if len(zeros) != 0 else []
return indices_zero, indices_nonzero
def extract_para(big_model):
    '''
    :param big_model: pruned model whose zeroed-out filters are to be detected
    :return: kept_index_per_layer: indices of the remaining (non-zero) filters for each conv layer
             pruned_index_per_layer: indices of the pruned (all-zero) filters for each conv layer
             block_flag: "conv2" for basic-block ResNets, "conv3" for bottleneck ResNets
             small_model: small model constructed from the remaining filter counts
    '''
item = list(big_model.state_dict().items())
print("length of state dict is", len(item))
try:
assert len(item) in [102, 182, 267, 522]
print("state dict length is one of 102, 182, 267, 522")
except AssertionError as e:
print("False state dict")
# indices_list = []
kept_index_per_layer = {}
kept_filter_per_layer = {}
pruned_index_per_layer = {}
for x in range(0, len(item) - 2, 5):
indices_zero, indices_nonzero = check_channel(item[x][1])
# indices_list.append(indices_nonzero)
pruned_index_per_layer[item[x][0]] = indices_zero
kept_index_per_layer[item[x][0]] = indices_nonzero
kept_filter_per_layer[item[x][0]] = indices_nonzero.shape[0]
# add 'module.' if state_dict are store in parallel format
# state_dict = ['module.' + x for x in state_dict]
if len(item) == 102 or len(item) == 182:
basic_block_flag = ['conv1.weight',
'layer1.0.conv1.weight', 'layer1.0.conv2.weight',
'layer2.0.conv1.weight', 'layer2.0.conv2.weight',
'layer3.0.conv1.weight', 'layer3.0.conv2.weight',
'layer4.0.conv1.weight', 'layer4.0.conv2.weight']
constrct_flag = basic_block_flag
block_flag = "conv2"
elif len(item) == 267 or len(item) == 522:
bottle_block_flag = ['conv1.weight',
'layer1.0.conv1.weight', 'layer1.0.conv3.weight',
'layer2.0.conv1.weight', 'layer2.0.conv3.weight',
'layer3.0.conv1.weight', 'layer3.0.conv3.weight',
'layer4.0.conv1.weight', 'layer4.0.conv3.weight']
constrct_flag = bottle_block_flag
block_flag = "conv3"
# number of nonzero channel in conv1, and four stages
num_for_construct = []
for key in constrct_flag:
num_for_construct.append(kept_filter_per_layer[key])
index_for_construct = dict(
(key, value) for (key, value) in kept_index_per_layer.items() if block_flag in key)
bn_value = get_bn_value(big_model, block_flag, pruned_index_per_layer)
if len(item) == 102:
small_model = models.resnet18_small(index=kept_index_per_layer, bn_value=bn_value,
num_for_construct=num_for_construct)
if len(item) == 182:
small_model = models.resnet34_small(index=kept_index_per_layer, bn_value=bn_value,
num_for_construct=num_for_construct)
if len(item) == 267:
small_model = models.resnet50_small(index=kept_index_per_layer, bn_value=bn_value,
num_for_construct=num_for_construct)
if len(item) == 522:
small_model = models.resnet101_small(index=kept_index_per_layer, bn_value=bn_value,
num_for_construct=num_for_construct)
return kept_index_per_layer, pruned_index_per_layer, block_flag, small_model
def get_bn_value(big_model, block_flag, pruned_index_per_layer):
big_model.eval()
bn_flag = "bn3" if block_flag == "conv3" else "bn2"
    key_bn = [x for x in big_model.state_dict().keys() if bn_flag in x]
layer_flag_list = [[x[0:6], x[7], x[9:12], x] for x in key_bn if "weight" in x]
# layer_flag_list = [['layer1', "0", "bn3",'layer1.0.bn3.weight']]
bn_value = {}
for layer_flag in layer_flag_list:
module_bn = big_model._modules.get(layer_flag[0])._modules.get(layer_flag[1])._modules.get(layer_flag[2])
num_feature = module_bn.num_features
act_bn = module_bn(Variable(torch.zeros(1, num_feature, 1, 1)))
index_name = layer_flag[3].replace("bn", "conv")
index = Variable(torch.LongTensor(pruned_index_per_layer[index_name]))
act_bn = torch.index_select(act_bn, 1, index)
select = Variable(torch.zeros(1, num_feature, 1, 1))
select.index_add_(1, index, act_bn)
bn_value[layer_flag[3]] = select
return bn_value
def get_small_model(big_model):
indice_dict, pruned_index_per_layer, block_flag, small_model = extract_para(big_model)
big_state_dict = big_model.state_dict()
small_state_dict = {}
keys_list = list(big_state_dict.keys())
# print("keys_list", keys_list)
for index, [key, value] in enumerate(big_state_dict.items()):
# all the conv layer excluding downsample layer
flag_conv_ex_down = not 'bn' in key and not 'downsample' in key and not 'fc' in key
# downsample conv layer
flag_down = 'downsample.0' in key
# value for 'output' dimension: all the conv layer including downsample layer
if flag_conv_ex_down or flag_down:
small_state_dict[key] = torch.index_select(value, 0, indice_dict[key])
conv_index = keys_list.index(key)
# 4 following bn layer, bn_weight, bn_bias, bn_runningmean, bn_runningvar
for offset in range(1, 5, 1):
bn_key = keys_list[conv_index + offset]
small_state_dict[bn_key] = torch.index_select(big_state_dict[bn_key], 0, indice_dict[key])
# value for 'input' dimension
if flag_conv_ex_down:
# first layer of first block
if 'layer1.0.conv1.weight' in key:
small_state_dict[key] = torch.index_select(small_state_dict[key], 1, indice_dict['conv1.weight'])
# just conv1 of block, the input dimension should not change for shortcut
elif not "conv1" in key:
conv_index = keys_list.index(key)
# get the last con layer
key_for_input = keys_list[conv_index - 5]
# print("key_for_input", key, key_for_input)
small_state_dict[key] = torch.index_select(small_state_dict[key], 1, indice_dict[key_for_input])
# only the first downsample layer should change as conv1 reduced
elif 'layer1.0.downsample.0.weight' in key:
small_state_dict[key] = torch.index_select(small_state_dict[key], 1, indice_dict['conv1.weight'])
elif 'fc' in key:
small_state_dict[key] = value
if len(set(big_state_dict.keys()) - set(small_state_dict.keys())) != 0:
print("different keys of big and small model",
sorted(set(big_state_dict.keys()) - set(small_state_dict.keys())))
for x, y in zip(small_state_dict.keys(), small_model.state_dict().keys()):
if small_state_dict[x].size() != small_model.state_dict()[y].size():
print("difference with model and dict", x, small_state_dict[x].size(),
small_model.state_dict()[y].size())
small_model.load_state_dict(small_state_dict)
return small_model
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on 2021-01-05
@author: cheng.li
"""
from typing import Union
import cvxpy as cp
import numpy as np
from pfopt.base import _IOptimizer
class LpOptimizer(_IOptimizer):
def __init__(self,
cost: np.ndarray,
cons_matrix: np.ndarray = None,
lower_bound: Union[float, np.ndarray] = None,
upper_bound: Union[float, np.ndarray] = None):
super().__init__(cost, cons_matrix, lower_bound, upper_bound)
def solve(self, solver: str = "CBC"):
x, constraints = self._prepare()
prob = cp.Problem(cp.Minimize(x @ self._cost), constraints=constraints)
prob.solve(solver=solver)
return x.value, prob.value, prob.status
class L1LpOptimizer(_IOptimizer):
def __init__(self,
cost: np.ndarray,
benchmark: np.ndarray,
l1norm: float,
cons_matrix: np.ndarray = None,
lower_bound: Union[float, np.ndarray] = None,
upper_bound: Union[float, np.ndarray] = None):
super().__init__(cost, cons_matrix, lower_bound, upper_bound)
self._benchmark = benchmark
self._l1norm = l1norm
def solve(self, solver: str = "ECOS"):
x, constraints = self._prepare()
constraints.append(
cp.pnorm(x - self._benchmark, 1) <= self._l1norm
)
prob = cp.Problem(cp.Minimize(x @ self._cost), constraints=constraints)
prob.solve(solver=solver)
return x.value, prob.value, prob.status
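# Minimal usage sketch, not part of the library. Assumptions: _IOptimizer._prepare()
# builds a CVXPY variable x of len(cost) honoring lower_bound/upper_bound even when
# cons_matrix is None, and the requested solver is installed.
if __name__ == "__main__":
    cost = np.array([0.1, -0.2, 0.05])
    opt = LpOptimizer(cost, lower_bound=0., upper_bound=1.)
    weights, objective, status = opt.solve(solver="ECOS")
    print(status, objective, weights)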
|
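# Josephus elimination order: n people stand in a circle and every k-th person is
# removed; the removal sequence is printed in the form "<a, b, c, ...>".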
n, k = map(int, input().split())
circle = [x for x in range(1, n + 1)]
series = list()
target = 0
for i in range(n):
target = (target + k - 1) % (n - i)
series.append(circle[target])
circle = circle[:target] + circle[target + 1:]
print(f"<{', '.join(map(str, series))}>")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import sys
from setuptools import find_packages, setup
def get_version(filename):
"""Extract the package version."""
with open(filename, encoding='utf8') as in_fh:
for line in in_fh:
if line.startswith('__version__'):
return line.split('=')[1].strip()[1:-1]
raise ValueError("Cannot extract version from %s" % filename)
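# e.g. for a line `__version__ = '0.1.0'` in the target file, get_version() returns '0.1.0'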
with open('README.rst', encoding='utf8') as readme_file:
readme = readme_file.read()
try:
with open('HISTORY.rst', encoding='utf8') as history_file:
history = history_file.read()
except OSError:
history = ''
# requirements for use
requirements = [
'click >= 6.7',
'configobj >= 5.0.6',
'doctr',
'packaging',
'pyparsing >= 2.0.2',
'setuptools',
'sphinx',
]
# requirements for development (testing, generating docs)
dev_requirements = [
'coverage<5.0', # 5.0 breaks a lot of other packages:
# https://github.com/computationalmodelling/nbval/issues/129
# https://github.com/codecov/codecov-python/issues/224
'coveralls',
'flake8',
'gitpython',
'ipython',
'isort',
'pdbpp',
'pre-commit',
'pylint',
'pytest',
'pytest-cov',
'pytest-xdist',
'sphinx',
'sphinx-autobuild',
'sphinx-autodoc-typehints',
'sphinx-copybutton',
'sphinx_rtd_theme', # for testing only
'travis-encrypt',
'twine',
'wheel',
]
if sys.version_info >= (3, 6):
dev_requirements.append('black')
version = get_version('./src/doctr_versions_menu/__init__.py')
setup(
author="Michael Goerz",
author_email='mail@michaelgoerz.net',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Environment :: Console',
'Environment :: Web Environment',
'Environment :: Plugins',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Operating System :: OS Independent',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Software Development :: Documentation',
'Topic :: Utilities',
'Programming Language :: JavaScript',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
description=(
"Sphinx extension and command to add a versions menu to "
"Doctr-deployed documentation"
),
python_requires='>=3.5',
install_requires=requirements,
extras_require={'dev': dev_requirements},
license="MIT license",
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
include_package_data=True,
keywords=['Doctr', 'Sphinx', 'Github'],
name='doctr_versions_menu',
packages=find_packages(where="src"),
package_dir={"": "src"},
package_data={"": ["_template/*", "_css/*", "_fonts/*"]},
url='https://github.com/goerz/doctr_versions_menu',
version=version,
zip_safe=False,
entry_points='''
[console_scripts]
doctr-versions-menu=doctr_versions_menu.cli:main
''',
)
|
import torch
from torch import nn
import torch.nn.functional as F
def smooth_loss(pred_map):
def gradient(pred):
D_dy = pred[:, :, 1:] - pred[:, :, :-1]
D_dx = pred[:, :, :, 1:] - pred[:, :, :, :-1]
return D_dx, D_dy
loss = 0
weight = 1.
dx, dy = gradient(pred_map)
dx2, dxdy = gradient(dx)
dydx, dy2 = gradient(dy)
loss += (dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean())*weight
return loss
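# Minimal usage sketch (hypothetical shapes): the loss is the mean absolute value
# of the second-order spatial differences of a (B, C, H, W) prediction map, so it
# penalises curvature rather than slope.
if __name__ == "__main__":
    pred = torch.rand(2, 1, 32, 32, requires_grad=True)
    loss = smooth_loss(pred)
    loss.backward()
    print(float(loss))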
|
#!/usr/bin/env python
import sys
import subprocess
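# Thin wrapper around changelog.py's `lint` subcommand: the paths to lint are
# taken from the command line, or read newline-separated from stdin when no
# arguments are given.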
def main():
paths = sys.argv[1:] or sys.stdin.read().splitlines()
cmd = ['packaging/release/changelogs/changelog.py', 'lint'] + paths
subprocess.check_call(cmd)
if __name__ == '__main__':
main()
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='GradespeedScraper',
version='0.1-dev',
description='Scrapes Gradespeed',
author='Davis Robertson',
author_email='davis@daviskr.com',
license='MIT',
url='https://github.com/epicdavi/GradespeedScraper/',
      install_requires=['mechanize>=0.2.5', 'beautifulsoup4>=4.3,<4.4'],
)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
def add_helps(command_group, server_type):
helps['{}'.format(command_group)] = """
type: group
short-summary: Commands to manage %s servers.
""" % server_type
helps['{} server'.format(command_group)] = """
type: group
short-summary: Commands to manage %s servers.
""" % server_type
helps['{} server create'.format(command_group)] = """
type: command
short-summary: Create an {0} server
examples:
                    - name: Create server testsvr with only required parameters in North Europe.
text: az {1} server create -l northeurope -g testgroup -n testsvr -u username -p password
- name: Create server testsvr with specified performance tier and compute units in North Europe.
text: az {1} server create -l northeurope -g testgroup -n testsvr -u username -p password --performance-tier Standard --compute-units 100
                    - name: Create server testsvr with all parameters.
text: az {1} server create -l northeurope -g testgroup -n testsvr -u username -p password --performance-tier Basic --compute-units 100 --ssl-enforcement Disabled --storage-size 51200 --tags "key=value" --version <server_version>
""".format(server_type, command_group)
helps['{} server restore'.format(command_group)] = """
type: command
short-summary: Create a new {0} server by restoring from a server backup.
examples:
- name: Restore to server testsvrnew from server testsvr.
text: az {1} server restore -g testgroup -n testsvrnew --source-server testsvr --restore-point-in-time "2017-06-15T13:10:00Z"
- name: Restore to server testsvrnew from server testsvr2 which is in a different resource group.
text: az {1} server restore -g testgroup -n testsvrnew -s "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/othergroup/providers/Microsoft.DBfor{2}/servers/testsvr2" --restore-point-in-time "2017-06-15T13:10:00Z"
""".format(server_type, command_group, server_type.split()[3])
helps['{} server update'.format(command_group)] = """
type: command
short-summary: Update an {0} server.
examples:
- name: Update server's compute-units to 100.
text: az {1} server update -g testgroup -n testsvrnew --compute-units 100
- name: Update server's tags.
text: az {1} server update -g testgroup -n testsvrnew --tags "k1=v1" "k2=v2"
""".format(server_type, command_group)
helps['{} server delete'.format(command_group)] = """
type: command
short-summary: Delete an %s server.
""" % server_type
helps['{} server show'.format(command_group)] = """
type: command
short-summary: Show the details of an %s server.
""" % server_type
helps['{} server list'.format(command_group)] = """
type: command
                short-summary: List all the {0} servers belonging to a given resource group or subscription.
examples:
- name: List all servers in resource group.
text: az {1} server list -g testgroup
- name: List all servers in subscription.
text: az {1} server list
""".format(server_type, command_group)
helps['{} server firewall-rule'.format(command_group)] = """
type: group
short-summary: Commands to manage firewall rules for an %s server
""" % server_type
helps['{} server firewall-rule create'.format(command_group)] = """
type: command
short-summary: Create a firewall rule for an {0} server
examples:
- name: Create a firewall rule for server testsvr.
text: az {1} server firewall-rule create -g testgroup -s testsvr -n allowall --start-ip-address 0.0.0.0 --end-ip-address 255.255.255.255
""".format(server_type, command_group)
helps['{} server firewall-rule update'.format(command_group)] = """
type: command
short-summary: Update a firewall rule for an {0} server
examples:
- name: Update firewall rule's start IP address.
text: az {1} server firewall-rule update -g testgroup -s testsvr -n allowall --start-ip-address 0.0.0.1
- name: Update firewall rule's start and end IP address.
text: az {1} server firewall-rule update -g testgroup -s testsvr -n allowall --start-ip-address 0.0.0.1 --end-ip-address 255.255.255.254
""".format(server_type, command_group)
helps['{} server firewall-rule delete'.format(command_group)] = """
type: command
short-summary: Delete a firewall rule for an %s server
""" % server_type
helps['{} server firewall-rule show'.format(command_group)] = """
type: command
short-summary: Show the details of a firewall rule for an %s server
""" % server_type
helps['{} server firewall-rule list'.format(command_group)] = """
type: command
short-summary: List all the firewall rules for an %s server
""" % server_type
helps['{} server configuration'.format(command_group)] = """
type: group
short-summary: Commands to configure an %s server
""" % server_type
helps['{} server configuration set'.format(command_group)] = """
type: command
short-summary: Update the configuration of an {0} server
examples:
- name: Set new value for a configuration.
text: az {1} server configuration set -g testgroup -s testsvr -n <config_name> --value <config_value>
- name: Set configuration's value to default.
text: az {1} server configuration set -g testgroup -s testsvr -n <config_name>
""".format(server_type, command_group)
helps['{} server configuration show'.format(command_group)] = """
type: command
short-summary: Show the configuration of an %s server
""" % server_type
helps['{} server configuration list'.format(command_group)] = """
type: command
short-summary: List the configurations of an %s server
""" % server_type
helps['{} server-logs'.format(command_group)] = """
type: group
short-summary: Commands to manage %s server logs.
""" % server_type
helps['{} server-logs list'.format(command_group)] = """
type: command
short-summary: List log files for {0}
examples:
                    - name: List log files modified in the last 72 hours (the default).
text: az {1} server-logs list -g testgroup -s testsvr
                    - name: List log files modified in the last 10 hours.
text: az {1} server-logs list -g testgroup -s testsvr --file-last-written 10
                    - name: List log files whose size does not exceed 30KB.
text: az {1} server-logs list -g testgroup -s testsvr --max-file-size 30
""".format(server_type, command_group)
helps['{} server-logs download'.format(command_group)] = """
type: command
short-summary: Download log file(s) to current directory for {0}
examples:
- name: Download log file f1 and f2 for server testsvr.
text: az {1} server-logs download -g testgroup -s testsvr -n f1.log f2.log
""".format(server_type, command_group)
helps['{} db'.format(command_group)] = """
type: group
short-summary: Commands to manage %s databases
""" % server_type
helps['{} db create'.format(command_group)] = """
type: command
short-summary: Create a database for {0}
examples:
- name: Create database testdb in server testsvr with default parameters.
text: az {1} db create -g testgroup -s testsvr -n testdb
- name: Create database testdb in server testsvr with specified parameters.
text: az {1} db create -g testgroup -s testsvr -n testdb --charset <valid_charset> --collation <valid_collation>
""".format(server_type, command_group)
helps['{} db delete'.format(command_group)] = """
type: command
short-summary: Delete a database for %s
""" % server_type
helps['{} db show'.format(command_group)] = """
type: command
short-summary: Show the details of a database for %s
""" % server_type
helps['{} db list'.format(command_group)] = """
type: command
short-summary: List the databases of an %s server
""" % server_type
add_helps("mysql", "Azure Database for MySQL")
add_helps("postgres", "Azure Database for PostgreSQL")
|
import click
import frappe
def execute():
doctype = "Data Import Legacy"
table = frappe.utils.get_table_name(doctype)
# delete the doctype record to avoid broken links
frappe.db.delete("DocType", {"name": doctype})
# leaving table in database for manual cleanup
click.secho(
f"`{doctype}` has been deprecated. The DocType is deleted, but the data still"
" exists on the database. If this data is worth recovering, you may export it"
f" using\n\n\tbench --site {frappe.local.site} backup -i '{doctype}'\n\nAfter"
" this, the table will continue to persist in the database, until you choose"
" to remove it yourself. If you want to drop the table, you may run\n\n\tbench"
f" --site {frappe.local.site} execute frappe.db.sql --args \"('DROP TABLE IF"
f" EXISTS `{table}`', )\"\n",
fg="yellow",
)
|
'''
Quotes App
=========================================
This program generates a random quote every time the user requests one by pressing a button. Uses the Kivy framework.
'''
# Things to add: export it to the Play Store, add a way to maximize the window automatically, etc.
import random as r
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.core.window import Window
class quotesApp(App):
def build(self):
Window.clearcolor = (0.98, 0.28, 1, 1)
main_layout = FloatLayout(size = (500, 500))
self.welcome = Label(text = "The Most Wonderful Quotes of All-Time!", pos_hint = {"x" : .018, "y": .41}, font_size = '40sp', color =(1, 1, 1, 1), markup = True,)
main_layout.add_widget(self.welcome)
        self.quote = TextInput(multiline = True, readonly = True, halign = "center", font_size=50, size_hint = (.71,.55),
pos_hint = {"x": .15,"y": .30},background_color = (0.98, 0.28, 1, 1))
main_layout.add_widget(self.quote)
        self.author = TextInput(multiline = False, readonly = True, halign = "center", font_size = 25, size_hint = (.43, .10), pos_hint = {"x": .285,"y": .175},
background_color = (0.98, 0.28, 1, 1))
main_layout.add_widget(self.author)
nextButton = Button(text = "Click for a Quote", size_hint = (.3, .1), pos_hint = {"x": .355, "y": .055}, background_color = (0.98, 0.28, 1, 1))
nextButton.bind(on_press = self.onButtonPress)
main_layout.add_widget(nextButton)
return main_layout
def onButtonPress(self, instance):
quotesNAuthors = {
"Nelson Mandela": "The greatest glory in living lies not in never falling, but in rising every time we fall.",
"Walt Disney" : "The way to get started is to quit talking and begin doing.",
"Steve Jobs" : "Your time is limited, so don't waste it living someone else's life. Don't be trapped by dogma – which is living with the results of other people's thinking.",
"Eleanor Roosevelt":"If life were predictable it would cease to be life, and be without flavor.",
"Oprah Winfrey" : "If you look at what you have in life, you'll always have more. If you look at what you don't have in life, you'll never have enough.",
"James Cameron" : "If you set your goals ridiculously high and it's a failure, you will fail above everyone else's success.",
"John Lennon" : "Life is what happens when you're busy making other plans.",
"Mother Teresa" : "Spread love everywhere you go. Let no one ever come to you without leaving happier.",
"Franklin D. Roosevelt" : "When you reach the end of your rope, tie a knot in it and hang on.",
"Margaret Mead" :"Always remember that you are absolutely unique. Just like everyone else.",
"Robert Louis Stevenson" :"Don't judge each day by the harvest you reap but by the seeds that you plant.",
"Eleanor Roosevelt" : "The future belongs to those who believe in the beauty of their dreams.",
"Benjamin Franklin" :"Tell me and I forget. Teach me and I remember. Involve me and I learn." ,
"Helen Keller": "The best and most beautiful things in the world cannot be seen or even touched - they must be felt with the heart.",
"Aristotle" : "It is during our darkest moments that we must focus to see the light.",
"Anne Frank" : "Whoever is happy will make others happy too." ,
"Ralph Waldo Emerson" : "Do not go where the path may lead, go instead where there is no path and leave a trail.",
"Maya Angelou" :"You will face many defeats in life, but never let yourself be defeated.",
"Abraham Lincoln" : "In the end, it's not the years in your life that count. It's the life in your years.",
"Babe Ruth" : "Never let the fear of striking out keep you from playing the game.",
"Helen Keller" : "Life is either a daring adventure or nothing at all.",
"Thomas Edison" : "Many of life's failures are people who did not realize how close they were to success when they gave up.",
"Dr. Seuss" : "You have brains in your head. You have feet in your shoes. You can steer yourself any direction you choose.",
"Oscar Wilde" :"Life is never fair, and perhaps it is a good thing for most of us that it is not.",
"Tony Robbins" : "The only impossible journey is the one you never begin.",
"Albert Einstein" : "Only a life lived for others is a life worthwhile.",
"Dalai Lama" : "The purpose of our lives is to be happy.",
"Mae West" : "You only live once, but if you do it right, once is enough.",
"Henry David Thoreau" :"Go confidently in the direction of your dreams! Live the life you've imagined.",
"Confucius" :"Life is really simple, but we insist on making it complicated.",
"Jonathan Swift" : "May you live all the days of your life.",
"Hans Christian Andersen" : "Life itself is the most wonderful fairy tale.",
"John Wooden" : "Do not let making a living prevent you from making a life.",
"D. H. Lawrence" :"Life is ours to be spent, not to be saved.",
"Marilyn Monroe" :"Keep smiling, because life is a beautiful thing and there's so much to smile about.",
"James M. Barrie" : "Life is a long lesson in humility.",
"Robert Frost" : "In three words I can sum up everything I've learned about life: it goes on.",
"Bob Marley" : "Love the life you live. Live the life you love.",
"Charles Dickens" : "Life is made of ever so many partings welded together.",
"Ray Bradbury" : "Life is trying things to see if they work." ,
"Winston Churchill" : "Success is not final; failure is not fatal: It is the courage to continue that counts.",
"Steve Jobs": "If you really look closely, most overnight successes took a long time.",
"John D. Rockefeller" :"The secret of success is to do the common thing uncommonly well.",
"Thomas Jefferson" :"I find that the harder I work, the more luck I seem to have.",
"Barack Obama" : "The real test is not whether you avoid this failure, because you won't. It's whether you let it harden or shame you into inaction, or whether you learn from it; whether you choose to persevere.",
"Zig Ziglar" : "Don't be distracted by criticism. Remember -- the only taste of success some people get is to take a bite out of you.",
"Conrad Hilton" : "Success seems to be connected with action. Successful people keep moving. They make mistakes but they don't quit.",
"Colin Powell" : "There are no secrets to success. It is the result of preparation, hard work, and learning from failure.",
"Herman Melville" : "It is better to fail in originality than to succeed in imitation.",
"Jim Rohn" :"Successful people do what unsuccessful people are not willing to do. Don't wish it were easier; wish you were better.",
"James Cameron" : "If you set your goals ridiculously high and it's a failure, you will fail above everyone else's success.",
"Steve Jobs" : "If you really look closely, most overnight successes took a long time.",
"David Brinkley" : "A successful man is one who can lay a firm foundation with the bricks others have thrown at him.",
"Albert Einstein" : "Try not to become a man of success. Rather become a man of value.",
"John D. Rockefeller" : "Don't be afraid to give up the good to go for the great.",
"Winston Churchill" : "Success is walking from failure to failure with no loss of enthusiasm.",
"Thomas J. Watson" : "If you want to achieve excellence, you can get there today. As of this second, quit doing less-than-excellent work.",
"Gurbaksh Chahal" : "If you genuinely want something, don't wait for it -- teach yourself to be impatient.",
"Vidal Sassoon" : "The only place where success comes before work is in the dictionary.",
"Alexander Graham Bell" : "Before anything else, preparation is the key to success.",
"Wayne Gretzky" : "You miss 100% of the shots you don't take.",
"Henry Ford" : "Whether you think you can or you think you can't, you're right.",
"Rosa Parks": "I have learned over the years that when one's mind is made up, this diminishes fear.",
"Mother Teresa" : "I alone cannot change the world, but I can cast a stone across the water to create many ripples.",
"Audrey Hepburn" : "Nothing is impossible, the word itself says, ‘I'm possible!'",
"Ayn Rand" : "The question isn't who is going to let me; it's who is going to stop me.",
"Ralph Waldo Emerson" : "The only person you are destined to become is the person you decide to be.",
"Theodore Roosevelt" : "Believe you can and you're halfway there.",
"Maya Angelou" : "I've learned that people will forget what you said, people will forget what you did, but people will never forget how you made them feel.",
"Vince Lombardi" : "Winning isn't everything, but wanting to win is.",
"Amelia Earhart" : "The most difficult thing is the decision to act, the rest is merely tenacity.",
"Socrates" : "An unexamined life is not worth living.",
"George Addair" : "Everything you've ever wanted is on the other side of fear.",
"Norman Vaughan" : "Dream big and dare to fail.",
"Beverly Sills" : "You may be disappointed if you fail, but you are doomed if you don't try.",
"Charles Swindoll" : "Life is 10% what happens to me and 90% of how I react to it.",
"Les Brown" : "Too many of us are not living our dreams because we are living our fears.",
"Benjamin Franklin" : "I didn't fail the test. I just found 100 ways to do it wrong.",
"Sheryl Sandberg" : "If you're offered a seat on a rocket ship, don't ask what seat! Just get on.",
"Florence Nightingale" : "I attribute my success to this: I never gave or took any excuse.",
"Vincent van Gogh" : "I would rather die of passion than of boredom.",
"Gloria Steinem": "Dreaming, after all, is a form of planning.",
"Napolean Hill" : "Whatever the mind of man can conceive and believe, it can achieve.",
"Aristotle" : "First, have a definite, clear practical ideal; a goal, an objective. Second, have the necessary means to achieve your ends; wisdom, money, materials, and methods. Third, adjust all your means to that end.",
"Mark Twain" : "Twenty years from now you will be more disappointed by the things that you didn't do than by the ones you did do. So, throw off the bowlines, sail away from safe harbor, catch the trade winds in your sails. Explore, Dream, Discover.",
"Mahatma Gandhi" : "Live as if you were to die tomorrow. Learn as if you were to live forever.",
"Bernard M. Baruch" : "Be who you are and say what you feel, because those who mind don’t matter and those who matter don’t mind.",
"Plato" : "Wise men speak because they have something to say; fools because they have to say something.",
"Mahatma Gandhi" : "You must be the change you wish to see in the world.",
"Martin Luther King Jr." : "Darkness cannot drive out darkness; only light can do that. Hate cannot drive out hate; only love can do that",
"E.E Cummings" : "It takes courage to grow up and turn out to be who you really are.",
"Leonardo Da Vinci": "As a well-spent day brings happy sleep, so a life well spent brings happy death.",
"Herbert Hoover": "Children are our most valuable resource.",
"J.K. Rowling" : "It takes a great deal of courage to stand up to your enemies, but even more to stand up to your friends.",
"Frank Zappa" : "A mind is like a parachute. It doesn’t work if it isn’t open.",
"Deepam Chatterjee" : "When you are totally at peace with yourself, nothing can shake you.",
"Muhammad Ali" : "Peace comes from within. Do not seek it without.",
"Andrew Hendrixson" : "Anyone who has ever made anything of importance was disciplined.",
"Coco Chanel" : "Don’t spend time beating on a wall, hoping to transform it into a door.",
"Billie Jean King": "Champions keep playing until they get it right.",
"Neil Barringham" : "The grass is greener where you water it.",
"Ernest Hemingway": "But man is not made for defeat. A man can be destroyed but not defeated.",
"Indira Gandhi" : "You cannot shake hands with a clenched fist.",
"Jane Austen" : "There is no charm equal to tenderness of heart.",
"Edgar Allen Poe" : "All that we see or seem is but a dream within a dream.",
"George Washington" : "It is far better to be alone, than to be in bad company.",
"Thomas Carlyle" : "Permanence, perseverance and persistence in spite of all obstacles, discouragements, and impossibilities: It is this, that in all things distinguishes the strong soul from the weak.",
"Sun Tzu" : "The supreme art of war is to subdue the enemy without fighting.",
"Buddha" : "Do not dwell in the past, do not dream of the future, concentrate the mind on the present moment."
}
        # Pick one random (author, quote) pair and display it
        author, quote = r.choice(list(quotesNAuthors.items()))
        self.quote.text = ' "' + quote + ' " '
        self.author.text = " - " + author
        return quote, author
if __name__ == '__main__':
app = quotesApp()
app.run()
|
from classes.game_object import GameObject
from classes.game_state import GameState
from classes.ghost import Ghost
import pygame
from pygame.sprite import Sprite
class Resp(GameObject):
"""It's just a resp"""
resp_sprite = None
def __init__(self, x: int, y: int, sprite: Sprite = None):
# Load sprite only once
self.resp_sprite = pygame.image.load('img/skull.png').convert_alpha()
super().__init__(x, y, 16, 16, self.resp_sprite, 'background')
self.last_resp = 0
def update(self, time_delta, objects=None):
game_state = GameState.get_instance()
self.last_resp += time_delta
if self.last_resp >= 4000:
self.last_resp = 0
game_state.resp.append(Ghost(self.get_x(), self.get_y()))
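# Note (added): update() appends a new Ghost to game_state.resp once last_resp reaches 4000
# time-delta units (likely milliseconds, i.e. roughly every 4 seconds).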
|
# AoC 2019 - Day 6 - Part 1
with open('input.txt', 'r') as f:
    orbits = [line for line in f.read().split('\n') if line]  # skip blank lines
planets = {}  # maps each planet to the planet it directly orbits
total = 0  # avoid shadowing the built-in sum()
def get_orbits(planet):
    # 'COM' orbits nothing; every other planet adds one direct orbit plus its parent's count
    if planet == 'COM':
        return 0
    return 1 + get_orbits(planets.get(planet))
for orbit in orbits:
    planet = orbit.split(')')
    planets[planet[1]] = planet[0]
for orbit in orbits:
    planet = orbit.split(')')
    total += get_orbits(planet[1])
print(total)
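# A minimal alternative sketch (not part of the original puzzle solution): the same total can be
# computed with a cached depth lookup so each orbit chain is walked only once. It reuses the
# `planets` {child: parent} map built above; `count_orbits_cached` is a name introduced here.
def count_orbits_cached(parents):
    depth = {'COM': 0}
    def depth_of(planet):
        if planet not in depth:
            depth[planet] = 1 + depth_of(parents[planet])
        return depth[planet]
    return sum(depth_of(planet) for planet in parents)
# Sanity check: count_orbits_cached(planets) prints the same value as `total` above.
# print(count_orbits_cached(planets))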
|
# example@gmail.com
import math
class Dot:
def __init__(self, x_input, y_input):
self.x = x_input
self.y = y_input
def dot_get_x(self):
return self.x
def dot_get_y(self):
return self.y
def dot_add(self, other):
# return (x, y) = (x1 + x2, y1 + y2) (Dot)
return Dot(self.x + other.x, self.y + other.y)
def dot_sub(self, other):
# return (x, y) = (x1 - x2, y1 - y2) (Dot)
return Dot(self.x - other.x, self.y - other.y)
def dot_dist_origin(self):
# return the distance from the origin (0,0) (number)
# (1, 1) -> 1.414....
# sqrt((x1-0)**2 + (y1-0)**2)
return math.sqrt(self.x ** 2 + self.y ** 2)
def dot_dist(self, other):
# return the distance between the two points (number)
return math.sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)
def dot_midpoint(self, other):
# return the midpoint between the two points (Dot)
# x_new = (x1 + x2) / 2, y_new = (y1 + y2) / 2
return Dot((self.x + other.x) / 2, (self.y + other.y) / 2)
def dot_get_line(self, other):
# return the linear function passes through the two points (string)
# Ex 1: y = 12x + 5
# Ex 2: y = 3 (if the line is parallel to x-axis)
# Ex 3: x = 15 (if the line is parallel to y-axis)
# Ex 4: Invalid (if the two points are in the same position)
x_same = self.x == other.x
y_same = self.y == other.y
# (1, 1) (0, 1) y = 1
if x_same and y_same: # (x1, y1) = (x1, y1)
return "Invalid"
elif y_same: # parallel to x-axis
return "y = " + str(self.y)
elif x_same: # parallel to y-axis
return "x = " + str(self.x)
# y = ax + b
slope = (self.y - other.y) / (self.x - other.x)
        intercept = ((self.x * other.y) - (self.y * other.x)) / (self.x - other.x)  # (x1*y2 - y1*x2) / (x1 - x2)
return "y = " + str(slope) + "x + " + str(intercept)
def dot_G(self, other1, other2):
# return the "center of mass" (Dot)
        # for (x_1, y_1), (x_2, y_2), (x_3, y_3)
        # G = ((x_1 + x_2 + x_3) / 3, (y_1 + y_2 + y_3) / 3)
return Dot((self.x + other1.x + other2.x) / 3,
(self.y + other1.y + other2.y) / 3)
a = Dot(1, 1)
b = Dot(0, 0)
c = Dot(1, 2)
print(a.dot_get_x())
print(a.dot_get_line(b))
print(b.dot_get_line(c))
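# A few extra illustrative calls (added as a sketch; the expected values follow from the formulas above):
print(a.dot_dist(b))                  # distance between (1, 1) and (0, 0) -> 1.4142135623730951
print(a.dot_midpoint(c).dot_get_x())  # midpoint of (1, 1) and (1, 2) has x = 1.0
print(a.dot_G(b, c).dot_get_y())      # centroid of (1, 1), (0, 0), (1, 2) has y = 1.0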
|
import collections
import typing
from enum import Enum
from .enums import GVAL_TEXTABLE, GVAL_NUMERIC, GVAL_LOCATABLE, GVAL_LISTABLE, GVAL_ITEM, ParticleType, SoundType, \
Material
from .classes.abc import Itemable
from .classes.mc_types import DFNumber, DFText, DFLocation, DFPotion, Item, DFCustomSpawnEgg, DFParticle, DFSound
from .classes.variable import (
DFVariable, DFGameValue, NumberVar, TextVar, ListVar, LocationVar, PotionVar, ParticleVar,
SoundVar, ItemVar
)
from .utils import flatten
class ParamTypes:
"""Custom type annotations for parameters in humanized methods."""
Numeric = typing.Union[int, float, DFNumber, DFGameValue, DFVariable, NumberVar]
"""Union[:class:`int`, :class:`float`, :class:`~.DFNumber`, :class:`~.DFGameValue`, :class:`~.DFVariable`, \
:class:`~.NumberVar`] : The possible types of a numeric parameter."""
Textable = typing.Union[str, DFText, DFGameValue, DFVariable, TextVar]
"""Union[:class:`str`, :class:`~.DFText`, :class:`~.DFGameValue`, :class:`~.DFVariable`, :class:`~.TextVar`] : The \
possible types of a text parameter."""
Listable = typing.Union[DFGameValue, DFVariable, ListVar]
"""Union[:class:`~.DFGameValue`, :class:`~.DFVariable`, :class:`~.ListVar`] : The possible types of a List (in \
DiamondFire) parameter."""
Locatable = typing.Union[DFLocation, DFGameValue, DFVariable, LocationVar]
"""Union[:class:`~.DFLocation`, :class:`~.DFGameValue`, :class:`~.DFVariable`, :class:`~.LocationVar`] : The \
possible types of a Location parameter."""
Potionable = typing.Union[DFPotion, DFVariable, PotionVar] # there is no Game Value representing a potion effect.
"""Union[:class:`~.DFPotion`, :class:`~.DFVariable`, :class:`~.PotionVar`] : The possible types of a Potion Effect \
parameter."""
ParticleParam = typing.Union[DFParticle, ParticleType, DFVariable, ParticleVar] # no particle game value
"""Union[:class:`~.DFParticle`, :class:`~.ParticleType`, :class:`~.DFVariable`, :class:`~.ParticleVar`] : The \
possible types of a Particle parameter."""
SoundParam = typing.Union[DFSound, SoundType, DFVariable, SoundVar] # no sound game value
"""Union[:class:`~.DFSound`, :class:`~.SoundType`, :class:`~.DFVariable`, :class:`~.SoundVar`] : The possible \
types of a Sound param."""
ItemParam = typing.Union[Item, Material, DFGameValue, DFVariable, ItemVar]
"""Union[:class:`~.Item`, :class:`~.Material`, :class:`~.DFGameValue`, :class:`~.DFVariable`, :class:`~.ItemVar`] \
: The possible types of an Item parameter."""
SpawnEggable = typing.Union[DFCustomSpawnEgg, ItemParam]
"""Union[:class:`~.DFCustomSpawnEgg`, :attr:`ItemParam`] : The possible types of a Spawn Egg parameter."""
Param = typing.Union[
"ParamTypes.Numeric", "ParamTypes.Textable", "ParamTypes.Listable", "ParamTypes.Potionable",
"ParamTypes.ParticleParam", "ParamTypes.SoundParam", "ParamTypes.ItemParam", "ParamTypes.SpawnEggable"
]
"""Union[:attr:`Numeric`, :attr:`Textable`, :attr:`Listable`, :attr:`Potionable`, :attr:`ParticleParam`, \
:attr:`SoundParam`, :attr:`ItemParam`, :attr:`SpawnEggable`] : All the possible parameter types."""
Numeric = ParamTypes.Numeric
Textable = ParamTypes.Textable
Listable = ParamTypes.Listable
Locatable = ParamTypes.Locatable
Potionable = ParamTypes.Potionable
ParticleParam = ParamTypes.ParticleParam
SoundParam = ParamTypes.SoundParam
ItemParam = ParamTypes.ItemParam
SpawnEggable = ParamTypes.SpawnEggable
Param = ParamTypes.Param
GVAL_TYPES = {
Numeric: GVAL_NUMERIC,
Textable: GVAL_TEXTABLE,
Listable: GVAL_LISTABLE,
Locatable: GVAL_LOCATABLE,
ItemParam: GVAL_ITEM,
Item: GVAL_ITEM,
# a few common Unions
typing.Union[Numeric, Locatable]: GVAL_NUMERIC + GVAL_LOCATABLE
}
def convert_numeric(param: Numeric) -> Numeric:
"""Converts ints and floats from a Numeric parameter to the appropriate DFNumber, while leaving
Game Values and Variables untouched.
Parameters
----------
param : :attr:`~.Numeric`
The numeric parameter to convert.
Returns
-------
:attr:`~.Numeric`
Resulting conversion, or the parameter itself if nothing required change.
Examples
--------
>>> convert_numeric(5)
<DFNumber value=5.0>
>>> convert_numeric(6.54)
<DFNumber value=6.54>
"""
if isinstance(param, (int, float)):
return DFNumber(param)
return param
def convert_text(param: Textable) -> Textable:
"""Converts strs from a Textable parameter to the appropriate DFText, while leaving
Game Values and Variables untouched.
Parameters
----------
param : :attr:`~.Textable`
The text parameter to convert.
Returns
-------
:attr:`~.Textable`
Resulting conversion, or the parameter itself if nothing required change.
Examples
--------
>>> convert_text("test")
<DFText data='test'>
"""
if isinstance(param, (str, collections.UserString)):
return DFText(str(param))
return param
def convert_particle(param: ParticleParam) -> ParticleParam:
"""Converts :class:`~.ParticleType` from a ParticleParam parameter to the appropriate DFParticle, while leaving
Game Values and Variables untouched.
Parameters
----------
param : :attr:`~.ParticleParam`
The particle parameter to convert.
Returns
-------
:attr:`~.ParticleParam`
Resulting conversion, or the parameter itself if nothing required change.
Examples
--------
>>> convert_particle(ParticleType.ANGRY_VILLAGER)
<DFParticle particle_type='Angry Villager'>
"""
if isinstance(param, ParticleType):
return DFParticle(param)
return param
def convert_sound(param: SoundParam) -> SoundParam:
"""Converts :class:`~.SoundType` from a SoundParam parameter to the appropriate DFSound, while leaving
Game Values and Variables untouched.
Parameters
----------
param : :attr:`~.SoundParam`
The sound parameter to convert.
Returns
-------
:attr:`~.SoundParam`
Resulting conversion, or the parameter itself if nothing required change.
"""
if isinstance(param, SoundType):
return DFSound(param)
return param
def convert_material(param: typing.Union[Param, Material]) -> Param:
"""Converts :class:`~.Material` into :class:`~.Item`.
Parameters
----------
param : Union[:attr:`~.Param`, :class:`~.Material`]
The parameter/material to convert.
Returns
-------
:attr:`~.Param`
The generated item, or the param specified.
Examples
--------
>>> convert_material(Material.DIAMOND_SWORD)
<Item minecraft:diamond_sword x 1>
"""
if isinstance(param, Material):
return Item(param)
return param
def convert_all(param: Param) -> Param:
"""Converts anything from a Param parameter to the appropriate DF(something) class, while leaving
Game Values and Variables untouched. (Calls all other converting methods)
Parameters
----------
param : :attr:`~.Param`
The parameter to convert.
Returns
-------
:attr:`~.Param`
Resulting conversion, or the parameter itself if nothing required change.
See Also
--------
:meth:`convert_particle`, :meth:`convert_sound`, :meth:`convert_numeric`, :meth:`convert_text`, \
:meth:`convert_material`
"""
return convert_particle(convert_sound(convert_numeric(convert_text(convert_material(param)))))
_P = typing.TypeVar(
"_P",
Param, Numeric, Textable, Listable, Locatable, Potionable, ItemParam, DFVariable, SpawnEggable
)
_A = typing.TypeVar("_A",
Param, Numeric, Textable, Listable, Locatable, Potionable, ItemParam, DFVariable, SpawnEggable
)
_B = typing.TypeVar("_B",
Param, Numeric, Textable, Listable, Locatable, Potionable, ItemParam, DFVariable, SpawnEggable
)
@typing.overload
def p_check(
obj: typing.Optional[_P], typeof: typing.Type[typing.Optional[_P]], arg_name: typing.Optional[str] = None,
*, convert: bool = True
) -> _P: ...
@typing.overload
def p_check(
obj: Numeric, typeof: typing.Type[Numeric], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Numeric: ...
@typing.overload
def p_check(
obj: Textable, typeof: typing.Type[Textable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Textable: ...
@typing.overload
def p_check(
obj: Listable, typeof: typing.Type[Listable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Listable: ...
@typing.overload
def p_check(
obj: Locatable, typeof: typing.Type[Locatable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Locatable: ...
@typing.overload
def p_check(
obj: Potionable, typeof: typing.Type[Potionable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Potionable: ...
@typing.overload
def p_check(
obj: ItemParam, typeof: typing.Type[ItemParam], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> ItemParam: ...
@typing.overload
def p_check(
obj: ParticleParam, typeof: typing.Type[ParticleParam], arg_name: typing.Optional[str] = None,
*, convert: bool = True
) -> ParticleParam: ...
@typing.overload
def p_check(
obj: SoundParam, typeof: typing.Type[SoundParam], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> SoundParam: ...
@typing.overload
def p_check(
obj: DFVariable, typeof: typing.Type[DFVariable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> DFVariable: ...
@typing.overload
def p_check(
obj: SpawnEggable, typeof: typing.Type[SpawnEggable], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> SpawnEggable: ...
@typing.overload
def p_check(
obj: Param, typeof: typing.Type[Param], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> Param: ...
def p_check(
obj: _P, typeof: typing.Type[_P], arg_name: typing.Optional[str] = None, *, convert: bool = True
) -> _P:
"""Checks an object for being a valid param type, and raises a TypeError if that does not occur. For checking
and returning a bool, see :func:`p_bool_check`.
Parameters
----------
obj : :attr:`ParamTypes.Param`
The object to check.
typeof : Type[:attr:`ParamTypes.Param`]
The parameter type to check.
convert : :class:`bool`, optional
Whether or not the object should be converted from :class:`str`, :class:`int` or :class:`float` to,
respectively, :class:`~.DFText` (for str) or :class:`~.DFNumber` (for int/float). Defaults to ``True``.
arg_name : Optional[:class:`str`], optional
The name of the argument, in order to have a more specific error. Defaults to ``None`` (no name given).
Returns
-------
:attr:`ParamTypes.Param`
The object, given there are no type incompatibilities.
Raises
------
:exc:`TypeError`
If the object is found to not match the required param type.
See Also
--------
:func:`p_bool_check`
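
    Examples
    --------
    A minimal check-and-convert call (the output repr follows :func:`convert_numeric` above):

    >>> p_check(5, Numeric)
    <DFNumber value=5.0>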
"""
class _Check: # kinda hacky solution, but...
_val: typeof
p_typeof = typing.get_type_hints(_Check, globalns=None, localns=None)['_val']
valid_types: typing.List[type] = flatten(
[getattr(type_, "__args__", type_) for type_ in getattr(p_typeof, "__args__", [p_typeof])]
) # ^this allows Union[] to be specified as well, such that Union[Numeric, Locatable] works, for example.
valid_names = (
"Param", "Numeric", "Textable", "Locatable", "Potionable", "ItemParam", "ParticleParam", "SoundParam",
"SpawnEggable",
"Union[Numeric, Locatable]", "Union[Textable, ItemParam]", "Union[Locatable, Textable]"
)
corresponding_values = (
Param, Numeric, Textable, Locatable, Potionable, ItemParam, ParticleParam, SoundParam, SpawnEggable,
typing.Union[Numeric, Locatable], typing.Union[Textable, ItemParam], typing.Union[Locatable, Textable]
)
if not isinstance(obj, tuple(filter(lambda t: t is not None, valid_types))): # remove 'None'
try:
corresp_class_ind = corresponding_values.index(typeof)
name = valid_names[corresp_class_ind]
msg = f"Object must be a valid {repr(name)} parameter, not {repr(str(type(obj)))}."
except (IndexError, ValueError):
msg = f"Object must correspond to the appropriate parameter type, and not be a {repr(str(type(obj)))}."
raise TypeError("{0}{1}".format(
msg,
f" (Arg '{arg_name}')" if arg_name else ""
))
if GVAL_TYPES.get(typeof) and isinstance(obj, DFGameValue) and obj not in GVAL_TYPES[typeof]:
try:
corresp_class_ind = corresponding_values.index(typeof)
name = valid_names[corresp_class_ind]
msg = f"The DFGameValue type specified does not evaluate to a valid {repr(name)} parameter. (Check \
documentation to see valid 'GameValueType' attrs for this parameter type.)"
except (IndexError, ValueError):
msg = f"The DFGameValue type specified does not evaluate to a valid parameter of the required type. \
(Check documentation to see valid 'GameValueType' attrs for this parameter type.)"
raise TypeError("{0}{1}".format(
msg,
f" (Arg '{arg_name}')" if arg_name else ""
))
if convert:
obj = convert_all(typing.cast(_P, obj))
if typeof == SpawnEggable and isinstance(obj, Item) and "spawn_egg" not in obj.material.value:
raise TypeError(
f"Object must be a valid spawn egg item, not a(n) '{obj.material.value}'."
+ (f" (Arg '{arg_name}')" if arg_name else "")
)
return typing.cast(_P, obj)
def p_bool_check(obj: _P, typeof: typing.Type[_P], gameval_check: bool = True, error_on_gameval: bool = False) -> bool:
"""Checks an object for being a valid param type, returning True if the type matches and False otherwise. For
checking and raising an error, see :func:`p_check`.
Parameters
----------
obj : :attr:`ParamTypes.Param`
The object to check.
typeof : Type[:attr:`ParamTypes.Param`]
The parameter type to check.
gameval_check : :class:`bool`, optional
        Whether any DFGameValue instances given should be checked to ensure they have the same Return Type as the
        specified parameter type. Defaults to ``True``.
    error_on_gameval : :class:`bool`, optional
        Whether DFGameValue instances found not to correspond to the given type should raise a TypeError instead of
        causing the function to return ``False``. Defaults to ``False``.
Returns
-------
:class:`bool`
If the object matches the given type, then this is ``True``. Otherwise, ``False``.
Raises
------
:exc:`TypeError`
If ``error_on_gameval`` is set to ``True`` and a DFGameValue instance of incompatible type is given.
See Also
--------
:func:`p_check`
Examples
--------
>>> p_bool_check(5, Numeric)
True
>>> p_bool_check(5, Locatable)
False
"""
class _Check: # kinda hacky solution, but...
_val: typeof
p_typeof = typing.get_type_hints(_Check, globalns=None, localns=None)['_val'] # resolve forward refs
valid_types: typing.List[type] = flatten(
[getattr(type_, "__args__", type_) for type_ in getattr(p_typeof, "__args__", [p_typeof])]
)
if not isinstance(obj, tuple(valid_types)):
return False
if gameval_check and GVAL_TYPES.get(typeof) and isinstance(obj, DFGameValue) and obj not in GVAL_TYPES[typeof]:
if error_on_gameval:
try:
valid_names = (
"Param", "Numeric", "Textable", "Locatable", "Potionable", "ItemParam", "ParticleParam",
"SoundParam", "SpawnEggable",
"Union[Numeric, Locatable]", "Union[Textable, ItemParam]"
)
corresponding_values = (
Param, Numeric, Textable, Locatable, Potionable, ItemParam, ParticleParam, SoundParam,
SpawnEggable,
typing.Union[Numeric, Locatable], typing.Union[Textable, ItemParam]
)
corresp_class_ind = corresponding_values.index(typeof)
name = valid_names[corresp_class_ind]
msg = f"The DFGameValue type specified does not evaluate to a valid {repr(name)} parameter. (Check \
documentation to see valid 'GameValueType' attrs for this parameter type.)"
except (IndexError, ValueError):
msg = f"The DFGameValue type specified does not evaluate to a valid parameter of the required type. \
(Check documentation to see valid 'GameValueType' attrs for this parameter type.)"
raise TypeError(msg)
else:
return False
return True
|
from django.contrib import admin
from users.models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
fields = [
'company_name',
'first_name',
'last_name',
'username',
'phone',
'address',
]
list_display = ['company_name', 'username', 'phone']
|
import os
os.chdir("updater-lib")
os.execl("updater.exe", "Rebirth Item Tracker")
|
class Identity:
def do(self, x):
return x
|
def voto(anoNascimento):
    from datetime import date
    idade = date.today().year - anoNascimento
    if idade < 16:
        return f'At {idade} years old: DOES NOT VOTE!'
    elif 16 <= idade < 18 or idade > 65:
        return f'At {idade} years old: VOTING IS OPTIONAL!'
    else:
        return f'At {idade} years old: VOTING IS MANDATORY!'
# Main program
print('=' * 30)
ano = int(input('What year were you born? '))
print(voto(ano))
|
from collections import OrderedDict
from pytest import fixture
from oarepo_nusl_rules.xoai.rules import xoai_identifier
@fixture
def identifier():
return OrderedDict([('@name', 'none'),
('field',
OrderedDict([('@name', 'value'),
('#text',
'http://hdl.handle.net/20.500.11956/115864')]))])
@fixture
def header_id_oai():
return 'oai:dspace.cuni.cz:20.500.11956/115864'
def test_identifier_1(identifier, header_id_oai):
assert xoai_identifier(identifier, identifier=header_id_oai) == {
'identifier': [{'value': 'oai:dspace.cuni.cz:20.500.11956/115864', 'type': 'originalOAI'}, {
'value': 'http://hdl.handle.net/20.500.11956/115864', 'type': 'originalRecord'
}]
}
def test_identifier_2(identifier):
assert xoai_identifier(identifier) == {
'identifier': [{'value': 'oai:dspace.cuni.cz:20.500.11956/115864', 'type': 'originalOAI'}, {
'value': 'http://hdl.handle.net/20.500.11956/115864', 'type': 'originalRecord'
}]
}
|
# repl.py
"""
Simple loop function to take user input
"""
from util.output_handler import handle_output
def user_input(input_handler, output_handler, prompt='>>> '):
    """
    Takes input from the user, then passes it to input_handler for further
    processing. The input handler should return a list of strings, which is
    then passed to output_handler.
    """
    while True:
        try:
            user_input = input(prompt)
        except KeyboardInterrupt:
            break
        else:
            if user_input == 'exit':
                break
            output = input_handler(user_input)
            output_handler(output)
if __name__ == "__main__":
user_input(
input_handler=lambda x: [f'got {x}'],
output_handler=handle_output
)
|
import os
from flask import Blueprint, request, jsonify, make_response, current_app
from flask.views import MethodView
import hashlib, binascii, uuid
import jwt
import datetime
from .. import db
bp = Blueprint('api', __name__, url_prefix='/')
class TestAPI(MethodView):
def get(self):
        return make_response(jsonify({'message': 'Working fine!'}))
class RegisterUserAPI(MethodView):
def post(self):
data = request.get_json()
if data is None:
return make_response(jsonify({'message':'Specify the name and passwd'}))
if 'name' in data and 'passwd' in data:
name = data['name']
passwd = data['passwd']
            cur = db.connection.cursor()
            # Use a parameterized query to avoid SQL injection
            cur.execute('SELECT * FROM User WHERE name = %s', (name,))
            rv = cur.fetchall()
if rv:
return make_response(jsonify({'message':'User already exists'}))
            # Store SHA-256(passwd + salt) followed by the salt itself (64 hex chars each)
            salt = hashlib.sha256(uuid.uuid4().hex.encode()).hexdigest()
            hashed_passwd = hashlib.sha256(passwd.encode() + salt.encode()).hexdigest()
            hashed_passwd = hashed_passwd + salt
cur = db.connection.cursor()
try:
                cur.execute('INSERT INTO User (name, passwd) VALUES (%s,%s)', (name, hashed_passwd))
db.connection.commit()
return make_response(jsonify({'message':'User registered'}))
except Exception as e:
return make_response(jsonify({'message':'Error in database query.'}))
return make_response(jsonify({'message':'Specify the name and passwd'}))
def get(self):
        return make_response(jsonify({'response': 'Hello world!'}))
class LoginUserApi(MethodView):
def post(self):
data = request.get_json()
if data is None:
return make_response(jsonify({'message':'Specify the name and passwd'}))
if 'name' in data and 'passwd' in data:
name = data["name"]
passwd = data["passwd"]
            cur = db.connection.cursor()
            # Use a parameterized query to avoid SQL injection
            cur.execute('SELECT * FROM User WHERE name = %s', (name,))
            rv = cur.fetchall()
if not rv:
return make_response(jsonify({'message':'Specify a valid name and passwd'}))
            stored_passwd = rv[0][2]
            # The stored value is the hash (first 64 hex chars) followed by the salt
            salt = stored_passwd[64:]
            hashed_passwd = hashlib.sha256(passwd.encode() + salt.encode()).hexdigest()
            if hashed_passwd == stored_passwd[:64]:
token = jwt.encode({'user': name, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=1)}, current_app.config['SECRET_KEY'])
print(str(token) + " : " + str(rv[0][0]))
return jsonify({'token': token.decode('UTF-8'), 'user_id': rv[0][0], 'user_name': rv[0][1]})
else:
return jsonify('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
return make_response(jsonify({'message':'Specify the name and passwd'}))
def get(self):
        return make_response(jsonify({'response': 'All good!'}))
class CheckJWT(MethodView):
def post(self):
data = request.get_json()
if data is None:
return make_response(jsonify({'message':'No JSON content found on the request'}))
if 'token' in data:
            received_token = data['token']
            try:
                data = jwt.decode(received_token, current_app.config['SECRET_KEY'])
            except jwt.ExpiredSignatureError:
                return make_response(jsonify({'message':'Token expired'}))
            except Exception:
                return make_response(jsonify({'message':'Token invalid'}))
            return make_response(jsonify({'message':'Token is valid'}))
return make_response(jsonify({'message':'No field named token found'}))
register_user_api = RegisterUserAPI.as_view('register')
login_user_api = LoginUserApi.as_view('login')
check_jwt_api = CheckJWT.as_view('check')
test_api = TestAPI.as_view('test')
bp.add_url_rule('/login', view_func=login_user_api, methods=['POST', 'GET'])
bp.add_url_rule('/register', view_func=register_user_api, methods=['POST', 'GET'])
bp.add_url_rule('/check', view_func=check_jwt_api, methods=['POST'])
bp.add_url_rule('/', view_func=test_api, methods=['POST', 'GET'])
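# A minimal client sketch (not part of the original blueprint): it shows how the /register,
# /login and /check endpoints above could be exercised with the `requests` library from a
# separate script. The base URL and the credentials below are assumptions for illustration only.
def _demo_client(base_url="http://localhost:5000"):
    import requests  # assumed to be available in the client environment
    creds = {"name": "demo_user", "passwd": "demo_pass"}  # hypothetical credentials
    print(requests.post(f"{base_url}/register", json=creds).json())
    login = requests.post(f"{base_url}/login", json=creds).json()
    print(login)
    if "token" in login:
        print(requests.post(f"{base_url}/check", json={"token": login["token"]}).json())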
|
import hashlib
import itertools
import json
import os
import io
import glob
import pathlib
import re
import requests
import sys
import time
from bs4 import BeautifulSoup
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from PIL import Image
from hashlib import md5
from math import floor
from seleniumwire import webdriver
from typing import Dict, List, Union, Optional
from urllib.parse import urlparse, urlencode
from termcolor import colored
from urllib3.exceptions import SSLError, NewConnectionError
from slugify import slugify
Driver = Union[webdriver.Chrome, webdriver.Edge,
webdriver.Firefox, webdriver.Safari]
DRIVER_NAME_TO_CLASS = {
'Chrome': webdriver.Chrome,
'Edge': webdriver.Edge,
'Firefox': webdriver.Firefox,
'Safari': webdriver.Safari,
} # type: Dict[str, Driver]
def get_driver(name: str = "Chrome") -> Driver:
# driver_class = DRIVER_NAME_TO_CLASS[name]
# args = {'executable_path': path} if path else {}
# driver = driver_class(**args)
# driver = driver_class()
driver = DRIVER_NAME_TO_CLASS[name]()
driver.get(YandexImagesDownloader.MAIN_URL)
    # Click away the "Accept" consent button
els = driver.find_elements_by_css_selector("button.sc-pNWxx.sc-jrsJCI.dryRrI.emsrNO")
# els = driver.find_elements_by_css_selector("button.emsrNO")
for el in els:
print("Element Accept", el.text)
el.click()
    # Expand the security-level dropdown
el_combo = driver.find_element_by_css_selector("button.button2.button2_theme_clear.button2_size_s.button2_view_classic.dropdown-menu__switcher.i-bem.button2_js_inited")
el_combo.click()
    # Select "Unsecure" (the first option)
el_security_levels = driver.find_elements_by_css_selector("a.link.link_theme_normal.b-menu-vert__text.head-filter__item.i-bem")
el_security_levels[0].click()
return driver
#####
@dataclass_json
@dataclass
class ImgUrlResult:
status: str
message: str
img_url: str
img_path: str
STATUS_COLORS = {
'fail': 'red',
'success': 'green',
'ok': 'green',
        'skip': 'yellow',  # old synonym for 'exist'
'exist': 'yellow',
'negative': 'cyan',
}
def print(self):
status_colored = colored(self.status, self.STATUS_COLORS[self.status])
print(f"\t{status_colored}: {self.img_url} - {self.message}")
@dataclass_json
@dataclass
class PageResult:
status: str
message: str
page: int
errors_count: int
img_url_results: List[ImgUrlResult]
@dataclass_json
@dataclass
class KeywordResult:
status: str
message: str
keyword: str
errors_count: int
page_results: List[PageResult]
@dataclass_json
@dataclass
class DownloaderResult:
status: str
message: str
keyword_results: List[KeywordResult]
def save_json(json_path, downloader_result: DownloaderResult):
downloader_result_json = downloader_result.to_dict() # pylint: disable=no-member
pretty_json = json.dumps(downloader_result_json, indent=4, ensure_ascii=False)
with open(json_path, "w", encoding="utf-8") as f:
f.write(pretty_json)
print(f"Result information saved: {json_path}.")
#####
# Log of successfully downloaded image urls
downloaded_log = {}
# negative_ids = [] # used as global variable
def filepath_fix_existing(directory_path: pathlib.Path, name: str,
filepath: pathlib.Path) -> pathlib.Path:
"""Expands name portion of filepath with numeric "(x)" suffix.
"""
new_filepath = filepath
if filepath.exists():
for i in itertools.count(start=1):
new_name = f'{name} ({i}){filepath.suffix}'
new_filepath = directory_path / new_name
if not new_filepath.exists():
break
return new_filepath
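# Illustrative note (added): if "out/cat.jpg" already exists, then
# filepath_fix_existing(Path("out"), "cat", Path("out/cat.jpg")) returns Path("out/cat (1).jpg"),
# trying "cat (2).jpg" and so on until an unused name is found.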
def download_single_image(img_url: str,
output_directory: pathlib.Path,
min_width: int,
min_height: int,
sub_directory: str = "",
negative_ids=[]
) -> ImgUrlResult:
img_url_result = ImgUrlResult(status=None, message=None, img_url=img_url, img_path=None)
# Generate unique hash (SHA224 of img_url)
img_hash = hashlib.sha224(img_url.encode()).hexdigest()
directory_path = output_directory / sub_directory
directory_path.mkdir(parents=True, exist_ok=True)
img_path = directory_path / img_hash
# Skip downloading if image `id` is in negative
if img_hash in negative_ids:
img_url_result.status = "negative"
img_url_result.message = ""
img_url_result.print()
return img_url_result
# Skip downloading if image already exist
glob_path = f"{directory_path}/{img_hash}.*"
if glob.glob(glob_path):
img_url_result.status = "exist"
img_url_result.message = "Image already exists"
img_url_result.img_path = glob_path
img_url_result.print()
return img_url_result
    img_extensions = (".jpg", ".jpeg", ".jfif", ".jpe", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
content_type_to_ext = {
"image/gif": ".gif",
"image/jpeg": ".jpg",
"image/png": ".png",
"image/svg+xml": ".svg",
"image/x-icon": ".ico"
}
try:
response = requests.get(img_url, timeout=10)
if response.ok:
data = response.content
content_type = response.headers["Content-Type"]
if not any(img_path.name.endswith(ext) for ext in img_extensions):
img_path = img_path.with_suffix(
content_type_to_ext[content_type]
)
# Skip saving if image has lower than minimun resolution
tmp_pil = Image.open(io.BytesIO(data))
if tmp_pil.width < min_width or tmp_pil.height < min_height:
img_url_result.status = "small"
img_url_result.message = f"Image {tmp_pil.width}x{tmp_pil.height} is less than {min_width}x{min_height}"
# print("tmp pil:", tmp_pil.width, "x", tmp_pil.height, "min:", min_width, "x", min_height )
return img_url_result
with open(img_path, "wb") as f:
f.write(data)
img_url_result.status = "success"
img_url_result.message = "Downloaded the image."
img_url_result.img_path = str(img_path)
# Log img_url
downloaded_log[img_url] = 1
else:
img_url_result.status = "fail"
img_url_result.message = (f"img_url response is not ok."
f" response: {response}.")
except (KeyboardInterrupt, SystemExit):
raise
except (requests.exceptions.SSLError,
requests.exceptions.ConnectionError) as e:
img_url_result.status = "fail"
img_url_result.message = f"{type(e)}"
except Exception as exception:
img_url_result.status = "fail"
img_url_result.message = (f"Something is wrong here.",
f" Error: {type(exception), exception}")
# Print result
img_url_result.print()
# if img_url_result.status == "fail":
# print(colored(" fail", 'red'), f"{img_url} - {img_url_result.message}")
# else:
# print(colored(" fail", 'red'), f"{img_url} - {img_url_result.message}")
# print(f" {img_url_result.message} ==> {img_path}")
return img_url_result
#####
class YandexImagesDownloader:
"""Class to download images from Yandex """
MAIN_URL = "https://yandex.ru/images/search"
MAXIMUM_PAGES_PER_SEARCH = 50
MAXIMUM_IMAGES_PER_PAGE = 30
MAXIMUM_FILENAME_LENGTH = 50
def __init__(self,
driver: Driver,
output_directory="download/",
limit=100,
isize=None,
min_width=None,
min_height=None,
exact_isize=None,
iorient=None,
extension=None,
color=None,
itype=None,
commercial=None,
recent=None,
pool=None,
similar_images=False,
negative=[]):
# global negative_ids
# negative_ids = negative # Set global variable
self.driver = driver
self.output_directory = pathlib.Path(output_directory)
self.limit = limit
self.isize = isize
self.min_width = min_width
self.min_height = min_height
self.exact_isize = exact_isize
self.iorient = iorient
self.extension = extension
self.color = color
self.itype = itype
self.commercial = commercial
self.recent = recent
self.url_params = self.init_url_params()
self.requests_headers = {
'User-Agent':
("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML,"
" like Gecko) Chrome/41.0.2228.0 Safari/537.36")
}
self.cookies = {}
self.pool = pool
self.similar_images = similar_images
self.negative = negative # List of negative image ids (hashes)
print(f'Output directory is set to "{self.output_directory}/"')
print(f"Limit of images is set to {self.limit}")
def get_response(self):
current_url = self.driver.current_url
if self.similar_images:
current_url = self.MAIN_URL + "?"
for request in self.driver.requests:
if str(request).startswith(current_url):
return request.response
def init_url_params(self):
params = {
"nomisspell": 1,
"isize": self.isize,
"iw": None,
"ih": None,
"iorient": self.iorient,
"type": self.extension,
"color": self.color,
"itype": self.itype,
"commercial": self.commercial,
"recent": self.recent
}
if self.exact_isize:
width, height = self.exact_isize
params["isize"] = "eq"
params["iw"] = width
params["ih"] = height
return params
def get_url_params(self, page, text):
if self.similar_images:
params = {"p": page, "url": text, "rpt": "imagelike"}
else:
params = {"p": page, "text": text}
params.update(self.url_params)
return params
def download_images_by_page(self, keyword, page, imgs_count, sub_directory) -> PageResult:
page_result = PageResult(status=None,
message=None,
page=page,
errors_count=None,
img_url_results=[])
self.check_captcha_and_get(YandexImagesDownloader.MAIN_URL,
params=self.get_url_params(page, keyword))
response = self.get_response()
if not response or not (response.reason == "OK"):
page_result.status = "fail"
page_result.message = (f"Page response is not ok."
f" page: {page},",
f" status_code: {response.status_code if response else '???'}.")
page_result.errors_count = YandexImagesDownloader.MAXIMUM_IMAGES_PER_PAGE
return page_result
soup_page = BeautifulSoup(self.driver.page_source, "lxml")
# Getting all image urls from page
try:
tag_sepr_item = soup_page.find_all("div", class_="serp-item")
serp_items = [
json.loads(item.attrs["data-bem"])["serp-item"]
for item in tag_sepr_item
]
img_hrefs = [key["img_href"] for key in serp_items]
except Exception as e:
page_result.status = "fail"
page_result.message = str(e)
page_result.errors_count = YandexImagesDownloader.MAXIMUM_IMAGES_PER_PAGE
return page_result
errors_count = 0
for img_url in img_hrefs:
if imgs_count >= self.limit:
break
if self.pool:
img_url_result = self.pool.apply_async(
download_single_image,
# args=(),
kwds={
'img_url': img_url,
'output_directory': self.output_directory,
'sub_directory': sub_directory,
'min_width': self.min_width,
'min_height': self.min_height,
'negative_ids': self.negative,
})
else:
img_url_result = download_single_image(
img_url,
self.output_directory,
min_width=self.min_width,
min_height=self.min_height,
sub_directory=sub_directory
)
page_result.img_url_results.append(img_url_result)
imgs_count += 1
if self.pool:
for i, img_url_result in enumerate(page_result.img_url_results):
page_result.img_url_results[i] = img_url_result.get()
        # Count how many individual image downloads failed on this page
        errors_count += sum(1 for result in page_result.img_url_results
                            if result.status == "fail")
page_result.status = "success"
page_result.message = f"All successful images from page {page} downloaded."
page_result.errors_count = errors_count
return page_result
def download_images_by_keyword(self, keyword, sub_directory="", label_prefix="") -> KeywordResult:
keyword_result = KeywordResult(status=None, message=None, keyword=keyword, errors_count=None, page_results=[])
if self.similar_images:
params = {
"url": keyword,
"rpt": "imagelike"
}
else:
params = {
"text": keyword,
"nomisspell": 1
}
self.check_captcha_and_get(YandexImagesDownloader.MAIN_URL, params=params)
response = self.get_response()
if not response or not (response.reason == "OK"):
keyword_result.status = "fail"
keyword_result.message = (
"Failed to fetch a search page."
f" url: {YandexImagesDownloader.MAIN_URL},"
f" params: {params},"
f" status_code: {response.status_code if response else '???'}")
return keyword_result
soup = BeautifulSoup(self.driver.page_source, "lxml")
# Getting last_page.
tag_serp_list = soup.find("div", class_="serp-list")
if not tag_serp_list:
keyword_result.status = "success"
keyword_result.message = f"No images with keyword {keyword} found."
keyword_result.errors_count = 0
print(f" {keyword_result.message}")
return keyword_result
serp_list = json.loads(tag_serp_list.attrs["data-bem"])["serp-list"]
last_page = serp_list["lastPage"]
actual_last_page = 1 + floor(
self.limit / YandexImagesDownloader.MAXIMUM_IMAGES_PER_PAGE)
print(f" Found {last_page+1} pages of {keyword}.")
# Getting all images.
imgs_count = 0
errors_count = 0
for page in range(last_page + 1):
if imgs_count >= self.limit:
break
if page > actual_last_page:
actual_last_page += 1
print(f"\n [{label_prefix}]: Scrapping page {page+1}/{actual_last_page} {keyword}")
page_result = self.download_images_by_page(keyword, page, imgs_count, sub_directory)
keyword_result.page_results.append(page_result)
page_result_urls_count = len(page_result.img_url_results)
if page_result_urls_count <= 0:
print(" Last page found (0 results)")
break
imgs_count += len(page_result.img_url_results)
errors_count += page_result.errors_count
time.sleep(0.5) # bot id protection
keyword_result.status = "success"
keyword_result.message = f"All images for {keyword} downloaded!"
keyword_result.errors_count = errors_count
return keyword_result
def download_images(self, keywords: List[str], single_output_dir=False) -> DownloaderResult:
        downloader_result = DownloaderResult(status=None, message=None, keyword_results=[])
        downloader_result.status = "fail"
keywords_counter = 0
keywords_count = len(keywords)
for keyword in keywords:
keywords_counter += 1
if single_output_dir:
sub_directory = ""
elif self.similar_images:
sub_directory = slugify(keyword)
else:
sub_directory = keyword
# Skip if subdirectory (url) is too long
if len(sub_directory) > 255:
print(f"Sub-directory too long: {colored(sub_directory, 'cyan')}")
continue
print(f"{keywords_counter}/{keywords_count} Downloading images for {keyword}...")
try:
keyword_result = self.download_images_by_keyword(
keyword,
sub_directory=sub_directory,
label_prefix=f"{keywords_counter}/{keywords_count}" # Pass counter info for printing progress
)
                downloader_result.keyword_results.append(keyword_result)
            except Exception:
continue
print(keyword_result.message)
        downloader_result.status = "success"
        downloader_result.message = "Everything is downloaded!"
        return downloader_result
class StopCaptchaInput(Exception):
pass
def check_captcha_and_get(self, url, params=None):
"""Checking for captcha on url and get url after that.
If there is captcha, you have to type it in input() or quit."""
url_with_params = f"{url}?{urlencode(params)}"
del self.driver.requests
self.driver.get(url_with_params)
while True:
soup = BeautifulSoup(self.driver.page_source, "lxml")
if not soup.select(".form__captcha"):
break
print("Please, type the captcha in the browser, then press Enter or type [q] to exit")
reply = input()
if reply == "q":
raise YandexImagesDownloader.StopCaptchaInput()
del self.driver.requests
self.driver.get(url_with_params)
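# A minimal usage sketch (not part of the original module): wiring get_driver() and
# YandexImagesDownloader together. The keyword, limit, size filters and output directory are
# illustrative assumptions; running this opens a real browser session.
if __name__ == "__main__":
    driver = get_driver("Chrome")
    try:
        downloader = YandexImagesDownloader(
            driver,
            output_directory="download/",
            limit=10,
            min_width=200,
            min_height=200,
        )
        result = downloader.download_images(["red cat"])
        save_json("result.json", result)
    finally:
        driver.quit()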
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#########################################################
# filename : generateReadme.py
# author : geekhall
# version : v1.0.0
# function : Generate the root problem-list.md file.
#########################################################
import os
# Constant
ROOT_DIR = './'
# link address : https://github.com/geekhall/leetcode/tree/main/00003_LongestSubstring
LEETCODE_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/leetcode/'
LEETCODE_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/leetcode/'
LINTCODE_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/lintcode/'
LINTCODE_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/lintcode/'
CODEWARS_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/codewars/'
CODEWARS_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/codewars/'
EXCLUDES = ['QuickSort', '.vscode', '.git', 'TempSource','00000_Template','00000_Template_Go','static']
class Quiz:
def __init__(self, id, name, type):
self.id = id
self.name = name
self.type = type
    def __lt__(self, other):
        # Sort by quiz type first, then numerically by id within the same type
        if self.type == other.type:
            return int(self.id) < int(other.id)
        return self.type < other.type
def __str__(self):
return self.id+" "+str(self.name)
# Generate quiz list from folder started by numbers
def generate_quiz_list():
quizs = []
for parent,dirs,files in os.walk(ROOT_DIR):
for dirname in dirs:
if parent == './leetcode' and dirname not in EXCLUDES:
q = Quiz(dirname[0:5],dirname[6:], 1)
quizs.append(q)
if parent == './lintcode' and dirname not in EXCLUDES:
q = Quiz(dirname[0:5],dirname[6:], 2)
quizs.append(q)
if parent == './codewars' and dirname not in EXCLUDES:
q = Quiz(dirname[0:5],dirname[6:], 3)
quizs.append(q)
quizs.sort()
return quizs
if __name__ == '__main__':
quizs = generate_quiz_list()
f = open('./problem-list.md', 'w', encoding='utf-8')
f.write('## Problem List\n')
f.write('\n')
f.write('### LeetCode\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 1:
line = '|' + q.id + '|[' + q.name+']('+LEETCODE_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+LEETCODE_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.write('\n')
f.write('### LintCode\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 2:
line = '|' + q.id + '|[' + q.name+']('+LINTCODE_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+LINTCODE_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.write('\n')
f.write('### codewars\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 3:
line = '|' + q.id + '|[' + q.name+']('+CODEWARS_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+CODEWARS_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.close()
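# Example of a generated LeetCode row (illustrative only; the id and name below are made up):
# |00001|[TwoSum](https://github.com/geekhall/algorithms/tree/main/leetcode/00001_TwoSum)|[TwoSum](https://gitee.com/geekhall/algorithms/tree/main/leetcode/00001_TwoSum)|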
|
import cv2 as cv
import numpy as np
path ="/home/senai/tiago-projects/opencv-tutorials/opencv-course/Resources"
img = cv.imread(path +"/Photos/cats.jpg")
cv.imshow('', img)
blank = np.zeros(img.shape, dtype='uint8')
cv.imshow('b', blank)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('gray', gray)
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('blur', blur)
canny = cv.Canny(blur, 125, 175)
cv.imshow('canny', canny)
#ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
#cv.imshow('thresh', thresh)
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'length of list = {len(contours)}')
cv.drawContours(blank, contours, -1, (0,0,255), 2)
cv.imshow('cont', blank)
cv.waitKey(0)
|
import os
import textwrap
import unittest
from parameterized.parameterized import parameterized
from conans.client import tools
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.utils.tools import TestClient, NO_SETTINGS_PACKAGE_ID
from conans.util.files import load
class CreateTest(unittest.TestCase):
def dependencies_order_matches_requires_test(self):
client = TestClient()
conanfile = """from conans import ConanFile
from conans.tools import save
import os
class Pkg(ConanFile):
def package(self):
save(os.path.join(self.package_folder, "include/file.h"), "//file")
def package_info(self):
self.cpp_info.libs = ["Lib%s"]
"""
client.save({"conanfile.py": conanfile % "A"})
client.run("create . PkgA/0.1@user/testing")
client.save({"conanfile.py": conanfile % "B"})
client.run("create . PkgB/0.1@user/testing")
conanfile = """[requires]
PkgB/0.1@user/testing
PkgA/0.1@user/testing"""
client.save({"conanfile.txt": conanfile}, clean_first=True)
client.run("install . -g txt -g cmake")
text = load(os.path.join(client.current_folder, "conanbuildinfo.txt"))
txt = ";".join(text.splitlines())
self.assertIn("[libs];LibB;LibA", txt)
cmake = load(os.path.join(client.current_folder, "conanbuildinfo.cmake"))
self.assertIn("set(CONAN_LIBS LibB LibA ${CONAN_LIBS})", cmake)
def transitive_same_name_test(self):
# https://github.com/conan-io/conan/issues/1366
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "HelloBar"
version = "0.1"
'''
test_package = '''
from conans import ConanFile
class HelloTestConan(ConanFile):
requires = "HelloBar/0.1@lasote/testing"
def test(self):
pass
'''
client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_package})
client.run("create . lasote/testing")
self.assertIn("HelloBar/0.1@lasote/testing: WARN: Forced build from source",
client.user_io.out)
client.save({"conanfile.py": conanfile.replace("HelloBar", "Hello") +
" requires='HelloBar/0.1@lasote/testing'",
"test_package/conanfile.py": test_package.replace("HelloBar", "Hello")})
client.run("create . lasote/stable")
self.assertNotIn("HelloBar/0.1@lasote/testing: WARN: Forced build from source",
client.user_io.out)
@parameterized.expand([(True, ), (False, )])
def keep_build_test(self, with_test):
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
exports_sources = "*.h"
def source(self):
self.output.info("mysource!!")
def build(self):
self.output.info("mybuild!!")
def package(self):
self.output.info("mypackage!!")
self.copy("*.h")
""")
if with_test:
test_conanfile = textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
def test(self):
pass
""")
client.save({"conanfile.py": conanfile,
"header.h": "",
"test_package/conanfile.py": test_conanfile})
else:
client.save({"conanfile.py": conanfile,
"header.h": ""})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: mysource!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mybuild!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mypackage!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing package(): Packaged 1 '.h' file: header.h", client.out)
# keep the source
client.save({"conanfile.py": conanfile + " "})
client.run("create . Pkg/0.1@lasote/testing --keep-source")
self.assertIn("A new conanfile.py version was exported", client.out)
self.assertNotIn("Pkg/0.1@lasote/testing: mysource!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mybuild!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mypackage!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing package(): Packaged 1 '.h' file: header.h", client.out)
# keep build
client.run("create . Pkg/0.1@lasote/testing --keep-build")
self.assertIn("Pkg/0.1@lasote/testing: Won't be built as specified by --keep-build",
client.out)
self.assertNotIn("Pkg/0.1@lasote/testing: mysource!!", client.out)
self.assertNotIn("Pkg/0.1@lasote/testing: mybuild!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mypackage!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing package(): Packaged 1 '.h' file: header.h", client.out)
# Changes in the recipe again
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing --keep-build")
# The source folder is removed, but not necessary, as it will reuse build
self.assertNotIn("Pkg/0.1@lasote/testing: Removing 'source' folder", client.out)
self.assertIn("Pkg/0.1@lasote/testing: Won't be built as specified by --keep-build",
client.out)
self.assertNotIn("Pkg/0.1@lasote/testing: mysource!!", client.out)
self.assertNotIn("Pkg/0.1@lasote/testing: mybuild!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: mypackage!!", client.out)
self.assertIn("Pkg/0.1@lasote/testing package(): Packaged 1 '.h' file: header.h", client.out)
def keep_build_error_test(self):
client = TestClient()
conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing --keep-build", assert_error=True)
self.assertIn("ERROR: --keep-build specified, but build folder not found", client.out)
def keep_build_package_folder_test(self):
"""
        The package folder should always be deleted before a new conan create command, even with
--keep-build
"""
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
exports_sources = "*.h", "*.cpp"
def package(self):
self.copy("*.h")
""")
client.save({"conanfile.py": conanfile,
"header.h": "",
"source.cpp": ""})
client.run("create . pkg/0.1@danimtb/testing")
ref = ConanFileReference("pkg", "0.1", "danimtb", "testing")
pref = PackageReference(ref, NO_SETTINGS_PACKAGE_ID)
package_files = os.listdir(client.cache.package_layout(pref.ref).package(pref))
self.assertIn("header.h", package_files)
self.assertNotIn("source.cpp", package_files)
client.save({"conanfile.py": conanfile.replace("self.copy(\"*.h\")",
"self.copy(\"*.cpp\")")})
client.run("create . pkg/0.1@danimtb/testing -kb")
package_files = os.listdir(client.cache.package_layout(pref.ref).package(pref))
self.assertNotIn("header.h", package_files)
self.assertIn("source.cpp", package_files)
def create_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
def source(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def configure(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def requirements(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def build(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def package(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def package_info(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def system_requirements(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
self.output.info("Running system requirements!!")
"""})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Configuration:[settings]", "".join(str(client.out).splitlines()))
self.assertIn("Pkg/0.1@lasote/testing: Generating the package", client.out)
self.assertIn("Running system requirements!!", client.out)
client.run("search")
self.assertIn("Pkg/0.1@lasote/testing", client.out)
        # Creating with only user/channel will raise an error because the recipe has no name/version
client.run("create conanfile.py lasote/testing", assert_error=True)
self.assertIn("ERROR: conanfile didn't specify name", client.out)
# Same with only user, (default testing)
client.run("create . lasote", assert_error=True)
self.assertIn("Invalid parameter 'lasote', specify the full reference or user/channel",
client.out)
def create_name_command_line_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
def source(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def configure(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def requirements(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def build(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def package(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def package_info(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
def system_requirements(self):
assert(self.version=="0.1")
assert(self.name=="Pkg")
self.output.info("Running system requirements!!")
"""})
client.run("create . 0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Generating the package", client.out)
self.assertIn("Running system requirements!!", client.out)
client.run("search")
self.assertIn("Pkg/0.1@lasote/testing", client.out)
def create_werror_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class Pkg(ConanFile):
pass
"""})
client.run("export . LibA/0.1@user/channel")
client.run("export conanfile.py LibA/0.2@user/channel")
client.save({"conanfile.py": """from conans import ConanFile
class Pkg(ConanFile):
requires = "LibA/0.1@user/channel"
"""})
client.run("export ./ LibB/0.1@user/channel")
client.save({"conanfile.py": """from conans import ConanFile
class Pkg(ConanFile):
requires = "LibA/0.2@user/channel"
"""})
client.run("export . LibC/0.1@user/channel")
client.save({"conanfile.py": """from conans import ConanFile
class Pkg(ConanFile):
requires = "LibB/0.1@user/channel", "LibC/0.1@user/channel"
"""})
client.run("create ./conanfile.py Consumer/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Conflict in LibC/0.1@user/channel",
client.out)
def test_error_create_name_version(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
"""
client.save({"conanfile.py": conanfile})
client.run("create . Hello/1.2@lasote/stable")
client.run("create ./ Pkg/1.2@lasote/stable", assert_error=True)
self.assertIn("ERROR: Package recipe exported with name Pkg!=Hello", client.out)
client.run("create . Hello/1.1@lasote/stable", assert_error=True)
self.assertIn("ERROR: Package recipe exported with version 1.1!=1.2", client.out)
def create_user_channel_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
"""})
client.run("create . lasote/channel")
self.assertIn("Pkg/0.1@lasote/channel: Generating the package", client.out)
client.run("search")
self.assertIn("Pkg/0.1@lasote/channel", client.out)
client.run("create . lasote", assert_error=True) # testing default
self.assertIn("Invalid parameter 'lasote', specify the full reference or user/channel",
client.out)
def create_in_subfolder_test(self):
client = TestClient()
client.save({"subfolder/conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
"""})
client.run("create subfolder lasote/channel")
self.assertIn("Pkg/0.1@lasote/channel: Generating the package", client.out)
client.run("search")
self.assertIn("Pkg/0.1@lasote/channel", client.out)
def create_in_subfolder_with_different_name_test(self):
# Now with a different name
client = TestClient()
client.save({"subfolder/CustomConanFile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
"""})
client.run("create subfolder/CustomConanFile.py lasote/channel")
self.assertIn("Pkg/0.1@lasote/channel: Generating the package", client.out)
client.run("search")
self.assertIn("Pkg/0.1@lasote/channel", client.out)
def create_test_package_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
""", "test_package/conanfile.py": """from conans import ConanFile
class MyTest(ConanFile):
def test(self):
self.output.info("TESTING!!!")
"""})
client.run("create . lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Generating the package", client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): TESTING!!!", client.out)
def create_skip_test_package_test(self):
"""
Skip the test package stage if explicitly disabled with --test-folder=None
"""
# https://github.com/conan-io/conan/issues/2355
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
""", "test_package/conanfile.py": """from conans import ConanFile
class MyTest(ConanFile):
def test(self):
self.output.info("TESTING!!!")
"""})
client.run("create . lasote/testing --test-folder=None")
self.assertIn("Pkg/0.1@lasote/testing: Generating the package", client.out)
self.assertNotIn("Pkg/0.1@lasote/testing (test package): TESTING!!!", client.out)
def create_test_package_requires(self):
client = TestClient()
dep_conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
pass
"""
client.save({"conanfile.py": dep_conanfile})
client.run("create . Dep/0.1@user/channel")
client.run("create . Other/1.0@user/channel")
conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
requires = "Dep/0.1@user/channel"
"""
test_conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
requires = "Other/1.0@user/channel"
def build(self):
for r in self.requires.values():
self.output.info("build() Requires: %s" % str(r.ref))
import os
for dep in self.deps_cpp_info.deps:
self.output.info("build() cpp_info dep: %s" % dep)
self.output.info("build() cpp_info: %s"
% os.path.basename(self.deps_cpp_info["Pkg"].includedirs[0]))
self.output.info("build() cpp_info: %s"
% os.path.basename(self.deps_cpp_info["Dep"].includedirs[0]))
def test(self):
pass
"""
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test_conanfile})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing (test package): build() cpp_info: include",
client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): build() "
"Requires: Other/1.0@user/channel", client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): build() "
"Requires: Pkg/0.1@lasote/testing", client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): build() cpp_info dep: Other",
client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): build() cpp_info dep: Dep",
client.out)
self.assertIn("Pkg/0.1@lasote/testing (test package): build() cpp_info dep: Pkg",
client.out)
def build_policy_test(self):
# https://github.com/conan-io/conan/issues/1956
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "HelloBar"
version = "0.1"
build_policy = "always"
'''
test_package = '''
from conans import ConanFile
class HelloTestConan(ConanFile):
requires = "HelloBar/0.1@lasote/testing"
def test(self):
pass
'''
client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_package})
client.run("create . lasote/testing")
self.assertIn("HelloBar/0.1@lasote/testing: WARN: Forced build from source",
client.out)
client.save({"conanfile.py": conanfile.replace("HelloBar", "Hello") +
" requires='HelloBar/0.1@lasote/testing'",
"test_package/conanfile.py": test_package.replace("HelloBar", "Hello")})
client.run("create . lasote/stable")
self.assertIn("HelloBar/0.1@lasote/testing: WARN: Forced build from source",
client.out)
def test_build_folder_handling_test(self):
conanfile = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
'''
test_conanfile = '''
from conans import ConanFile
class TestConanLib(ConanFile):
def test(self):
pass
'''
client = TestClient()
default_build_dir = os.path.join(client.current_folder, "test_package", "build")
# Test the default behavior.
client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_conanfile},
clean_first=True)
client.run("create . lasote/stable")
self.assertTrue(os.path.exists(default_build_dir))
# Test if the specified build folder is respected.
client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_conanfile},
clean_first=True)
client.run("create -tbf=build_folder . lasote/stable")
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "build_folder")))
self.assertFalse(os.path.exists(default_build_dir))
# Test if using a temporary test folder can be enabled via the environment variable.
client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_conanfile},
clean_first=True)
with tools.environment_append({"CONAN_TEMP_TEST_FOLDER": "True"}):
client.run("create . lasote/stable")
self.assertFalse(os.path.exists(default_build_dir))
        # Test if using a temporary test folder can be enabled via the config file.
client.run('config set general.temp_test_folder=True')
client.run("create . lasote/stable")
self.assertFalse(os.path.exists(default_build_dir))
# Test if the specified build folder is respected also when the use of
# temporary test folders is enabled in the config file.
client.run("create -tbf=test_package/build_folder . lasote/stable")
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "test_package",
"build_folder")))
self.assertFalse(os.path.exists(default_build_dir))
def package_folder_build_error_test(self):
"""
Check package folder is not created if the build step fails
"""
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
def build(self):
raise ConanException("Build error")
""")
client.save({"conanfile.py": conanfile})
ref = ConanFileReference("pkg", "0.1", "danimtb", "testing")
pref = PackageReference(ref, NO_SETTINGS_PACKAGE_ID, None)
client.run("create . %s" % ref.full_repr(), assert_error=True)
self.assertIn("Build error", client.out)
package_folder = client.cache.package_layout(pref.ref).package(pref)
self.assertFalse(os.path.exists(package_folder))
|
"""
main
"""
from scr.floatingwindow import *
def main():
fw = FloatingWindow()
fw.mainloop()
if __name__ == '__main__':
main()
|
# written by junying
# 2019-04-28
def delkey(dict_, field):
    # Recursively delete every key named `field` from a nested dict.
    # Iterate over a snapshot so the dict can be mutated safely during the loop (Python 3).
    for key, value in list(dict_.items()):
        if key == field: dict_.pop(field)
        if isinstance(value, dict): delkey(value, field)
def rmempty(dict_):
    # Recursively drop keys whose values are empty/falsy.
    for key, value in list(dict_.items()):
        if not value: dict_.pop(key)
        if isinstance(value, dict): rmempty(value)
def isin(dic, field):
    # Return True if `field` appears as a key anywhere in the nested dict.
    for key, value in dic.items():
        if key == field: return True
        if isinstance(value, dict) and isin(value, field): return True
    return False
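# A minimal usage sketch (illustrative only; the sample dict below is made up):
if __name__ == "__main__":
    cfg = {"name": "a", "meta": {"token": "x", "empty": "", "token2": "y"}}
    delkey(cfg, "token")        # cfg -> {"name": "a", "meta": {"empty": "", "token2": "y"}}
    rmempty(cfg)                # cfg -> {"name": "a", "meta": {"token2": "y"}}
    print(isin(cfg, "token2"))  # -> True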
|
class GameBoard(object):
"""
The board of the game, default size is 25x25
"""
def __init__(self, width, height):
self.width = width
self.height = height
self.gameboard = []
        for i in range(width):
            self.gameboard.append([])
            for j in range(height):
                self.gameboard[i].append(None)
def set_object(self, obj, x, y):
# Add object obj to (x,y)
self.gameboard[x][y] = obj
def get_object(self, x, y):
return self.gameboard[x][y]
def get_width(self):
return self.width
def get_height(self):
return self.height
def move_object(self, old_x, old_y, new_x, new_y):
# Move object from (old_x,old_y) to (new_x,new_y)
self.gameboard[new_x][new_y] = self.gameboard[old_x][old_y]
self.gameboard[old_x][old_y] = None
    def del_object(self, x, y):
        # Delete whatever object is at coordinate (x, y)
self.gameboard[x][y] = None
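# A minimal usage sketch (illustrative only; the 25x25 size mirrors the class docstring):
if __name__ == "__main__":
    board = GameBoard(25, 25)
    board.set_object("player", 0, 0)
    board.move_object(0, 0, 1, 1)
    assert board.get_object(1, 1) == "player" and board.get_object(0, 0) is None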
|
from datetime import datetime
from torch import load, no_grad
from painting_mode.utils import *
class GNST:
'''
Inference stage for
Generative Neural Style Transfer with CycleGAN
'''
def __init__(self, content_path):
# Start the clock
self.time_started = self._start_clock()
# Prepare images
self.content = prep(content_path)
# Load network in evaluation mode
self.model = load('painting_mode/cyclegan_van_gogh.pth', map_location='cpu')
def _start_clock(self):
return datetime.now()
    def _stop_clock(self):
        # Return inference time in seconds, with two decimal places
        time_passed = datetime.now() - self.time_started
        return f'{time_passed.total_seconds():.2f}'
def transfer_style(self):
# Forward pass for inference
with no_grad():
output = self.model(self.content)
# Post-process output image
output_image = post(output)
# Stop the clock
time_passed = self._stop_clock()
# Return inference result and inference time in seconds
return output_image, time_passed
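# Illustrative usage (not runnable without the CycleGAN checkpoint loaded above;
# "content.jpg" is a hypothetical input path):
#
#   gnst = GNST("content.jpg")
#   stylized, seconds = gnst.transfer_style()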
|
from django.urls import path
from django.views.generic import TemplateView
app_name='menu'
urlpatterns = [
path('', TemplateView.as_view(template_name='menu/main_menu.html'), name='main'),
path('page1', TemplateView.as_view(template_name='menu/main_menu.html'), name='page1'),
path('page2', TemplateView.as_view(template_name='menu/main_menu.html'), name='page2'),
path('page3', TemplateView.as_view(template_name='menu/main_menu.html'), name='page3'),
]
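# With app_name set, these routes reverse through the "menu" namespace,
# e.g. reverse("menu:page1") -> "/page1" (assuming this URLconf is included at the site root).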
|
from proxy.system_handler import system_handler
from proxy.data import data_loader
from proxy.Decklist import Decklist
import os
import test
import logging
## Logging configuration
logger = logging.getLogger()
logger.setLevel(level = logging.INFO)
stream = logging.StreamHandler()
streamformat = logging.Formatter("[%(asctime)s]: [%(levelname)s]: [%(module)s]: %(message)s")
stream.setFormatter(streamformat)
stream.setLevel(logging.INFO)
logger.addHandler(stream)
def run():
#1. Get decklist filepath
logger.info("Started Application")
decklist_file = system_handler.choose_file()
logger.info(f"Loaded desklist: %s", decklist_file)
#2. Load decklist contents
decklist = Decklist(decklist_file)
decklist.populate_attributes()
#3. Create directory structure
system_handler.setup_dir(decklist.file_dir)
logger.info(f"Made subdirecotires: %s, and %s", decklist.file_dir, decklist.latex_dir)
#4. Download png images from decklist_dict and place in appropriate directory (imageDirectory)
data_loader.download_pngFiles(decklist.file_dir, decklist.card_info)
logger.info(f"Downloaded all the .png files to: %s", decklist.file_dir)
#5. Build laTEX document
decklist.make_latex()
logger.info(f"Created the laTEX string")
system_handler.make_file(decklist.latex_dir, decklist.latex_docName, decklist.latex_docString)
logger.info(f"Made laTEX file")
#6. Compile laTEX document
system_handler.compile_latex(decklist.latex_file, decklist.file_dir)
#7. Clean up
system_handler.clean_up(decklist.file_dir)
logger.info(f"Deleted subdirecotires:")
logger.info("Finished Application")
|
# Thomas-Fiering model
import math
import numpy as np
import matplotlib.pyplot as plt
month=11#int(input("enter the month number: "))-1
year=2017#int(input("enter year"))
X_t= 6401#int(input("inflow data: "))
number_of_months=20*12#int(input("Enter the number of months for which you want predictions"))
#mean
u=[5563.75, 5626.2, 7415.4, 10994.6, 21938.45, 32920.2, 45904.45, 48152.7, 27123.6, 12069.42, 7652.368, 6191.368]
month_name=['Jan','Feb','Mar','Apr','May','June','July','Aug','Sep','Oct','Nov','Dec']
#standard deviation
sd=[766.457, 922.113, 1793.752, 2550.227, 5808.079, 8193.273, 8201.919, 8091.542, 5783.090, 1897.098, 991.837, 653.419]
print("Month,Year,Inflow")
np.random.seed(9001)
#lag-1 correlation
lag=[0.227655, 0.551696, 0.401201485, 0.605124717, 0.491461791, 0.410272397, 0.399201027, 0.389443329, 0.472289721, 0.700926754, 0.85389162, 0.742986236]
entry=[]
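# Thomas-Fiering monthly recurrence, as implemented in the loop below:
#   z_t     = (X_t - u_j) / sd_j
#   z_{t+1} = r_j * z_t + e * sqrt(1 - r_j^2),   e ~ N(0, 1)
#   X_{t+1} = u_{j+1} + z_{t+1} * sd_{j+1}
# where u, sd and lag (r) are the monthly means, standard deviations and
# lag-1 correlations listed above.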
for i in range(number_of_months):
rn=np.random.normal(0,1,1)[0]
z_t=(X_t-u[month])/sd[month]
z_t1=lag[month]*z_t+rn*math.sqrt(1-lag[month]*lag[month])
X_t1=u[(month+1)%12]+z_t1*sd[(month+1)%12]
if(month==11):
year=year+1
month=(month+1)%12
#print(month_name[month],",",year,",",X_t1)
X_t=X_t1
entry.append({'month':month_name[month],'year':year,'inflow':X_t})
month_dict={}
for items in entry:
if items['month'] in month_dict:
month_dict[items['month']]['year'].append(items['year'])
month_dict[items['month']]['inflow'].append(items['inflow'])
else:
month_dict[items['month']]={'year':[items['year']],'inflow':[items['inflow']]}
print(month_dict)
plt.style.use('seaborn')
# create a color palette
palette = plt.get_cmap('Set1')
fig = plt.figure()
# multiple line plot
num=0
for i in range(len(month_name)):
plt.plot(month_dict[month_name[i]]['year'],month_dict[month_name[i]]['inflow'],color=palette(num), marker='', label=str(month_name[i]))
#ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
num+=1
# Add legend
plt.legend(loc=2, ncol=2)
# Add titles
plt.title("Month wise thomas fiering model prediction for 20 years", loc='left', fontsize=12, fontweight=0, color='orange')
plt.xlabel("Time in Years",fontsize=12)
plt.ylabel("Inflow",fontsize=12)
plt.show()
|
VERSION = "1.38"
|
game_time = input().split(" ")
initial_hour = int(game_time[0])
initial_min = int(game_time[1])
final_hour = int(game_time[2])
final_min = int(game_time[3])
if (initial_hour < final_hour): # First case: HI < HF
total_hour = (final_hour - initial_hour)
if (initial_min <= final_min):
total_min = (final_min - initial_min)
else:
total_min = (60 - initial_min) + final_min
total_hour = total_hour - 1
elif(initial_hour == final_hour): # Second case: HI == HF
total_hour = (24 - initial_hour) + final_hour
if (initial_min < final_min):
total_min = (final_min - initial_min)
total_hour = initial_hour - final_hour
if(initial_min == final_min):
total_hour = final_hour - initial_hour
else:
total_min = (60 - initial_min) + final_min
total_hour = total_hour - 1
else: # Third case: HI > HF
total_hour = (24 - initial_hour) + final_hour
if (initial_min <= final_min):
total_min = (final_min - initial_min)
else:
total_min = (60 - initial_min) + final_min
total_hour = total_hour - 1
if(total_min == 60): # checking if total minutes reached 60
total_min = 0
total_hour = total_hour + 1
print("O JOGO DUROU {0} HORA(S) E {1} MINUTO(S)".format(total_hour,total_min))
|
#!/usr/bin/env python3
"""A cross-platform script to build cohorts, run models, build and
start a notebook, open a web browser on the correct port, and handle
shutdowns gracefully
"""
import cohortextractor
import glob
import importlib
import os
import re
import requests
import shutil
import sys
import base64
from io import BytesIO
from argparse import ArgumentParser
from matplotlib import pyplot as plt
import numpy as np
from pandas.api.types import is_categorical_dtype
from pandas.api.types import is_bool_dtype
from pandas.api.types import is_datetime64_dtype
from pandas.api.types import is_numeric_dtype
import yaml
from datetime import datetime
import seaborn as sns
from cohortextractor.remotejobs import get_job_logs
from cohortextractor.remotejobs import submit_job
notebook_tag = "opencorona-research"
target_dir = "/home/app/notebook"
def relative_dir():
return os.getcwd()
def make_chart(name, series, dtype):
FLOOR_DATE = datetime(1960, 1, 1)
CEILING_DATE = datetime.today()
img = BytesIO()
# Setting figure sizes in seaborn is a bit weird:
# https://stackoverflow.com/a/23973562/559140
if is_categorical_dtype(dtype):
sns.set_style("ticks")
sns.catplot(
x=name, data=series.to_frame(), kind="count", height=3, aspect=3 / 2
)
plt.xticks(rotation=45)
elif is_bool_dtype(dtype):
sns.set_style("ticks")
sns.catplot(x=name, data=series.to_frame(), kind="count", height=2, aspect=1)
plt.xticks(rotation=45)
elif is_datetime64_dtype(dtype):
# Early dates are dummy values; I don't know what late dates
# are but presumably just dud data
series = series[(series > FLOOR_DATE) & (series <= CEILING_DATE)]
# Set bin numbers appropriate to the time window
delta = series.max() - series.min()
if delta.days <= 31:
bins = delta.days
elif delta.days <= 365 * 10:
bins = delta.days / 31
else:
bins = delta.days / 365
if bins < 1:
bins = 1
fig = plt.figure(figsize=(5, 2))
ax = fig.add_subplot(111)
series.hist(bins=int(bins), ax=ax)
plt.xticks(rotation=45, ha="right")
elif is_numeric_dtype(dtype):
# Trim percentiles and negatives which are usually bad data
series = series.fillna(0)
series = series[
(series < np.percentile(series, 95))
& (series > np.percentile(series, 5))
& (series > 0)
]
fig = plt.figure(figsize=(5, 2))
ax = fig.add_subplot(111)
sns.distplot(series, kde=False, ax=ax)
plt.xticks(rotation=45)
else:
raise ValueError()
plt.savefig(img, transparent=True, bbox_inches="tight")
img.seek(0)
plt.close()
return base64.b64encode(img.read()).decode("UTF-8")
def preflight_generation_check():
"""Raise an informative error if things are not as they should be
"""
missing_paths = []
required_paths = ["codelists/", "analysis/"]
for p in required_paths:
if not os.path.exists(p):
missing_paths.append(p)
if missing_paths:
msg = "This command expects the following relative paths to exist: {}"
raise RuntimeError(msg.format(", ".join(missing_paths)))
def generate_cohort(output_dir, expectations_population):
preflight_generation_check()
for study_name, suffix in list_study_definitions():
print(f"Generating cohort for {study_name}...")
_generate_cohort(output_dir, study_name, suffix, expectations_population)
def _generate_cohort(output_dir, study_name, suffix, expectations_population):
print("Running. Please wait...")
study = load_study_definition(study_name)
with_sqlcmd = shutil.which("sqlcmd") is not None
os.makedirs(output_dir, exist_ok=True)
study.to_csv(
f"{output_dir}/input{suffix}.csv",
expectations_population=expectations_population,
with_sqlcmd=with_sqlcmd,
)
print(
f"Successfully created cohort and covariates at {output_dir}/input{suffix}.csv"
)
def make_cohort_report(input_dir, output_dir):
for study_name, suffix in list_study_definitions():
_make_cohort_report(input_dir, output_dir, study_name, suffix)
def _make_cohort_report(input_dir, output_dir, study_name, suffix):
study = load_study_definition(study_name)
df = study.csv_to_df(f"{input_dir}/input{suffix}.csv")
descriptives = df.describe(include="all")
for name, dtype in zip(df.columns, df.dtypes):
if name == "patient_id":
continue
main_chart = '<div><img src="data:image/png;base64,{}"/></div>'.format(
make_chart(name, df[name], dtype)
)
empty_values_chart = ""
if is_datetime64_dtype(dtype):
# also do a null / not null plot
empty_values_chart = '<div><img src="data:image/png;base64,{}"/></div>'.format(
make_chart(name, df[name].isnull(), bool)
)
elif is_numeric_dtype(dtype):
# also do a null / not null plot
empty_values_chart = '<div><img src="data:image/png;base64,{}"/></div>'.format(
make_chart(name, df[name] > 0, bool)
)
descriptives.loc["values", name] = main_chart
descriptives.loc["nulls", name] = empty_values_chart
with open(f"{output_dir}/descriptives{suffix}.html", "w") as f:
f.write(
"""<html>
<head>
<style>
table {
text-align: left;
position: relative;
border-collapse: collapse;
}
td, th {
padding: 8px;
margin: 2px;
}
td {
border-left: solid 1px black;
}
tr:nth-child(even) {background: #EEE}
tr:nth-child(odd) {background: #FFF}
tbody th:first-child {
position: sticky;
left: 0px;
background: #fff;
}
</style>
</head>
<body>"""
)
f.write(descriptives.to_html(escape=False, na_rep="", justify="left", border=0))
f.write("</body></html>")
print(f"Created cohort report at {output_dir}/descriptives{suffix}.html")
def update_codelists():
base_path = os.path.join(os.getcwd(), "codelists")
# delete all existing codelists
for path in glob.glob(os.path.join(base_path, "*.csv")):
os.unlink(path)
with open(os.path.join(base_path, "codelists.txt")) as f:
for line in f:
line = line.strip()
if not line:
continue
print(line)
project_id, codelist_id, version = line.split("/")
url = f"https://codelists.opensafely.org/codelist/{project_id}/{codelist_id}/{version}/download.csv"
rsp = requests.get(url)
rsp.raise_for_status()
with open(
os.path.join(base_path, f"{project_id}-{codelist_id}.csv"), "w"
) as f:
f.write(rsp.text)
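# codelists/codelists.txt is expected to hold one "project_id/codelist_id/version"
# reference per line (e.g., hypothetically, "opensafely/asthma-diagnosis/2020-04-15"),
# matching the download URL built above.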
def dump_cohort_sql(study_definition):
study = load_study_definition(study_definition)
print(study.to_sql())
def dump_study_yaml(study_definition):
study = load_study_definition(study_definition)
print(yaml.dump(study.to_data()))
def load_study_definition(name):
sys.path.extend([relative_dir(), os.path.join(relative_dir(), "analysis")])
# Avoid creating __pycache__ files in the analysis directory
sys.dont_write_bytecode = True
return importlib.import_module(name).study
def list_study_definitions():
pattern = re.compile(r"^(study_definition(_\w+)?)\.py$")
matches = []
for name in sorted(os.listdir(os.path.join(relative_dir(), "analysis"))):
match = pattern.match(name)
if match:
name = match.group(1)
suffix = match.group(2) or ""
matches.append((name, suffix))
if not matches:
raise RuntimeError(f"No study definitions found in {relative_dir()}")
return matches
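# For example (illustrative filenames): "study_definition.py" yields
# ("study_definition", "") and "study_definition_copd.py" yields
# ("study_definition_copd", "_copd"), so outputs get suffixes like "input_copd.csv".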
def main():
parser = ArgumentParser(
description="Generate cohorts and run models in openSAFELY framework. "
)
# Cohort parser options
parser.add_argument("--version", help="Display version", action="store_true")
subparsers = parser.add_subparsers(help="sub-command help")
generate_cohort_parser = subparsers.add_parser(
"generate_cohort", help="Generate cohort"
)
generate_cohort_parser.set_defaults(which="generate_cohort")
cohort_report_parser = subparsers.add_parser(
"cohort_report", help="Generate cohort report"
)
cohort_report_parser.set_defaults(which="cohort_report")
cohort_report_parser.add_argument(
"--input-dir",
help="Location to look for input CSVs",
type=str,
default="analysis",
)
cohort_report_parser.add_argument(
"--output-dir",
help="Location to store output CSVs",
type=str,
default="output",
)
run_notebook_parser = subparsers.add_parser("notebook", help="Run notebook")
run_notebook_parser.set_defaults(which="notebook")
update_codelists_parser = subparsers.add_parser(
"update_codelists",
help="Update codelists, using specification at codelists/codelists.txt",
)
update_codelists_parser.set_defaults(which="update_codelists")
dump_cohort_sql_parser = subparsers.add_parser(
"dump_cohort_sql", help="Show SQL to generate cohort"
)
dump_cohort_sql_parser.add_argument(
"--study-definition", help="Study definition name", type=str, required=True
)
dump_cohort_sql_parser.set_defaults(which="dump_cohort_sql")
dump_study_yaml_parser = subparsers.add_parser(
"dump_study_yaml", help="Show study definition as YAML"
)
dump_study_yaml_parser.set_defaults(which="dump_study_yaml")
dump_study_yaml_parser.add_argument(
"--study-definition", help="Study definition name", type=str, required=True
)
remote_parser = subparsers.add_parser("remote", help="Manage remote jobs")
remote_parser.set_defaults(which="remote")
# Remote subcommands
remote_subparser = remote_parser.add_subparsers(help="Remote sub-command help")
generate_cohort_remote_parser = remote_subparser.add_parser(
"generate_cohort", help="Generate cohort"
)
generate_cohort_remote_parser.set_defaults(which="remote_generate_cohort")
generate_cohort_remote_parser.add_argument(
"--ref",
help="Tag or branch against which to run the extraction",
type=str,
required=True,
)
generate_cohort_remote_parser.add_argument(
"--repo",
help="Tag or branch against which to run the extraction (leave blank for current repo)",
type=str,
)
generate_cohort_remote_parser.add_argument(
"--db",
help="Database to run against",
choices=["full", "slice", "dummy"],
nargs="?",
const="full",
default="full",
type=str,
)
generate_cohort_remote_parser.add_argument(
"--backend",
help="Backend to run against",
choices=["all", "tpp"],
nargs="?",
const="all",
default="all",
type=str,
)
log_remote_parser = remote_subparser.add_parser("log", help="Show logs")
log_remote_parser.set_defaults(which="remote_log")
# Cohort parser options
generate_cohort_parser.add_argument(
"--output-dir",
help="Location to store output CSVs",
type=str,
default="output",
)
cohort_method_group = generate_cohort_parser.add_mutually_exclusive_group(
required=True
)
cohort_method_group.add_argument(
"--expectations-population",
help="Generate a dataframe from study expectations",
type=int,
default=0,
)
cohort_method_group.add_argument(
"--database-url",
help="Database URL to query",
type=str,
default=os.environ.get("DATABASE_URL", ""),
)
options = parser.parse_args()
if options.version:
print(f"v{cohortextractor.__version__}")
elif not hasattr(options, "which"):
parser.print_help()
elif options.which == "generate_cohort":
os.environ["DATABASE_URL"] = options.database_url
generate_cohort(options.output_dir, options.expectations_population)
elif options.which == "cohort_report":
make_cohort_report(options.input_dir, options.output_dir)
elif options.which == "update_codelists":
update_codelists()
print("Codelists updated. Don't forget to commit them to the repo")
elif options.which == "dump_cohort_sql":
dump_cohort_sql(options.study_definition)
elif options.which == "dump_study_yaml":
dump_study_yaml(options.study_definition)
elif options.which == "remote_generate_cohort":
submit_job(
options.backend, options.db, options.ref, "generate_cohort", options.repo
)
print("Job submitted!")
elif options.which == "remote_log":
logs = get_job_logs()
print("\n".join(logs))
if __name__ == "__main__":
main()
|
"""
oauthlib.oauth2.rfc6749.endpoint.metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the `OAuth 2.0 Authorization Server Metadata`.
.. _`OAuth 2.0 Authorization Server Metadata`: https://tools.ietf.org/html/rfc8414
"""
import copy
import json
import logging
from .. import grant_types
from .authorization import AuthorizationEndpoint
from .base import BaseEndpoint, catch_errors_and_unavailability
from .introspect import IntrospectEndpoint
from .revocation import RevocationEndpoint
from .token import TokenEndpoint
log = logging.getLogger(__name__)
class MetadataEndpoint(BaseEndpoint):
"""OAuth2.0 Authorization Server Metadata endpoint.
This specification generalizes the metadata format defined by
`OpenID Connect Discovery 1.0` in a way that is compatible
with OpenID Connect Discovery while being applicable to a wider set
of OAuth 2.0 use cases. This is intentionally parallel to the way
that OAuth 2.0 Dynamic Client Registration Protocol [`RFC7591`_]
generalized the dynamic client registration mechanisms defined by
OpenID Connect Dynamic Client Registration 1.0
in a way that is compatible with it.
.. _`OpenID Connect Discovery 1.0`: https://openid.net/specs/openid-connect-discovery-1_0.html
.. _`RFC7591`: https://tools.ietf.org/html/rfc7591
"""
def __init__(self, endpoints, claims={}, raise_errors=True):
assert isinstance(claims, dict)
for endpoint in endpoints:
assert isinstance(endpoint, BaseEndpoint)
BaseEndpoint.__init__(self)
self.raise_errors = raise_errors
self.endpoints = endpoints
self.initial_claims = claims
self.claims = self.validate_metadata_server()
@catch_errors_and_unavailability
def create_metadata_response(self, uri, http_method='GET', body=None,
headers=None):
"""Create metadata response
"""
headers = {
'Content-Type': 'application/json'
}
return headers, json.dumps(self.claims), 200
def validate_metadata(self, array, key, is_required=False, is_list=False, is_url=False, is_issuer=False):
if not self.raise_errors:
return
if key not in array:
if is_required:
raise ValueError("key {} is a mandatory metadata.".format(key))
elif is_issuer:
if not array[key].startswith("https"):
raise ValueError("key {}: {} must be an HTTPS URL".format(key, array[key]))
if "?" in array[key] or "&" in array[key] or "#" in array[key]:
raise ValueError("key {}: {} must not contain query or fragment components".format(key, array[key]))
elif is_url:
if not array[key].startswith("http"):
raise ValueError("key {}: {} must be an URL".format(key, array[key]))
elif is_list:
if not isinstance(array[key], list):
raise ValueError("key {}: {} must be an Array".format(key, array[key]))
for elem in array[key]:
if not isinstance(elem, str):
raise ValueError("array {}: {} must contains only string (not {})".format(key, array[key], elem))
def validate_metadata_token(self, claims, endpoint):
"""
If the token endpoint is used in the grant type, the value of this
parameter MUST be the same as the value of the "grant_type"
parameter passed to the token endpoint defined in the grant type
definition.
"""
self._grant_types.extend(endpoint._grant_types.keys())
claims.setdefault("token_endpoint_auth_methods_supported", ["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "token_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "token_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "token_endpoint", is_required=True, is_url=True)
def validate_metadata_authorization(self, claims, endpoint):
claims.setdefault("response_types_supported",
list(filter(lambda x: x != "none", endpoint._response_types.keys())))
claims.setdefault("response_modes_supported", ["query", "fragment"])
# The OAuth2.0 Implicit flow is defined as a "grant type" but it is not
# using the "token" endpoint, as such, we have to add it explicitly to
# the list of "grant_types_supported" when enabled.
if "token" in claims["response_types_supported"]:
self._grant_types.append("implicit")
self.validate_metadata(claims, "response_types_supported", is_required=True, is_list=True)
self.validate_metadata(claims, "response_modes_supported", is_list=True)
if "code" in claims["response_types_supported"]:
code_grant = endpoint._response_types["code"]
if not isinstance(code_grant, grant_types.AuthorizationCodeGrant) and hasattr(code_grant, "default_grant"):
code_grant = code_grant.default_grant
claims.setdefault("code_challenge_methods_supported",
list(code_grant._code_challenge_methods.keys()))
self.validate_metadata(claims, "code_challenge_methods_supported", is_list=True)
self.validate_metadata(claims, "authorization_endpoint", is_required=True, is_url=True)
def validate_metadata_revocation(self, claims, endpoint):
claims.setdefault("revocation_endpoint_auth_methods_supported",
["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "revocation_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "revocation_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "revocation_endpoint", is_required=True, is_url=True)
def validate_metadata_introspection(self, claims, endpoint):
claims.setdefault("introspection_endpoint_auth_methods_supported",
["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "introspection_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "introspection_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "introspection_endpoint", is_required=True, is_url=True)
def validate_metadata_server(self):
"""
Authorization servers can have metadata describing their
configuration. The following authorization server metadata values
are used by this specification. More details can be found in
`RFC8414 section 2`_ :
issuer
REQUIRED
authorization_endpoint
URL of the authorization server's authorization endpoint
[`RFC6749#Authorization`_]. This is REQUIRED unless no grant types are supported
that use the authorization endpoint.
token_endpoint
URL of the authorization server's token endpoint [`RFC6749#Token`_]. This
is REQUIRED unless only the implicit grant type is supported.
scopes_supported
RECOMMENDED.
response_types_supported
REQUIRED.
Other OPTIONAL fields:
jwks_uri,
registration_endpoint,
response_modes_supported
grant_types_supported
OPTIONAL. JSON array containing a list of the OAuth 2.0 grant
type values that this authorization server supports. The array
values used are the same as those used with the "grant_types"
parameter defined by "OAuth 2.0 Dynamic Client Registration
Protocol" [`RFC7591`_]. If omitted, the default value is
"["authorization_code", "implicit"]".
token_endpoint_auth_methods_supported
token_endpoint_auth_signing_alg_values_supported
service_documentation
ui_locales_supported
op_policy_uri
op_tos_uri
revocation_endpoint
revocation_endpoint_auth_methods_supported
revocation_endpoint_auth_signing_alg_values_supported
introspection_endpoint
introspection_endpoint_auth_methods_supported
introspection_endpoint_auth_signing_alg_values_supported
code_challenge_methods_supported
Additional authorization server metadata parameters MAY also be used.
Some are defined by other specifications, such as OpenID Connect
Discovery 1.0 [`OpenID.Discovery`_].
.. _`RFC8414 section 2`: https://tools.ietf.org/html/rfc8414#section-2
.. _`RFC6749#Authorization`: https://tools.ietf.org/html/rfc6749#section-3.1
.. _`RFC6749#Token`: https://tools.ietf.org/html/rfc6749#section-3.2
.. _`RFC7591`: https://tools.ietf.org/html/rfc7591
.. _`OpenID.Discovery`: https://openid.net/specs/openid-connect-discovery-1_0.html
"""
claims = copy.deepcopy(self.initial_claims)
self.validate_metadata(claims, "issuer", is_required=True, is_issuer=True)
self.validate_metadata(claims, "jwks_uri", is_url=True)
self.validate_metadata(claims, "scopes_supported", is_list=True)
self.validate_metadata(claims, "service_documentation", is_url=True)
self.validate_metadata(claims, "ui_locales_supported", is_list=True)
self.validate_metadata(claims, "op_policy_uri", is_url=True)
self.validate_metadata(claims, "op_tos_uri", is_url=True)
self._grant_types = []
for endpoint in self.endpoints:
if isinstance(endpoint, TokenEndpoint):
self.validate_metadata_token(claims, endpoint)
if isinstance(endpoint, AuthorizationEndpoint):
self.validate_metadata_authorization(claims, endpoint)
if isinstance(endpoint, RevocationEndpoint):
self.validate_metadata_revocation(claims, endpoint)
if isinstance(endpoint, IntrospectEndpoint):
self.validate_metadata_introspection(claims, endpoint)
# "grant_types_supported" is a combination of all OAuth2 grant types
# allowed in the current provider implementation.
claims.setdefault("grant_types_supported", self._grant_types)
self.validate_metadata(claims, "grant_types_supported", is_list=True)
return claims
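# Illustrative usage (a minimal sketch; "server" stands for any pre-configured
# oauthlib endpoint instance, e.g. a TokenEndpoint, and the issuer URL is hypothetical):
#
#   endpoint = MetadataEndpoint([server], claims={
#       "issuer": "https://auth.example.com",
#       "authorization_endpoint": "https://auth.example.com/authorize",
#       "token_endpoint": "https://auth.example.com/token",
#       "response_types_supported": ["code"],
#   })
#   headers, body, status = endpoint.create_metadata_response(
#       "/.well-known/oauth-authorization-server")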
|