|
# response status codes for client use
# if the value is a tuple, the second element is returned as the error message
# database
db_add_err = 'db_add_err', 'Database Add Error'
db_delete_err = 'db_delete_err', 'Database Delete Error'
db_update_err = 'db_update_err', 'Database Update Error'
db_query_err = 'db_query_err', 'Database Query Error'
db_data_not_found = 'db_data_not_found', 'Data Not Found'
db_data_already_exist = 'db_data_already_exist', 'Data Already Exists'
db_data_in_use = 'db_data_in_use', 'Data In Use'
# remote request
api_request_err = "api_req_err", 'Remote api request error'
# error
uri_unauthorized = 'uri_unauthorized', 'Unauthorized' # 401 Unauthorized, need login
uri_forbidden = 'uri_forbidden', 'Forbidden' # 403 Forbidden, need permission
uri_not_found = 'uri_not_found', 'Not Found' # 404 Not Found
method_not_allowed = 'method_not_allowed', 'Method Not Allowed' # 405 HTTP method is not supported
internal_server_error = 'internal_server_error', 'Internal Server Error' # 500
bad_request = 'bad_request', 'Bad Request' # 400 Bad Request, payload error
# account
account_not_found = 'account_not_found', 'No Account Found'
account_disabled = 'account_disabled', "Account Disabled"
account_verify_err = 'account_verify_err', 'Wrong Password'
# others
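# A minimal sketch (assumption, not part of the original file) of how such
# tuple-valued members could be declared on an Enum whose second element is the
# client-facing message. The class name "ResponseCode" and the "code"/"msg"
# attributes are hypothetical.
from enum import Enum


class ResponseCode(Enum):
    db_add_err = 'db_add_err', 'Database Add Error'
    account_verify_err = 'account_verify_err', 'Wrong Password'

    def __init__(self, code, msg):
        # Enum passes the tuple elements to __init__, so each member exposes
        # both its short code and its human-readable message.
        self.code = code
        self.msg = msg


# Example: ResponseCode.db_add_err.msg -> 'Database Add Error'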
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for adding results to perf dashboard."""
import calendar
import datetime
import httplib
import json
import os
import urllib
import urllib2
from slave import slave_utils
# The paths in the results dashboard URLs for sending and viewing results.
SEND_RESULTS_PATH = '/add_point'
RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
# CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache
# results which need to be retried.
CACHE_DIR = 'results_dashboard'
CACHE_FILENAME = 'results_to_retry'
def SendResults(data, url, build_dir):
"""Sends results to the Chrome Performance Dashboard.
This function tries to send the given data to the dashboard, in addition to
any data from the cache file. The cache file contains any data that wasn't
successfully sent in a previous run.
Args:
data: The data to try to send. Must be JSON-serializable.
url: Performance Dashboard URL (including scheme).
build_dir: Directory in which the cache directory will be created.
"""
results_json = json.dumps(data)
# Write the new request line to the cache file, which contains all lines
# that we shall try to send now.
cache_file_name = _GetCacheFileName(build_dir)
_AddLineToCacheFile(results_json, cache_file_name)
# Send all the results from this run and the previous cache to the dashboard.
fatal_error, errors = _SendResultsFromCache(cache_file_name, url)
# Print out a Buildbot link annotation.
link_annotation = _LinkAnnotation(url, data)
if link_annotation:
print link_annotation
# Print any errors; if there was a fatal error, it should be an exception.
for error in errors:
print error
if fatal_error:
print 'Error uploading to dashboard.'
print '@@@STEP_EXCEPTION@@@'
return False
return True
def _GetCacheFileName(build_dir):
"""Gets the cache filename, creating the file if it does not exist."""
cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_filename = os.path.join(cache_dir, CACHE_FILENAME)
if not os.path.exists(cache_filename):
# Create the file.
open(cache_filename, 'wb').close()
return cache_filename
def _AddLineToCacheFile(line, cache_file_name):
"""Appends a line to the given file."""
with open(cache_file_name, 'ab') as cache:
cache.write('\n' + line)
def _SendResultsFromCache(cache_file_name, url):
"""Tries to send each line from the cache file in a separate request.
This also writes data which failed to send back to the cache file.
Args:
cache_file_name: A file name.
url: The Performance Dashboard URL to send the results to.
Returns:
A pair (fatal_error, errors), where fatal_error is a boolean indicating
whether there was a major error and the step should fail, and errors
is a list of error strings.
"""
with open(cache_file_name, 'rb') as cache:
cache_lines = cache.readlines()
total_results = len(cache_lines)
fatal_error = False
errors = []
lines_to_retry = []
for index, line in enumerate(cache_lines):
line = line.strip()
if not line:
continue
print 'Sending result %d of %d to dashboard.' % (index + 1, total_results)
# TODO(rnephew): Get rid of this when crbug.com/525214 is resolved.
print line
# Check that the line that was read from the file is valid JSON. If not,
# don't try to send it, and don't re-try it later; just print an error.
if not _CanParseJSON(line):
errors.append('Could not parse JSON: %s' % line)
continue
error = _SendResultsJson(url, line)
# If the dashboard returned an error, we will re-try next time.
if error:
if 'HTTPError: 400' in error:
# If the remote app rejects the JSON, it's probably malformed,
# so we don't want to retry it.
print 'Discarding JSON, error:\n%s' % error
fatal_error = True
break
if index != len(cache_lines) - 1:
# The very last item in the cache_lines list is the new results line.
# If this line is not the new results line, then this results line
# has already been tried before; now it's considered fatal.
fatal_error = True
# The lines to retry are all lines starting from the current one.
lines_to_retry = [l.strip() for l in cache_lines[index:] if l.strip()]
errors.append(error)
break
# Write any failing requests to the cache file.
cache = open(cache_file_name, 'wb')
cache.write('\n'.join(set(lines_to_retry)))
cache.close()
return fatal_error, errors
def _CanParseJSON(my_json):
"""Returns True if the input can be parsed as JSON, False otherwise."""
try:
json.loads(my_json)
except ValueError:
return False
return True
def MakeListOfPoints(charts, bot, test_name, buildername,
buildnumber, supplemental_columns):
"""Constructs a list of point dictionaries to send.
The format output by this function is the original format for sending data
to the perf dashboard.
Args:
charts: A dictionary of chart names to chart data, as generated by the
log processor classes (see process_log_utils.GraphingLogProcessor).
bot: A string which comes from perf_id, e.g. linux-release.
test_name: A test suite name, e.g. sunspider.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_columns: A dictionary of extra data to send with a point.
Returns:
A list of dictionaries in the format accepted by the perf dashboard.
Each dictionary has the keys "master", "bot", "test", "value", "revision".
The full details of this format are described at http://goo.gl/TcJliv.
"""
results = []
# The master name used for the dashboard is the CamelCase name returned by
# GetActiveMaster(), and not the canonical master name with dots.
master = slave_utils.GetActiveMaster()
for chart_name, chart_data in sorted(charts.items()):
point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')
for trace_name, trace_values in sorted(chart_data['traces'].items()):
is_important = trace_name in chart_data.get('important', [])
test_path = _TestPath(test_name, chart_name, trace_name)
result = {
'master': master,
'bot': bot,
'test': test_path,
'revision': point_id,
'supplemental_columns': {}
}
# Add the supplemental_columns values that were passed in after the
# calculated revision column values so that these can be overwritten.
result['supplemental_columns'].update(revision_columns)
result['supplemental_columns'].update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
result['supplemental_columns'].update(supplemental_columns)
result['value'] = trace_values[0]
result['error'] = trace_values[1]
# Add other properties to this result dictionary if available.
if chart_data.get('units'):
result['units'] = chart_data['units']
if is_important:
result['important'] = True
results.append(result)
return results
def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, buildername,
buildnumber, supplemental_dict, is_ref):
"""Generates Dashboard JSON in the new Telemetry format.
See http://goo.gl/mDZHPl for more info on the format.
Args:
chart_json: A dict containing the telemetry output.
revision_dict: Dictionary of revisions to include, include "rev",
which determines the point ID.
test_name: A test suite name, e.g. sunspider.
bot: A string which comes from perf_id, e.g. linux-release.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_dict: A dictionary of extra data to send with a point;
this includes revisions and annotation data.
is_ref: True if this is a reference build, False otherwise.
Returns:
A dictionary in the format accepted by the perf dashboard.
"""
if not chart_json:
print 'Error: No json output from telemetry.'
print '@@@STEP_FAILURE@@@'
# The master name used for the dashboard is the CamelCase name returned by
# GetActiveMaster(), and not the canonical master name with dots.
master = slave_utils.GetActiveMaster()
point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')
supplemental = {}
for key in supplemental_dict:
if key.startswith('r_'):
versions[key.replace('r_', '', 1)] = supplemental_dict[key]
if key.startswith('a_'):
supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]
supplemental.update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
fields = {
'master': master,
'bot': bot,
'test_suite_name': test_name,
'point_id': point_id,
'supplemental': supplemental,
'versions': versions,
'chart_data': chart_json,
'is_ref': is_ref,
}
return fields
def _GetStdioUriColumn(test_name, buildername, buildnumber):
"""Gets a supplemental column containing buildbot stdio link."""
if not buildername or not buildnumber:
return {}
url = '%sbuilders/%s/builds/%s/steps/%s/logs/stdio' % (
_GetBuildBotUrl(),
urllib.quote(buildername),
urllib.quote(str(buildnumber)),
urllib.quote(test_name))
return _CreateLinkColumn('stdio_uri', 'Buildbot stdio', url)
def _CreateLinkColumn(name, label, url):
"""Returns a column containing markdown link to show on dashboard."""
return {'a_' + name: '[%s](%s)' % (label, url)}
def _GetBuildBotUrl():
"""Gets the buildbot URL which contains hostname and master name."""
return os.environ.get('BUILDBOT_BUILDBOTURL',
'http://build.chromium.org/p/chromium/')
def _GetTimestamp():
"""Get the Unix timestamp for the current time."""
return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
def _RevisionNumberColumns(data, prefix):
"""Get the revision number and revision-related columns from the given data.
Args:
data: A dict of information from one line of the log file.
prefix: Prefix for revision type keys. 'r_' for non-telemetry json, '' for
telemetry json.
Returns:
A tuple with the point id (which must be an int), and a dict of
revision-related columns.
"""
revision_supplemental_columns = {}
# The dashboard requires points' x-values to be integers, and points are
# ordered by these x-values. If data['rev'] can't be parsed as an int, assume
# that it's a git commit hash and use timestamp as the x-value.
try:
revision = int(data['rev'])
if revision and revision > 300000 and revision < 1000000:
# Revision is the commit pos.
# TODO(sullivan,qyearsley): use got_revision_cp when available.
revision_supplemental_columns[prefix + 'commit_pos'] = revision
except ValueError:
# The dashboard requires ordered integer revision numbers. If the revision
# is not an integer, assume it's a git hash and send a timestamp.
revision = _GetTimestamp()
revision_supplemental_columns[prefix + 'chromium'] = data['rev']
# For other revision data, add it if it's present and not undefined:
for key in ['webkit_rev', 'webrtc_rev', 'v8_rev']:
if key in data and data[key] != 'undefined':
revision_supplemental_columns[prefix + key] = data[key]
# If possible, also send the git hash.
if 'git_revision' in data and data['git_revision'] != 'undefined':
revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']
return revision, revision_supplemental_columns
def _TestPath(test_name, chart_name, trace_name):
"""Get the slash-separated test path to send.
Args:
test_name: Test name. Typically, this will be a top-level 'test suite' name.
chart_name: Name of a chart where multiple trace lines are grouped. If the
chart name is the same as the trace name, that signifies that this is
the main trace for the chart.
trace_name: The "trace name" is the name of an individual line on chart.
Returns:
A slash-separated list of names that corresponds to the hierarchy of test
data in the Chrome Performance Dashboard; doesn't include master or bot
name.
"""
# For tests run on reference builds by builds/scripts/slave/telemetry.py,
# "_ref" is appended to the trace name. On the dashboard, as long as the
# result is on the right chart, it can just be called "ref".
if trace_name == chart_name + '_ref':
trace_name = 'ref'
chart_name = chart_name.replace('_by_url', '')
# No slashes are allowed in the trace name.
trace_name = trace_name.replace('/', '_')
# The results for "test/chart" and "test/chart/*" will all be shown on the
# same chart by the dashboard. The result with path "test/chart" is considered
# the main trace for the chart.
test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
if chart_name == trace_name:
test_path = '%s/%s' % (test_name, chart_name)
return test_path
def _SendResultsJson(url, results_json):
"""Make a HTTP POST with the given JSON to the Performance Dashboard.
Args:
url: URL of Performance Dashboard instance, e.g.
"https://chromeperf.appspot.com".
results_json: JSON string that contains the data to be sent.
Returns:
None if successful, or an error string if there were errors.
"""
# When data is provided to urllib2.Request, a POST is sent instead of GET.
# The data must be in the application/x-www-form-urlencoded format.
data = urllib.urlencode({'data': results_json})
req = urllib2.Request(url + SEND_RESULTS_PATH, data)
try:
urllib2.urlopen(req)
except urllib2.HTTPError as e:
return ('HTTPError: %d. Response: %s\n'
'JSON: %s\n' % (e.code, e.read(), results_json))
except urllib2.URLError as e:
return 'URLError: %s for JSON %s\n' % (str(e.reason), results_json)
except httplib.HTTPException as e:
return 'HTTPException for JSON %s\n' % results_json
return None
def _LinkAnnotation(url, data):
"""Prints a link annotation with a link to the dashboard if possible.
Args:
url: The Performance Dashboard URL, e.g. "https://chromeperf.appspot.com"
data: The data that's being sent to the dashboard.
Returns:
An annotation to print, or None.
"""
if not data:
return None
if isinstance(data, list):
master, bot, test, revision = (
data[0]['master'], data[0]['bot'], data[0]['test'], data[0]['revision'])
else:
master, bot, test, revision = (
data['master'], data['bot'], data['chart_data']['benchmark_name'],
data['point_id'])
results_link = url + RESULTS_LINK_PATH % (
urllib.quote(master), urllib.quote(bot), urllib.quote(test.split('/')[0]),
revision)
return '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link)
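# Hedged usage sketch (not part of the original module): one way MakeListOfPoints
# and SendResults might be wired together. The chart dict mirrors the docstrings
# above; the URL, builder name and build_dir values are placeholders.
#
# charts = {
#     'sunspider': {
#         'traces': {'sunspider': [150.0, 2.5]},  # [value, error]
#         'rev': '123456',
#         'units': 'ms',
#     },
# }
# points = MakeListOfPoints(charts, 'linux-release', 'sunspider',
#                           'Linux Builder', 42, supplemental_columns={})
# SendResults(points, 'https://chromeperf.appspot.com', '/path/to/build_dir')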
|
from binance.client import Client
import pandas as pd
from utils import configure_logging
from multiprocessing import Process, freeze_support, Pool, cpu_count
import os
try:
from credentials import API_KEY, API_SECRET
except ImportError:
API_KEY = API_SECRET = None
exit("CAN'T RUN SCRIPT WITHOUT BINANCE API KEY/SECRET")
log = configure_logging()
class FuturesDataPuller(Process):
SYMBOLS = ['BTCUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT']
# 'BTCUSDT', 'XRPUSDT', 'SXPUSDT', 'ADAUSDT', 'EOSUSDT', 'DOTUSDT', 'VETUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT'
KLINE_INTERVALS = ['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', '6h', '8h', '12h', '1d', '3d', '1w', '1M']
def __init__(self, client, symbol, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
self.symbol = symbol
def run(self):
klines = self.get_klines(
interval='1m',
start_date="15 Feb 2021 00:00:00",
end_date="19 Feb 2021 12:00:00",
)
funding_rates = self.get_funding_rate(klines=klines)
df = self.reformat_data(klines=klines, funding_rates=funding_rates)
self.to_csv(df=df)
def get_klines(self, interval, start_date, end_date):
"""
:param interval: str, one of the supported intervals from KLINES_INTERVALS list
:param start_date: str, format 'DD MMM YYYY HH:mm:ss'
:param end_date: str
:return: list of lists with klines[Open_time: int,
Open: Decimal,
High: Decimal,
Low: Decimal,
Close: Decimal,
Volume: Decimal,
Close_time: int,
Quote asset volume: Decimal,
Number of trades: int,
Taker buy base asset volume: Decimal,
Taker buy quote asset volume: Decimal,
Ignore: Decimal]
"""
try:
data = self.client.get_historical_klines(
symbol=self.symbol,
interval=interval,
start_str=start_date,
end_str=end_date,
)
except Exception as exc:
log.exception(exc)
return {'msg': exc}
return data
def get_funding_rate(self, klines: list):
"""
Uses first and last kline time to get funding rates for that period
:param klines: trade klines
:return: list of dicts(symbol=(str), fundingTime=(int), fundingRate=(Decimal))
"""
start, bypass_limit, end = klines[0][0], klines[int(len(klines)/2)][0], klines[len(klines) - 1][0]
try:
data = self.client.futures_funding_rate(
symbol=self.symbol,
startTime=start,
endTime=bypass_limit,
)
data_2 = self.client.futures_funding_rate(
symbol=self.symbol,
startTime=bypass_limit,
endTime=end,
)
except Exception as exc:
log.exception(exc)
return {'msg': exc}
for instance in data_2:
data.append(instance)
return data
def to_csv(self, df):
"""
:param df: pd.DataFrame obj.
:return: .csv file with data
"""
file_directory = 'data'
file_full_path = os.path.join(file_directory, f'{self.symbol}.csv')
if not os.path.exists(file_directory):
os.makedirs(name=file_directory)
df.to_csv(path_or_buf=file_full_path, sep=',')
@staticmethod
def reformat_data(klines, funding_rates):
"""
:return: pd.DataFrame obj. with required_data
"""
df = pd.DataFrame.from_records(klines)
df = df.drop(range(5, 12), axis=1)
col_names = ['time', 'open', 'high', 'low', 'close']
df.columns = col_names
for col in col_names:
df[col] = df[col].astype(float)
df['date'] = pd.to_datetime(df['time'] * 1000000, format='%Y-%m-%d %H:%M:%S')
df['date'] = df['date'].dt.floor('s')
df['close_diff'] = (df['close'] - df['open']) / df['open']
df['fundingRate'] = None
df_fund = pd.DataFrame.from_records(funding_rates)
df_fund = df_fund.drop(columns='symbol')
for column in df_fund.columns:
df_fund[column] = df_fund[column].astype(float)
df_fund['date'] = pd.to_datetime(df_fund['fundingTime'] * 1000000, format='%Y-%m-%d %H:%M:%S')
df_fund['date'] = df_fund['date'].dt.floor('s')
for ind, date in enumerate(df['date']):
for ind_2, date_2 in enumerate(df_fund['date']):
if date == date_2:
df.iat[ind, 7] = df_fund.iat[ind_2, 1]
return df
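# Optional design note (not part of the original class): the nested date-matching
# loops in reformat_data could be replaced with a left join on 'date'. A minimal
# sketch, assuming the column layout produced above:
def merge_funding_rates(df, df_fund):
    """Attach fundingRate to the kline frame by matching timestamps.

    x_cols assumed: df has a placeholder 'fundingRate' column and a 'date'
    column; df_fund has 'date' and 'fundingRate' columns.
    """
    # Drop the placeholder column and join the funding rates where dates match.
    return df.drop(columns='fundingRate').merge(
        df_fund[['date', 'fundingRate']], on='date', how='left')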
def main():
client = Client(API_KEY, API_SECRET)
pool = Pool(processes=cpu_count())
pullers = []
for symbol in FuturesDataPuller.SYMBOLS:
puller = FuturesDataPuller(client=client, symbol=symbol)
pullers.append(puller)
for puller in pullers:
pool.apply_async(puller.run)  # pass the callable; puller.run() would execute it synchronously here
while True:
if not any(puller.is_alive() for puller in pullers):
break
pool.close()
pool.join()
if __name__ == '__main__':
freeze_support()
main()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @author: James Zhang
# @date : 2021/3/15
from functools import wraps
from selenium.common.exceptions import NoSuchElementException, \
TimeoutException, StaleElementReferenceException
import allure
from easy_automation.utils.custom_logging import Logs
log = Logs(__name__)
AFTER_ERROR_EVENTS = []
def after_error(func):
"""
define if location element error, will trigger function
this function will return false or True.
execute successfully should return True,
execute failed should return False.
note: this func should always not error occurred.
and only have a param: driver
:param func:
:return:
"""
AFTER_ERROR_EVENTS.append(func)
return func
def after_error_hook(func):
@wraps(func)
def wrap(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except (NoSuchElementException, TimeoutException,
StaleElementReferenceException) as exc:
for event in AFTER_ERROR_EVENTS:
res = event(self.driver)
if res:
# use handle_blacklist decorator call func again,
# to deal with multiple blacklist display at same time.
return after_error_hook(func)(self, *args, **kwargs)
else:
allure.attach(self.screenshot(), attachment_type=allure.attachment_type.PNG)
log.error(f'Element find failed: {exc}')
raise exc
return wrap
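# Hedged usage sketch (not in the original module): registering a recovery
# handler with @after_error. The popup locator below is hypothetical; the only
# contract taken from the code above is "accept the driver, return True/False,
# never raise".
#
# @after_error
# def dismiss_update_popup(driver):
#     try:
#         driver.find_element("id", "update-popup-close").click()
#         return True
#     except NoSuchElementException:
#         return False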
|
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = self.right = None
def insert(nums: List[int]) -> TreeNode:
root = TreeNode(nums[0])
for node in nums[1:]:
insert_helper(root, node)
return root
def insert_helper(root: TreeNode, node: int) -> None:
parent, current = None, root
while current is not None:
parent = current
if node < current.val:
current = current.left
else:
current = current.right
if node < parent.val:
parent.left = TreeNode(node)
else:
parent.right = TreeNode(node)
def in_order_traversal(root: TreeNode) -> None:
stack = []
while root or stack:
while root:
stack.append(root)
root = root.left
root = stack.pop()
print(root.val, end=" ")
root = root.right
def post_order_traversal(root: TreeNode) -> None:
stack = []
while len(stack) > 0 or root is not None:
while root is not None:
stack.append(root)
root = root.left
temp = stack[-1].right
if temp is None:
temp = stack.pop()
print(temp.val, end=" ")
while len(stack) > 0 and temp == stack[-1].right:
temp = stack.pop()
print(temp.val, end=" ")
else:
root = temp
def level_order_traversal(root: TreeNode) -> list:
queue = [root]
res = []
while queue:
curr = queue.pop(0)
res += [[curr.val]]
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
return res
nodes = [7, 5, 11, 1, 6, 9, 15]
r = insert(nodes)
#
# in_order_traversal(r)
# print()
post_order_traversal(r)
print()
# print(level_order_traversal(r))
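# Worked example (added note): for nodes = [7, 5, 11, 1, 6, 9, 15] the BST is
#         7
#       /   \
#      5     11
#     / \   /  \
#    1   6 9   15
# so the traversals above print:
#   in-order:    1 5 6 7 9 11 15
#   post-order:  1 6 5 9 15 11 7
#   level-order: [[7], [5], [11], [1], [6], [9], [15]]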
|
from peewee import SqliteDatabase
from . import db
from .models import *
def create_staff(forename, surname, username, password):
Staff.create(forename=forename, surname=surname, username=username, password=password)  # peewee's Model.create only accepts keyword arguments
def create_module(code, name, leaders, assessors=None):
pass
|
"""
Example UCT implementation in Python, which (with a Java wrapper) can be used
to play in the Ludii general game system.
The implementation is based on our Example UCT implementation in Java
(see: https://github.com/Ludeme/LudiiExampleAI/blob/master/src/mcts/ExampleUCT.java)
NOTE: because we don't extend the abstract AI class from Java, we can't inherit
the "wantsInterrupt" flag and hence can't make our AI automatically stop
when the Pause button is pressed in the GUI.
@author Dennis Soemers
"""
import math
import numpy as np
import random
import time
def rank_to_util(rank, num_players):
"""
Helper method to convert a rank into a utility value
(copied from AIUtils in the Ludii Java code)
:param rank:
:param num_players:
:return:
"""
if num_players == 1:
return 2.0 * rank - 1.0
else:
return 1.0 - ((rank - 1.0) * (2.0 / (num_players - 1)))
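# Worked example (added note): in a 2-player game, rank 1.0 maps to
# 1.0 - (1.0 - 1.0) * (2.0 / 1) = 1.0 and rank 2.0 maps to
# 1.0 - (2.0 - 1.0) * (2.0 / 1) = -1.0, so utilities land in [-1.0, 1.0].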
def utilities(context):
"""
Helper method to compute an array of utility values for a given
Ludii context object (copied from AIUtils in the Ludii Java code)
:param context:
:return:
"""
ranking = context.trial().ranking()
utils = np.zeros(len(ranking))
num_players = len(ranking) - 1
for p in range(1, len(ranking)):
rank = ranking[p]
if num_players > 1 and rank == 0.0:
rank = context.computeNextDrawRank()
utils[p] = rank_to_util(rank, num_players)
return utils
class UCT:
"""
UCT class in Python, implements Ludii's (Java) abstract class "AI"
"""
def __init__(self):
"""
Constructor
"""
self._player_id = -1
def init_ai(self, game, player_id):
"""
Initialises the AI
:param game:
:param player_id:
"""
self._player_id = player_id
def select_action(self,
game,
context,
max_seconds,
max_iterations,
max_depth):
"""
Returns an action to play
:param game:
:param context:
:param max_seconds:
:param max_iterations:
:param max_depth:
:return:
"""
# Start out by creating a new root node (no tree reuse in this example)
root = Node(None, None, context)
num_players = game.players().count()
# We'll respect any limitations on max seconds and max iterations
# (don't care about max depth)
stop_time = time.time() + max_seconds if max_seconds > 0.0 else math.inf
max_its = max_iterations if max_iterations > 0 else math.inf
num_iterations = 0
# Our main loop through MCTS iterations
while num_iterations < max_its and time.time() < stop_time:
# Start in root node
current = root
# Traverse tree
while True:
if current.context.trial().over():
# We've reached a terminal state
break
current = self.select(current)
if current.visit_count == 0:
# We've expanded a new node, time for playout!
break
context_end = current.context
if not context_end.trial().over():
# Run a playout if we don't already have a terminal
# game state in node
context_end = context_end.deepCopy()
game.playout(context_end,
None,
-1.0,
None,
0,
-1,
None)
# This computes utilities for all players at the end of the playout,
# which will all be values in [-1.0, 1.0]
utils = utilities(context_end)
# Backpropagate utilities through the tree
while current is not None:
current.visit_count += 1
for p in range(1, num_players + 1):  # utilities are indexed 1..num_players
current.score_sums[p] += utils[p]
current = current.parent
# Increment iteration count
num_iterations += 1
# Return the move we wish to play
return self.final_move_selection(root)
def select(self, current):
"""
UCB1 Selection (+ Expansion phase)
:param current:
:return:
"""
if len(current.unexpanded_moves) > 0:
# Randomly select an unexpanded move (already shuffled,
# so just remove last element)
move = current.unexpanded_moves.pop()
# Create a copy of context
context = current.context.deepCopy()
# Apply the move
context.game().apply(context, move)
# Create new node and return it
return Node(current, move, context)
# Use UCB1 equation to select from all children,
# with random tie-breaking
best_child = None
best_value = -math.inf
two_parent_log = 2.0 * math.log(max(1, current.visit_count))
num_best_found = 0
num_children = len(current.children)
mover = current.context.state().mover()
for i in range(num_children):
child = current.children[i]
exploit = child.score_sums[mover] / child.visit_count
explore = math.sqrt(two_parent_log / child.visit_count)
ucb1_value = exploit + explore
if ucb1_value > best_value:
best_value = ucb1_value
best_child = child
num_best_found = 1
elif ucb1_value == best_value:
rand = random.randint(0, num_best_found)  # inclusive bounds: num_best_found + 1 outcomes
if rand == 0:
best_child = child
num_best_found += 1
return best_child
def final_move_selection(self, root_node):
"""
Selects final move to play in the real game (uses the Robust Child
strategy)
:param root_node:
:return:
"""
best_child = None
best_visit_count = -math.inf
num_best_found = 0
num_children = len(root_node.children)
for i in range(num_children):
child = root_node.children[i]
visit_count = child.visit_count
if visit_count > best_visit_count:
best_visit_count = visit_count
best_child = child
num_best_found = 1
elif visit_count == best_visit_count:
# This case implements random tie-breaking
rand = random.randint(0, num_best_found)  # inclusive bounds: num_best_found + 1 outcomes
if rand == 0:
best_child = child
num_best_found += 1
return best_child.move_from_parent
class Node:
"""
Class for Nodes in the search tree built by UCT
"""
def __init__(self, parent, move_from_parent, context):
"""
Constructs a new node
:param parent: Parent node
:param move_from_parent: Move that leads from parent to this node
:param context: Context / game state
"""
self.visit_count = 0
self.children = []
self.parent = parent
self.move_from_parent = move_from_parent
self.context = context
game = self.context.game()
self.score_sums = np.zeros(game.players().count() + 1)
legal_moves = game.moves(context).moves()
num_legal_moves = legal_moves.size()
self.unexpanded_moves = [legal_moves.get(i) for i in range(num_legal_moves)]
random.shuffle(self.unexpanded_moves)
if parent is not None:
parent.children.append(self)
|
from pandas import DataFrame, concat
from pandas.core.apply import frame_apply
from sklearn import linear_model
from smart_fruit.feature_class import FeatureClassMeta
from smart_fruit.model_selection import train_test_split
from smart_fruit.utils import csv_open
__all__ = ["Model"]
class ModelMeta(type):
def __init__(cls, name, bases, namespace):
super().__init__(name, bases, namespace)
for feature_type in ['Input', 'Output']:
feature_class = getattr(cls, feature_type)
setattr(cls, feature_type, FeatureClassMeta(feature_class.__name__, (feature_class,), {}))
class Model(metaclass=ModelMeta):
model_class = linear_model.LinearRegression
class Input:
pass
class Output:
pass
def __init__(self, *args, **kwargs):
self.model = self.model_class(*args, **kwargs)
@classmethod
def input_features_from_list(cls, lists):
for l in lists:
yield cls.Input(*l).validate()
@classmethod
def input_features_from_json(cls, json):
for feature in json:
yield cls.Input.from_json(feature).validate()
@classmethod
def input_features_from_csv(cls, csv_path):
yield from cls.input_features_from_json(csv_open(csv_path, cls.Input._fields))
@classmethod
def features_from_list(cls, lists):
for l in lists:
yield cls.Input(*l[:len(cls.Input._fields)]).validate(), cls.Output(*l[len(cls.Input._fields):]).validate()
@classmethod
def features_from_json(cls, json):
for feature in json:
yield cls.Input.from_json(feature).validate(), cls.Output.from_json(feature).validate()
@classmethod
def features_from_csv(cls, csv_path):
yield from cls.features_from_json(csv_open(csv_path, cls.Input._fields + cls.Output._fields))
@staticmethod
def _to_raw_features(dataframe, feature_class):
return concat([
column.apply(feature_type.to_series)
for (i, column), feature_type in zip(dataframe.iteritems(), feature_class)
], axis=1)
def _dataframes_from_features(self, features):
dataframe = DataFrame(list(input_) + list(output) for input_, output in features)
input_dataframe = self._to_raw_features(dataframe, self.Input)
output_dataframe = self._to_raw_features(dataframe.loc[:, len(self.Input):], self.Output)
return input_dataframe, output_dataframe
@classmethod
def train(cls, features, train_test_split_ratio=None, test_sample_count=None, random_state=None):
if train_test_split_ratio is not None or test_sample_count is not None:
train_features, test_features = train_test_split(
features,
train_test_split_ratio=train_test_split_ratio,
test_sample_count=test_sample_count,
random_state=random_state
)
model = cls.train(train_features)
return model, model.score(test_features)
model = cls()
model.model.fit(*model._dataframes_from_features(features))
return model
def score(self, features):
return self.model.score(*self._dataframes_from_features(features))
@staticmethod
def _chunk_dataframe(dataframe, feature_types):
start = 0
for feature_type in feature_types:
chunk = dataframe.loc[:, start:start + feature_type.feature_count - 1]
chunk.columns = range(len(chunk.columns))
yield chunk, feature_type
start += feature_type.feature_count
def predict(self, input_features, yield_inputs=False):
input_features_dataframe = DataFrame(input_features)
raw_features = self._to_raw_features(input_features_dataframe, self.Input)
raw_prediction_dataframe = DataFrame(self.model.predict(raw_features))
# Using frame_apply instead of chunk.apply as chunk.apply doesn't behave as expected for 0 columns
prediction_dataframe = concat([
frame_apply(chunk, feature_type.from_series, axis=1).apply_standard()
for chunk, feature_type in self._chunk_dataframe(raw_prediction_dataframe, self.Output)
], axis=1)
if yield_inputs:
for (_, input_series), (_, output_series) in zip(
input_features_dataframe.iterrows(),
prediction_dataframe.iterrows()
):
yield self.Input(*input_series), self.Output(*output_series)
else:
for _, output_series in prediction_dataframe.iterrows():
yield self.Output(*output_series)
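# Hedged usage sketch (not part of this module). The concrete feature-type
# classes live elsewhere in smart_fruit, so "Number" below is a stand-in
# assumption; only features_from_list/train/predict are taken from the code
# above.
#
# class FruitPrice(Model):
#     class Input:
#         weight = Number()
#         sweetness = Number()
#
#     class Output:
#         price = Number()
#
# features = list(FruitPrice.features_from_list([[100, 3, 1.5], [150, 5, 2.5]]))
# model = FruitPrice.train(features)
# for prediction in model.predict(FruitPrice.input_features_from_list([[120, 4]])):
#     print(prediction.price)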
|
from django.contrib import admin
from .models import Department
from modeltranslation.admin import TranslationAdmin
# Register your models here.
@admin.register(Department)
class DepartmentAdmin(TranslationAdmin):
list_display = ("name", "short_description")
prepopulated_fields = {"slug": ("name",)}
filter_horizontal = ('staff',)
|
#coding: utf-8
__author__ = "Lário dos Santos Diniz"
import requests
import json
class ApiConnection:
def __init__(self, url = ""):
self._url = 'http://127.0.0.1:8080/'
if url != '':
self._url = url
def checks_cutting_work(self, job_id):
try:
r = requests.get(self._url+'api/cutting_jobs/{}/'.format(job_id))
except requests.exceptions.ConnectionError:
return 'Erro de conexão'
if r.status_code == 200:
retorno = r.json()
return retorno['message']
def post_program(self, program, path):
p = {
'title':program.title,
'start_time':program.start_time,
'end_time':program.end_time,
'path':path}
headers = {'content-type': 'application/json'}
try:
r = requests.post(self._url+'api/programs/', data=json.dumps(p), headers=headers)
except requests.exceptions.ConnectionError:
return 'Erro de conexão'
if r.status_code == 201:
return ('success', r.json())
else:
return 'Erro {}'.format(r.status_code)
def post_globo_play(self, video_process):
p = {
'title':video_process[0],
'name':video_process[1],
'duration':video_process[2]
}
headers = {'content-type': 'application/json'}
try:
r = requests.post(self._url+'api/globo_play/', data=json.dumps(p), headers=headers)
except requests.exceptions.ConnectionError:
return 'Erro de conexão'
if r.status_code == 201:
return ('success', r.json())
else:
return 'Erro {}'.format(r.status_code)
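# Hedged usage sketch (not part of this class): how a caller might post a
# program record. The Program object and file path are placeholders.
#
# api = ApiConnection('http://127.0.0.1:8080/')
# result = api.post_program(program, '/media/videos/program.mp4')
# if isinstance(result, tuple) and result[0] == 'success':
#     print(result[1])
# else:
#     print(result)  # connection or HTTP error message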
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
df = pd.read_csv("data/reglab1.txt", sep="\t")
# print(df)
X = np.asarray(df.iloc[:, -1]).reshape(-1, 1)
y = np.asarray(df.iloc[:, 1])
clf = LinearRegression().fit(X, y)
print("\nx(y):", clf.score(X, y))
X = np.asarray(df.iloc[:, -1]).reshape(-1, 1)
y = np.asarray(df.iloc[:, 0])
clf = LinearRegression().fit(X, y)
print("x(z):", clf.score(X, y))
X = np.asarray(df.iloc[:, 1]).reshape(-1, 1)
y = np.asarray(df.iloc[:, -1])
clf = LinearRegression().fit(X, y)
print("y(x):", clf.score(X, y))
X = np.asarray(df.iloc[:, 0]).reshape(-1, 1)
y = np.asarray(df.iloc[:, -1])
clf = LinearRegression().fit(X, y)
print("y(z):", clf.score(X, y))
X = np.asarray(df.iloc[:, 1]).reshape(-1, 1)
y = np.asarray(df.iloc[:, 0])
clf = LinearRegression().fit(X, y)
print("z(x):", clf.score(X, y))
X = np.asarray(df.iloc[:, -1]).reshape(-1, 1)
y = np.asarray(df.iloc[:, 0])
clf = LinearRegression().fit(X, y)
print("z(y):", clf.score(X, y))
X = np.asarray(df.iloc[:, 1:])
y = np.asarray(df.iloc[:, 0])
clf = LinearRegression().fit(X, y)
print("z(x, y):", clf.score(X, y))
X = np.asarray(df.iloc[:, 0:-1])
y = np.asarray(df.iloc[:, -1])
clf = LinearRegression().fit(X, y)
print("y(x, z):", clf.score(X, y))
y = np.asarray(df.iloc[:, 1])
df.drop('x', axis=1, inplace=True)
X = np.asarray(df)
clf = LinearRegression().fit(X, y)
print("x(y, z):", clf.score(X, y))
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from django import template
register = template.Library()
@register.filter(name='preview_position_h')
def preview_position_h(value):
result = ""
if value % 5 + 1 > 3:
result = "from-right"
return result
@register.simple_tag
def preview_position_v(length, index):
result = ""
rounded_length = 5 * math.ceil(length / 5.0)  # avoid shadowing the built-in round()
if length > 5 and index > rounded_length - 5:
result = "from-bottom"
return result
@register.filter
def is_registered(box, user):
return box.is_registered(user)
|
# -*- coding: utf-8 -*-
# Line segmentation script for images with PAGE xml.
# Derived from the `import_from_larex.py` script, parts of kraken and ocropy,
# with additional tweaks e.g. pre rotation of text regions
#
# nashi project:
# https://github.com/andbue/nashi
# ocropy:
# https://github.com/tmbdev/ocropy/
# kraken:
# https://github.com/mittagessen/kraken
from ocr4all_helper_scripts.lib import imgmanipulate, morph, sl, pseg, nlbin
from ocr4all_helper_scripts.utils.datastructures import Record
from ocr4all_helper_scripts.utils import pageutils, imageutils
from pathlib import Path
import sys
from typing import List, Tuple
import logging
import numpy as np
from skimage.measure import find_contours, approximate_polygon
from skimage.draw import line_aa
from scipy.ndimage.filters import gaussian_filter, uniform_filter
import math
from lxml import etree
from PIL import Image, ImageChops
from shapely.geometry import Polygon
# Add printing for every thread
from threading import Lock
s_print_lock = Lock()
logging.getLogger('shapely.geos').setLevel(logging.CRITICAL)
def s_print(*args, **kwargs):
with s_print_lock:
print(*args, **kwargs)
def s_print_error(*objs):
s_print("ERROR: ", *objs, file=sys.stderr)
def compute_lines(segmentation: np.ndarray,
smear_strength: Tuple[float, float],
scale: int,
growth: Tuple[float, float],
max_iterations: int,
filter_strength: float,
bounding_box: bool) -> List[Record]:
"""Given a line segmentation map, computes a list of tuples consisting of 2D slices and masked images.
Implementation derived from ocropy with changes to allow extracting the line coords/polygons.
"""
lobjects = morph.find_objects(segmentation)
lines = []
for i, o in enumerate(lobjects):
if o is None:
continue
if sl.dim1(o) < 2 * scale * filter_strength or sl.dim0(o) < scale * filter_strength:
s_print(f"Filter strength of {filter_strength} too high. Skipping detected line object...")
continue
mask = (segmentation[o] == i + 1)
if np.amax(mask) == 0:
continue
result = Record()
result.label = i + 1
result.bounds = o
polygon = []
if ((segmentation[o] != 0) == (segmentation[o] != i + 1)).any() and not bounding_box:
ppoints = approximate_smear_polygon(mask, smear_strength, growth, max_iterations)
ppoints = ppoints[1:] if ppoints else []
polygon = [(o[1].start + x, o[0].start + y) for x, y in ppoints]
if not polygon:
polygon = [(o[1].start, o[0].start), (o[1].stop, o[0].start),
(o[1].stop, o[0].stop), (o[1].start, o[0].stop)]
result.polygon = polygon
result.mask = mask
lines.append(result)
return lines
def compute_gradmaps(binary: np.array, scale: float, vscale: float = 1.0, hscale: float = 1.0,
usegauss: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Use gradient filtering to find baselines
"""
boxmap = pseg.compute_boxmap(binary, scale)
cleaned = boxmap * binary
if usegauss:
# this uses Gaussians
grad = gaussian_filter(1.0 * cleaned, (vscale * 0.3 * scale, hscale * 6 * scale), order=(1, 0))
else:
# this uses non-Gaussian oriented filters
grad = gaussian_filter(1.0 * cleaned, (max(4, vscale * 0.3 * scale), hscale * scale), order=(1, 0))
grad = uniform_filter(grad, (vscale, hscale * 6 * scale))
def norm_max(a):
return a / np.amax(a)
bottom = norm_max((grad < 0) * (-grad))
top = norm_max((grad > 0) * grad)
return bottom, top, boxmap
def boundary(contour: np.ndarray) -> List[np.float64]:
"""Calculates boundary of contour
"""
x_min = np.min(contour[:, 0])
x_max = np.max(contour[:, 0])
y_min = np.min(contour[:, 1])
y_max = np.max(contour[:, 1])
return [x_min, x_max, y_min, y_max]
def approximate_smear_polygon(line_mask: np.ndarray, smear_strength: Tuple[float, float] = (1.0, 2.0),
growth: Tuple[float, float] = (1.1, 1.1), max_iterations: int = 50):
"""Approximate a single polygon around high pixels in a mask, via smearing
"""
padding = 1
work_image = np.pad(np.copy(line_mask), pad_width=padding, mode='constant', constant_values=False)
contours = find_contours(work_image, 0.5, fully_connected="low")
if len(contours) > 0:
iteration = 1
while len(contours) > 1:
# Get bounds with dimensions
bounds = [boundary(contour) for contour in contours]
widths = [b[1] - b[0] for b in bounds]
heights = [b[3] - b[2] for b in bounds]
# Calculate x and y median distances (or at least 1)
width_median = sorted(widths)[int(len(widths) / 2)]
height_median = sorted(heights)[int(len(heights) / 2)]
# Calculate x and y smear distance
smear_distance_x = math.ceil(width_median * smear_strength[0] * (iteration * growth[0]))
smear_distance_y = math.ceil(height_median * smear_strength[1] * (iteration * growth[1]))
# Smear image in x and y direction
height, width = work_image.shape
gaps_current_x = [float('Inf')] * height
for x in range(width):
gap_current_y = float('Inf')
for y in range(height):
if work_image[y, x]:
# Entered Contour
gap_current_x = gaps_current_x[y]
if gap_current_y < smear_distance_y and gap_current_y > 0:
# Draw over
work_image[y - gap_current_y:y, x] = True
if gap_current_x < smear_distance_x and gap_current_x > 0:
# Draw over
work_image[y, x - gap_current_x:x] = True
gap_current_y = 0
gaps_current_x[y] = 0
else:
# Entered/Still in Gap
gap_current_y += 1
gaps_current_x[y] += 1
# Find contours of current smear
contours = find_contours(work_image, 0.5, fully_connected="low")
# Fail-safe if the contours can't be smeared together after max_iterations
# Draw lines between the extreme points of each contour in order
if iteration >= max_iterations and len(contours) > 1:
s_print(f"Start fail save, since precise line generation took too many iterations ({max_iterations}).")
extreme_points = []
for contour in contours:
sorted_x = sorted(contour, key=lambda c: c[0])
sorted_y = sorted(contour, key=lambda c: c[1])
extreme_points.append((tuple(sorted_x[0]), tuple(sorted_y[0]),
tuple(sorted_x[-1]), tuple(sorted_y[-1])))
sorted_extreme = sorted(extreme_points, key=lambda e: e)
for c1, c2 in zip(sorted_extreme, sorted_extreme[1:]):
for p1 in c1:
nearest = None
nearest_dist = math.inf
for p2 in c2:
distance = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
if distance < nearest_dist:
nearest = p2
nearest_dist = distance
if nearest:
# Draw line between nearest points
yy, xx, _ = line_aa(int(p1[0]), int(p1[1]), int(nearest[0]), int(nearest[1]))  # endpoints: p1 and its nearest counterpart
# Remove border points
line_points = [(x, y) for x, y in zip(xx, yy) if 0 < x < width and 0 < y < height]
xx_filtered, yy_filtered = zip(*line_points)
# Paint
work_image[yy_filtered, xx_filtered] = True
contours = find_contours(work_image, 0.5, fully_connected="low")
iteration += 1
return [(p[1] - padding, p[0] - padding) for p in approximate_polygon(contours[0], 0.1)]
return []
def segment(im: Image, scale: float = None, max_blackseps: int = 0, widen_blackseps: int = 10, max_whiteseps: int = 3,
minheight_whiteseps: int = 10, filter_strength: float = 1.0,
smear_strength: Tuple[float, float] = (1.0, 2.0), growth: Tuple[float, float] = (1.1, 1.1),
orientation: int = 0, fail_save_iterations: int = 50, vscale: float = 1.0, hscale: float = 1.0,
minscale: float = 12.0, maxlines: int = 300, threshold: float = 0.2, usegauss: bool = False,
bounding_box: bool = False):
"""
Segments a page into text lines.
Segments a page into text lines and returns the absolute coordinates of
each line in reading order.
"""
colors = im.getcolors(2)
if (im.mode not in ['1', "L"]) and not (colors is not None and len(colors) == 2):
raise ValueError('Image is not bi-level')
# rotate input image for vertical lines
im_rotated = im.rotate(-orientation, expand=True, fillcolor="white")
a = np.array(im_rotated.convert('L')) if im_rotated.mode == '1' else np.array(im_rotated)
binary = np.array(a > 0.5 * (np.amin(a) + np.amax(a)), 'i')
binary = 1 - binary
if not scale:
scale = pseg.estimate_scale(binary)
if scale < minscale:
s_print_error(f"scale ({scale}) less than --minscale; skipping")
return
binary = pseg.remove_hlines(binary, scale)
# emptyish images will cause exceptions here.
try:
colseps, binary = pseg.compute_colseps(binary,
scale,
max_blackseps,
widen_blackseps,
max_whiteseps,
minheight_whiteseps)
except ValueError:
return []
bottom, top, boxmap = compute_gradmaps(binary, scale, vscale, hscale, usegauss)
seeds = pseg.compute_line_seeds(binary, bottom, top, colseps, scale, threshold=threshold)
llabels1 = morph.propagate_labels(boxmap, seeds, conflict=0)
spread = morph.spread_labels(seeds, maxdist=scale)
llabels = np.where(llabels1 > 0, llabels1, spread * binary)
segmentation = llabels * binary
if np.amax(segmentation) > maxlines:
s_print_error(f"too many lines {np.amax(segmentation)}")
return
lines_and_polygons = compute_lines(segmentation,
smear_strength,
scale,
growth,
fail_save_iterations,
filter_strength,
bounding_box)
# Translate each point back to original
delta_x = (im_rotated.width - im.width) / 2
delta_y = (im_rotated.height - im.height) / 2
center_x = im_rotated.width / 2
center_y = im_rotated.height / 2
def translate_back(point):
# rotate point around center
orient_rad = -1 * orientation * (math.pi / 180)
rotated_x = ((point[0] - center_x) * math.cos(orient_rad)
- (point[1] - center_y) * math.sin(orient_rad)
+ center_x)
rotated_y = ((point[0] - center_x) * math.sin(orient_rad)
+ (point[1] - center_y) * math.cos(orient_rad)
+ center_y)
# move point
return int(rotated_x - delta_x), int(rotated_y - delta_y)
return [[translate_back(p) for p in record.polygon] for record in lines_and_polygons]
def pagelineseg(xmlfile: str,
imgpath: str,
scale: float = None,
vscale: float = 1.0,
hscale: float = 1.0,
max_blackseps: int = 0,
widen_blackseps: int = 10,
max_whiteseps: int = -1,
minheight_whiteseps: int = 10,
minscale: float = 12.0,
maxlines: int = 300,
smear_strength: Tuple[float, float] = (1.0, 2.0),
growth: Tuple[float, float] = (1.1, 1.1),
filter_strength: float = 1.0,
fail_save_iterations: int = 50,
maxskew: float = 2.0,
skewsteps: int = 8,
usegauss: bool = False,
remove_images: bool = False,
bounding_box: bool = False):
name = Path(imgpath).name.split(".")[0]
s_print(f"""Start process for '{name}'
|- Image: '{imgpath}'
|- Annotations: '{xmlfile}' """)
root = pageutils.get_root(xmlfile)
s_print(f"[{name}] Retrieve TextRegions")
pageutils.convert_point_notation(root)
coordmap = pageutils.construct_coordmap(root)
s_print(f"[{name}] Extract Textlines from TextRegions")
im = Image.open(imgpath)
width, height = im.size
if remove_images:
imageutils.remove_images(im, root)
pageutils.remove_existing_textlines(root)
for coord_idx, coord in enumerate(sorted(coordmap)):
region_coords = coordmap[coord]['coords']
if len(region_coords) < 3:
continue
cropped, [min_x, min_y, max_x, max_y] = imgmanipulate.cutout(im, region_coords)
if coordmap[coord].get("orientation"):
orientation = coordmap[coord]['orientation']
else:
orientation = -1 * nlbin.estimate_skew(cropped, 0, maxskew=maxskew,
skewsteps=skewsteps)
s_print(f"[{name}] Skew estimate between +/-{maxskew} in {skewsteps} steps. Estimated {orientation}°")
if cropped is not None:
# Check whether the cropped area is completely white or black and skip if so
if not cropped.getbbox() or not ImageChops.invert(cropped).getbbox():
s_print(f"[{name}] Skipping fully black / white region...")
continue
colors = cropped.getcolors(2)
if not (colors is not None and len(colors) == 2):
cropped = Image.fromarray(nlbin.adaptive_binarize(np.array(cropped)).astype(np.uint8))
if coordmap[coord]["type"] == "drop-capital":
lines = [1]
else:
lines = segment(cropped,
scale=scale,
max_blackseps=max_blackseps,
widen_blackseps=widen_blackseps,
max_whiteseps=max_whiteseps,
minheight_whiteseps=minheight_whiteseps,
filter_strength=filter_strength,
smear_strength=smear_strength,
growth=growth,
orientation=orientation,
fail_save_iterations=fail_save_iterations,
vscale=vscale,
hscale=hscale,
minscale=minscale,
maxlines=maxlines,
usegauss=usegauss,
bounding_box=bounding_box)
else:
lines = []
# Interpret whole region as TextLine if no TextLines are found
if not lines or len(lines) == 0:
coord_str = " ".join([f"{x},{y}" for x, y in region_coords])
textregion = root.find(f'.//{{*}}TextRegion[@id="{coord}"]')
if orientation:
textregion.set('orientation', str(orientation))
linexml = etree.SubElement(textregion, "TextLine",
attrib={"id": "{}_l{:03d}".format(coord, coord_idx + 1)})
etree.SubElement(linexml, "Coords", attrib={"points": coord_str})
else:
for poly_idx, poly in enumerate(lines):
if coordmap[coord]["type"] == "drop-capital":
coord_str = coordmap[coord]["coordstring"]
else:
line_coords = Polygon([(x + min_x, y + min_y) for x, y in poly])
sanitized_coords = pageutils.sanitize(line_coords, Polygon(region_coords), width, height)
coord_str = " ".join([f"{int(x)},{int(y)}" for x, y in sanitized_coords])
textregion = root.find(f'.//{{*}}TextRegion[@id="{coord}"]')
if orientation:
textregion.set('orientation', str(orientation))
linexml = etree.SubElement(textregion, "TextLine",
attrib={"id": "{}_l{:03d}".format(coord, poly_idx + 1)})
etree.SubElement(linexml, "Coords", attrib={"points": coord_str})
s_print(f"[{name}] Generate new PAGE XML with text lines")
xmlstring = etree.tounicode(root.getroottree()).replace(
"http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19",
"http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15")
return xmlstring
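# Hedged usage sketch (not part of this module): pagelineseg returns the updated
# PAGE XML as a string, so a caller might persist it like this. The paths are
# placeholders.
#
# if __name__ == "__main__":
#     new_xml = pagelineseg("page/0001.xml", "images/0001.png")
#     Path("page/0001.xml").write_text(new_xml, encoding="utf-8")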
|
import phidgets_relay_class as relay
import time
print('Starting relay init')
brd_ser_nums = [439515, 449740, 449901, 439525]
brd1 = relay.phidget_relay_class(brd_ser_nums[0])
brd2 = relay.phidget_relay_class(brd_ser_nums[1])
brd3 = relay.phidget_relay_class(brd_ser_nums[2])
brd4 = relay.phidget_relay_class(brd_ser_nums[3])
# reset all relay blocks
brd1.relay_reset_all()
brd2.relay_reset_all()
brd3.relay_reset_all()
brd4.relay_reset_all()
def relay_FR_HA(sing_gnd, pwr):
relay_sensor(brd1, 0, brd2, 0, sing_gnd, pwr)
def relay_FL_HA(sing_gnd, pwr):
relay_sensor(brd1, 1, brd2, 1, sing_gnd, pwr)
def relay_RR_HA(sing_gnd, pwr):
relay_sensor(brd1, 2, brd2, 2, sing_gnd, pwr)
def relay_RL_HA(sing_gnd, pwr):
relay_sensor(brd1, 3, brd2, 3, sing_gnd, pwr)
def relay_FR_HEIGHT(sing_gnd, pwr):
relay_sensor(brd1, 4, brd2, 4, sing_gnd, pwr)
def relay_FL_HEIGHT(sing_gnd, pwr):
relay_sensor(brd1, 5, brd2, 5, sing_gnd, pwr)
def relay_RR_HEIGHT(sing_gnd, pwr):
relay_sensor(brd1, 6, brd2, 6, sing_gnd, pwr)
def relay_RL_HEIGHT(sing_gnd, pwr):
relay_sensor(brd1, 7, brd2, 7, sing_gnd, pwr)
def relay_AIR_PRESS(sing_gnd, pwr):
relay_sensor(brd3, 0, brd3, 1, sing_gnd, pwr)
def relay_uprlata(sing_gnd, pwr):
brd4.relay_switch(3, sing_gnd)
def relay_lwrlata(sing_gnd, pwr):
brd4.relay_switch(2, sing_gnd)
def relay_lower_sig(state):
brd3.relay_switch(2, state)
def relay_ris_sig(state):
brd3.relay_switch(3, state)
def relay_dig_in3(state):
brd3.relay_switch(4, state)
def relay_dig_in4(state):
brd3.relay_switch(5, state)
def relay_air_sus_cont(state):
brd3.relay_switch(6, state)
def relay_ign_in(state):
brd3.relay_switch(7, state)
def relay_sensor_source_supply(state):
brd4.relay_switch(1, state)
def relay_group1_switch(state):
relay_air_sus_cont(state)
relay_lower_sig(state)
relay_ris_sig(state)
relay_dig_in3(state)
relay_dig_in4(state)
relay_AIR_PRESS(state, 0)
def relay_group2_switch(state):
relay_ign_in(state)
relay_AIR_PRESS(state, 0)
def relay_group3_switch(state):
relay_FR_HA(state, 0)
relay_FL_HA(state, 0)
relay_RR_HA(state, 0)
relay_RL_HA(state, 0)
relay_FR_HEIGHT(state, 0)
relay_FL_HEIGHT(state, 0)
relay_RR_HEIGHT(state, 0)
relay_RL_HEIGHT(state, 0)
relay_AIR_PRESS(state, 0)
relay_sensor_source_supply(state)
def relay_power(wire_name, state=0):
if wire_name.lower() == "all":
brd4.relay_switch(4, state) # V_bat_A
brd4.relay_switch(5, state) # V_BAT_B
brd4.relay_switch(6, state) # V_BAT_C
brd4.relay_switch(7, state)  # power supply connected to power rail
elif wire_name.lower() == "p_vbat_a":
brd4.relay_switch(4, state)
brd4.relay_switch(7, state)
elif wire_name.lower() == "p_vbat_b":
brd4.relay_switch(5, state)
brd4.relay_switch(7, state)
elif wire_name.lower() == "p_vbat_c":
brd4.relay_switch(6, state)
brd4.relay_switch(7, state)
elif wire_name.lower() == "all_on":
brd4.relay_switch(4, 1)
brd4.relay_switch(5, 1)
brd4.relay_switch(6, 1)
brd4.relay_switch(7, 1)
elif wire_name.lower() == "all_off":
brd4.relay_switch(7, 0)
brd4.relay_switch(4, 0)
brd4.relay_switch(5, 0)
brd4.relay_switch(6, 0)
elif wire_name.lower() == "p_vbat_a_only":
#print(wire_name)
brd4.relay_switch(4, 1)
brd4.relay_switch(5, 1)
brd4.relay_switch(6, 1)
brd4.relay_switch(7, 1)
delay()
brd4.relay_switch(5, 0) # V_BAT_B
brd4.relay_switch(6, 0) # V_BAT_C
delay()
elif wire_name.lower() == "p_vbat_b_only":
#print(wire_name)
brd4.relay_switch(4, 1)
brd4.relay_switch(5, 1)
brd4.relay_switch(6, 1)
brd4.relay_switch(7, 1)
delay()
brd4.relay_switch(4, 0) # V_bat_A
brd4.relay_switch(6, 0) # V_BAT_C
delay()
elif wire_name.lower() == "p_vbat_c_only":
#print(wire_name)
brd4.relay_switch(4, 1)
brd4.relay_switch(5, 1)
brd4.relay_switch(6, 1)
brd4.relay_switch(7, 1)
delay()
brd4.relay_switch(4, 0) # V_bat_A
brd4.relay_switch(5, 0) # V_BAT_B
delay()
else:
print(f"Function: relay_power() Wrong signal name: {wire_name}, state: {state}")
print("Available: all, p_vbat_a, p_vbat_b, p_vbat_c, all_on, all_off, p_vbat_a_only, p_vbat_b_only, p_vbat_c_only")
def relay_dmm(pwr_sensor):
# state = 0 - dmm connected to 5V sensor power bus
# state = 1 - dmm connected to sensor input bus
brd4.relay_switch(0, pwr_sensor)
if __name__ == "__main__":
try:
# relay_power("all_on")
# relay_sensor_source_supply(1)
# while 1:
# print("On")
# relay_group1_switch(1)
# time.sleep(5)
# print("Off")
# relay_group1_switch(0)
# time.sleep(5)
# print("start self test ")
# # print("brd1")
# # brd1.relay_self_test()
# # print("brd2")
# # brd2.relay_self_test()
# # print("brd3")
# # brd3.relay_self_test()
# print("brd4")
# brd4.relay_self_test()
sensor_5VON_volt = [["FR_HA_5V", 0, relay_FR_HA],
["FL_HA_5V", 0, relay_FL_HA],
["RR_HA_5V", 0, relay_RR_HA],
["RL_HA_5V", 0, relay_RL_HA],
["FR_HEIGHT_5V", 0, relay_FR_HEIGHT],
["FL_HEIGHT_5V", 0, relay_FL_HEIGHT],
["RR_HEIGHT_5V", 0, relay_RR_HEIGHT],
["RL_HEIGHT_5V", 0, relay_RL_HEIGHT],
["AIR_PRESS_5V", 0, relay_AIR_PRESS],
]
print(len(sensor_5VON_volt))
except:
brd1.close()
brd2.close()
brd3.close()
brd4.close()
|
# Generated by Django 2.2.7 on 2019-12-02 12:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('is_accepted', models.BooleanField(default=False)),
('accepted_date', models.DateTimeField(blank=True, null=True)),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Client')),
('suppliers', models.ManyToManyField(blank=True, to='accounts.Supplier')),
],
),
]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: args
# Purpose: Module defining all switches and arguments used by the following
# components:
#
# - InitSync
# - CDCExtract
# - CDCApply
#
# Notes:
#
###############################################################################
import json
import os
import sys
import configargparse
import multiprocessing
import data_pipeline.constants.const as const
import data_pipeline.utils.filesystem as filesystem_utils
import data_pipeline.utils.utils as utils
from pprint import PrettyPrinter
def positive_int_type(x):
x = int(x)
if x < 0:
raise configargparse.ArgumentTypeError("A negative number was supplied")
return x
def commitpoint_type(x):
x = int(x)
if x < const.MIN_COMMIT_POINT:
raise configargparse.ArgumentTypeError(
"Minimum allowed commitpoint is: {}"
.format(const.MIN_COMMIT_POINT))
return x
class AppendDateTimeDirAction(configargparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(AppendDateTimeDirAction, self).__init__(option_strings,
dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
values = filesystem_utils.append_datetime_dir(values)
setattr(namespace, self.dest, values)
def _is_initsync(mode):
return (mode == const.INITSYNC or
mode == const.INITSYNCEXTRACT or
mode == const.INITSYNCAPPLY)
def _is_extract(mode):
return mode == const.CDCEXTRACT
def _is_apply(mode):
return mode == const.CDCAPPLY
def parse_args(arg_list, mode):
common_args_parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
usage='%(prog)s [options]',
add_help=False)
common_args_parser.add_argument(
"-c", "--config",
is_config_file=True,
help="config file path")
common_args_parser.add_argument(
"--quiet",
action="store_true",
help="quiet mode")
common_args_parser.add_argument(
"--verbose",
action="store_true",
help="verbose mode")
common_args_parser.add_argument(
"--veryverbose",
action="store_true",
help="very verbose mode")
common_args_parser.add_argument(
"--tempdirectory",
nargs='?',
help="temporary working directory")
common_args_parser.add_argument(
"--workdirectory",
nargs='?',
action=AppendDateTimeDirAction,
help="output working directory")
common_args_parser.add_argument(
"--audituser",
nargs='?',
help=("process audit user credentials requried for logging processing "
"metrics"))
common_args_parser.add_argument(
"--auditschema",
nargs='?',
default='',
help="schema name containing process audit tables")
common_args_parser.add_argument(
"--streamchannel",
nargs='?',
help="stream channel name / kafka topic")
common_args_parser.add_argument(
"--streamgroup",
nargs='?',
help="stream group identifer / kafka consumer group ")
common_args_parser.add_argument(
"--streamhost",
nargs='?',
help="stream host name / kafka cluster host")
common_args_parser.add_argument(
"--streamschemahost",
nargs='?',
help="stream schema host name / kafka cluster host")
common_args_parser.add_argument(
"--streamschemafile",
nargs='?',
help="stream schema file")
common_args_parser.add_argument(
"--dateformat",
nargs='?',
default='dd-mon-yyyy',
help="date format mask used for data extraction")
common_args_parser.add_argument(
"--timeformat",
nargs='?',
default='hh24:mi:ss',
help="time format mask used for data extraction")
common_args_parser.add_argument(
"--timestampformat",
nargs='?',
default='dd-mon-yyyy hh24:mi:ss',
help="timestamp format mask used for data extraction")
common_args_parser.add_argument(
"--outputfile",
nargs='?',
help=("name of file where data is written to prior to being sent to "
"an external source"))
common_args_parser.add_argument(
"--rawfile",
nargs='?',
help="name of file where raw extract source output is written to")
common_args_parser.add_argument(
"--tablelist",
nargs='+',
help="list of table(s) or file contain table list")
common_args_parser.add_argument(
"--profilename",
nargs='?',
help=("list of table(s) held as an application profile in process "
"control database"))
common_args_parser.add_argument(
"--profileversion",
nargs='?',
type=float,
help="application profile version number")
common_args_parser.add_argument(
"--delimiter",
nargs='?',
default=const.DELIMITER,
help="fields-data value separator character")
common_args_parser.add_argument(
"--sourcedbtype",
nargs='?',
default=const.ORACLE,
choices=[const.FILE, const.ORACLE, const.MSSQL, const.POSTGRES, const.GREENPLUM],
help="")
common_args_parser.add_argument(
"--samplerows",
nargs='?',
type=positive_int_type,
help="process a row sample")
common_args_parser.add_argument(
"--clientencoding",
nargs='?',
default='utf-8',
help="source data character encoding")
common_args_parser.add_argument(
"--donotload",
action="store_true",
help="do not stream data - used for debugging source extraction")
common_args_parser.add_argument(
"--donotsend",
action="store_true",
help="do not stream data - used for debugging source extraction")
common_args_parser.add_argument(
"--sourcesystem",
nargs='?',
help="source system identifier (code)")
common_args_parser.add_argument(
"--kill",
action="store_true",
default=False,
help=("Insert a poison pill down into kafka topic as a signal for "
"consumers to exit gracefully"))
common_args_parser.add_argument(
"--auditcommitpoint",
type=commitpoint_type,
default=1000,
help=("when looping over a large number of records, "
"this will log audit updates every given number of records"))
common_args_parser.add_argument(
"--arraysize",
nargs='?',
type=int,
default=const.DEFAULT_ARRAYSIZE,
help=("this read-write attribute specifies the number of rows to "
"fetch at a time internally and is the default number of rows "
"to fetch with the fetchmany() call. Note this attribute can "
"drastically affect the performance of a query since it "
"directly affects the number of network round trips that need "
"to be performed"))
common_args_parser.add_argument(
"--notifysmtpserver",
nargs='?',
default="localhost",
help="hostname of the smtp server for sending emails")
common_args_parser.add_argument(
"--notifysender",
nargs='?',
default="data_pipleine@example.com",
help="sender email address used when sending notification emails")
common_args_parser.add_argument(
"--notifyerrorlist",
nargs='+',
default=[],
help=("space-separated list of recipient email addresses who will "
"receive notifications upon an error in the application"))
common_args_parser.add_argument(
"--notifysummarylist",
nargs='+',
default=[],
help=("space-separated list of recipient email addresses who will "
"receive notifications of summary details and statistics upon "
"completion of a run"))
common_args_parser.add_argument(
"--sslmode",
choices=[
const.SSLMODE_DISABLE,
const.SSLMODE_ALLOW,
const.SSLMODE_PREFER,
const.SSLMODE_REQUIRE,
const.SSLMODE_VERIFY_CA,
const.SSLMODE_VERIFY_FULL,
],
default=const.SSLMODE_PREFER,
nargs='?',
help=("use this sslmode for the database connection"))
common_args_parser.add_argument(
"--sslcert",
nargs='?',
default=("{home}/.postgresql/postgresql.crt"
.format(home=os.environ['HOME'])),
help=("this parameter specifies the file name of the client SSL "
"certificate"))
common_args_parser.add_argument(
"--sslrootcert",
nargs='?',
default=("{home}/.postgresql/root.crt"
.format(home=os.environ['HOME'])),
help=("this parameter specifies the name of a file containing SSL "
"certificate authority (CA) certificate(s)"))
common_args_parser.add_argument(
"--sslkey",
nargs='?',
default=("{home}/.postgresql/postgresql.key"
.format(home=os.environ['HOME'])),
help=("this parameter specifies the location for the secret key used "
"for the client certificate"))
common_args_parser.add_argument(
"--sslcrl",
nargs='?',
default=("{home}/.postgresql/root.crl"
.format(home=os.environ['HOME'])),
help=("this parameter specifies the file name of the SSL certificate "
"revocation list (CRL). Certificates listed in this file, if it "
"exists, will be rejected while attempting to authenticate the "
"server's certificate"))
if _is_initsync(mode):
initsync_args_parser = configargparse.ArgumentParser(
prog=const.INITSYNC,
add_help=True,
parents=[common_args_parser])
initsync_args_parser.add_argument(
"--sourceuser",
nargs='?',
help=("source database user credentials in the form: "
"dbuser/dbpasswd@SRCSID[:PORT]"))
initsync_args_parser.add_argument(
"--sourceschema",
nargs='?',
help="schema name where source tables reside")
initsync_args_parser.add_argument(
"--sourceformat",
nargs='?',
default=const.TEXT,
choices=[const.CSV, const.TEXT],
help="format type of extracted data")
initsync_args_parser.add_argument(
"--loaddefinition",
nargs='?',
default=const.SRC,
choices=[const.SRC, const.DEST],
help="sql file or dictionary where ddl definition is extracted")
initsync_args_parser.add_argument(
"--parallelmode",
nargs='?',
type=int,
default=2,
help=("execute extraction query in parallel mode if supported by "
"database server"))
initsync_args_parser.add_argument(
"--querycondition",
nargs='+',
help="extra sql condition added to extraction query")
initsync_args_parser.add_argument(
"--sourcerowcount",
action="store_true",
help="get source row count")
initsync_args_parser.add_argument(
"--removectrlchars",
nargs='?',
help="remove control characters from extracted data")
initsync_args_parser.add_argument(
"--lock",
action="store_true",
help="execute extraction query without share lock")
initsync_args_parser.add_argument(
"--directunload",
action="store_true",
help=("use a direct unload utility to extract data from source - "
"mssql: bcp"))
initsync_args_parser.add_argument(
"--inputfile",
nargs='?',
help="full path to the stream data file - bypasses stream polling")
initsync_args_parser.add_argument(
"--targetdbtype",
nargs='?',
choices=[const.POSTGRES, const.GREENPLUM, const.JSON],
help="")
initsync_args_parser.add_argument(
"--targetuser",
nargs='?',
help=("target database user credentials in the form: "
"dbuser/dbpasswd@SRCSID[:PORT]"))
initsync_args_parser.add_argument(
"--targetschema",
nargs='?',
help="schema name where target tables reside")
initsync_args_parser.add_argument(
"--datatypemap",
nargs='?',
help=("full path to the yaml config file containing the source->"
"target data type mappings"))
initsync_args_parser.add_argument(
"--numprocesses",
nargs='?',
type=int,
default=multiprocessing.cpu_count(),
help=("pool size of available processes for executing initsync. "
"A process will be dedicated for each table being synced"))
initsync_args_parser.add_argument(
"--buffersize",
nargs='?',
type=int,
default=8192,
help=("Size, in bytes, of the buffer used to read data into "
"before flushing to target"))
initsync_args_parser.add_argument(
"--extracttimeout",
nargs='?',
default=None,
help=("Seconds to wait before timing out. By default, "
"initsync will wait indefinitely for extractor"))
initsync_args_parser.add_argument(
"--delete",
action="store_true",
help="Delete table records on target prior to initsync")
initsync_args_parser.add_argument(
"--truncate",
action="store_true",
help="Truncate tables on target prior to initsync")
initsync_args_parser.add_argument(
"--metacols",
type=json.loads,
default="{}",
help=("enable metadata columns to be populated on target. "
"The format must be in json format (no spaces) of "
"supported metadata columns paired with their respective "
"target metadata column names. For example: "
"--metacols '{\"insert_timestamp_column\":\"ctl_ins_ts\","
"\"update_timestamp_column\":\"ctl_upd_ts\"}'. Supported metadata "
"columns are: "
"[insert_timestamp_column, update_timestamp_column]"))
initsync_args_parser.add_argument(
"--vacuum",
action="store_true",
help=("executes a VACCUM FULL on target DB "
"per table after successful apply"))
initsync_args_parser.add_argument(
"--analyze",
action="store_true",
help="executes an ANALYZE on target DB after successful apply")
initsync_args_parser.add_argument(
"--consumertimeout",
nargs='?',
default=const.KAFKA_DEFAULT_POLL_TIMEOUT,
help=("Time to wait in blocking poll state while consuming "
"to end of queue at the end of an initsync"))
initsync_args_parser.add_argument(
"--extractlsn",
action="store_true",
help=("enables capturing of the source lsn at the point of "
"extract. Note, this will only be used on source "
"databases that support the concept of an "
"LSN/SCN/transactionid"))
initsync_args_parser.add_argument(
"--nullstring",
nargs='?',
default=const.NULL,
help="the string used to identify a NULL value")
parsed_args = initsync_args_parser.parse_args(arg_list)
initsync_args_parser.print_values()
elif _is_extract(mode):
extract_args_parser = configargparse.ArgumentParser(
prog=const.CDCEXTRACT,
add_help=True,
parents=[common_args_parser])
extract_args_parser.add_argument(
"--startscn",
nargs='?',
help="start scn or lsn for cdc transaction search")
extract_args_parser.add_argument(
"--endscn",
nargs='?',
help="end scn or lsn for cdc transaction search")
extract_args_parser.add_argument(
"--starttime",
nargs='?',
help="start time for cdc transaction search")
extract_args_parser.add_argument(
"--endtime",
nargs='?',
help="end time or lsn cdc for transaction search")
extract_args_parser.add_argument(
"--sourceschema",
nargs='?',
help="schema name where source tables reside")
extract_args_parser.add_argument(
"--sourcehost",
nargs='?',
help=("source server name or ip address containing application "
"database"))
extract_args_parser.add_argument(
"--sourceuser",
nargs='?',
help=("source database user credentials in the form: "
"dbuser/dbpasswd@SRCSID[:PORT]"))
extract_args_parser.add_argument(
"--scanlogs",
nargs='?',
type=int,
help="number of archived logs to scan")
extract_args_parser.add_argument(
"--sourcedictionary",
nargs='?',
choices=[const.ONLINE_DICT, const.REDOLOG_DICT],
default=const.ONLINE_DICT,
help="source dictionary to use when running logminer")
extract_args_parser.add_argument(
"--checkexistingextract",
action="store_true",
help=("Enables checking of existing extracts with the same "
"profile name and version which are currently in progress. "
"Use this to prevent multiple extract process from running "
"concurrently."))
extract_args_parser.add_argument(
"--extractnewtables",
action="store_true",
help="Enables extraction of new create table DDLs")
parsed_args = extract_args_parser.parse_args(arg_list)
extract_args_parser.print_values()
elif _is_apply(mode):
applier_args_parser = configargparse.ArgumentParser(
prog=const.CDCAPPLY,
add_help=True,
parents=[common_args_parser])
applier_args_parser.add_argument(
"--targetdbtype",
nargs='?',
choices=[const.POSTGRES, const.GREENPLUM, const.JSON],
help="")
applier_args_parser.add_argument(
"--targethost",
nargs='?',
help=("target server name or ip address containing destination "
"database"))
applier_args_parser.add_argument(
"--targetuser",
nargs='?',
help=("target database user credentials in the form: "
"dbuser/dbpasswd@SRCSID[:PORT]"))
applier_args_parser.add_argument(
"--targetschema",
nargs='?',
help="schema name where target tables reside")
applier_args_parser.add_argument(
"--bulkapply",
action="store_true",
help=("Enables bulk applying of DML statements for improved "
"performance"))
applier_args_parser.add_argument(
"--bulkinsertlimit",
nargs='?',
type=int,
default=50,
help="max number of rows within a bulk insert")
applier_args_parser.add_argument(
"--insertnull",
action="store_true",
help="blank and empty strings are loaded as NULL values")
applier_args_parser.add_argument(
"--generatestatistics",
action="store_true",
help="generate table statistics after load")
applier_args_parser.add_argument(
"--donotapply",
action="store_true",
help=("do not apply parsed sql statments to target database - "
"for testing purposes"))
applier_args_parser.add_argument(
"--donotcommit",
action="store_true",
help=("do not commit sql statements to target database - for "
"testing purposes"))
applier_args_parser.add_argument(
"--inputfile",
nargs='?',
help=("full path to the stream data file - for testing purposes, "
"bypasses stream polling"))
applier_args_parser.add_argument(
"--datatypemap",
nargs='?',
help=("full path to the yaml config file containing the "
"source->target data type mappings"))
applier_args_parser.add_argument(
"--skipbatch",
nargs='?',
default=0,
help=("skips the given number of batches without processing, "
"while still committing the offset"))
applier_args_parser.add_argument(
"--targetcommitpoint",
type=commitpoint_type,
default=1000,
help=("when looping over a large number of records, this will "
"cause a commit of executed transactions on target "
"every given number of records"))
applier_args_parser.add_argument(
"--seektoend",
action="store_true",
help="Seek to the end of the kafka queue")
applier_args_parser.add_argument(
"--metacols",
type=json.loads,
default="{}",
help=("enable metadata columns to be populated on target. "
"The format must be in json format (no spaces) of "
"supported metadata columns paired with their respective "
"target metadata column names. For example: "
"--metacols '{\"insert_timestamp_column\":\"ctl_ins_ts\","
"\"update_timestamp_column\":\"ctl_upd_ts\"}'. Supported metadata "
"columns are: "
"[insert_timestamp_column, update_timestamp_column]"))
applier_args_parser.add_argument(
"--retry",
nargs='?',
type=int,
default=0,
help=("When the applier exits due to an ERROR (environmental, "
"data etc), prior to exiting - retry execution from last "
"statement (the statement where error was detected) this "
"many times prior to exiting"))
applier_args_parser.add_argument(
"--retrypause",
nargs='?',
default=5,
type=int,
help="Pauses this number of seconds prior to retrying")
parsed_args = applier_args_parser.parse_args(arg_list)
applier_args_parser.print_values()
    else:
        # Unknown mode: there is nothing to parse, so return early rather than
        # dereferencing None in prefix_workdirectory_to_file_args().
        return None
    parsed_args = prefix_workdirectory_to_file_args(parsed_args)
    return parsed_args
def prefix_workdirectory_to_file_args(parsed_args):
parsed_args = join_dir_and_file(parsed_args,
parsed_args.workdirectory,
'outputfile')
parsed_args = join_dir_and_file(parsed_args,
parsed_args.workdirectory,
'rawfile')
return parsed_args
def join_dir_and_file(parsed_args, directory, file_attribute_name):
filename_arg = getattr(parsed_args, file_attribute_name)
    if filename_arg and directory:  # skip prefixing when no workdirectory was given
filename = os.path.join(directory, filename_arg)
filesystem_utils.ensure_path_exists(filename)
setattr(parsed_args, file_attribute_name, filename)
return parsed_args
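# For example (hypothetical paths): if --workdirectory resolves to
# '/data/work/20190101_120000' (AppendDateTimeDirAction appends the timestamped
# subdirectory) and --outputfile is 'extract.dat', then after
# prefix_workdirectory_to_file_args() the outputfile attribute becomes
# '/data/work/20190101_120000/extract.dat'.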
def get_program_args(mode):
argv = parse_args(sys.argv[1:], mode)
pp = PrettyPrinter()
print("Program input arguments:")
print("========================")
    pp.pprint(argv.__dict__)
return argv
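# Illustrative usage sketch (hypothetical argument values; const.INITSYNC is the
# mode constant referenced above):
#
#   args = parse_args(["--config", "initsync.yaml", "--quiet"], const.INITSYNC)
#   print(args.sourcedbtype, args.delimiter)
#
# Entry-point scripts would normally call get_program_args(mode) instead, which
# parses sys.argv and pretty-prints the resulting values.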
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
self.result = []
self.buildOrder(root, 0)
return self.result
def buildOrder(self, root, depth):
if not root:
return None
if len(self.result) == depth:
self.result.append([])
self.result[depth].append(root.val)
self.buildOrder(root.left, depth+1)
self.buildOrder(root.right, depth+1)
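# Illustrative usage sketch (assumes the TreeNode class outlined in the comment
# at the top of this file):
#
#   root = TreeNode(3)
#   root.left, root.right = TreeNode(9), TreeNode(20)
#   root.right.left, root.right.right = TreeNode(15), TreeNode(7)
#   Solution().levelOrder(root)  # -> [[3], [9, 20], [15, 7]]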
|
# -*- coding: utf-8 -*-
import json
import os
import random
import sys
sys.path.append('../')
from config import base_dir
data_dir = os.path.join(base_dir, 'data')
src = 'jieba'
target = 'fasttext'
process_name = ['train', 'valid', 'test']
def function():
for name in process_name:
        content = open(os.path.join(data_dir, 'data_{}_jieba.json'.format(name)))
        labels = open(os.path.join(data_dir, 'data_{}_content_label.json'.format(name)))
        targetfile = open(os.path.join(data_dir, 'data_{}_{}.txt'.format(name, target)), 'w')
        for x_cut, label in zip(content, labels):
            x_cut = json.loads(x_cut)
            x_cut[-1] = x_cut[-1][:-1]
            label = json.loads(label)
            targetfile.write('{} __label__{}\n'.format(' '.join(x_cut), ' __label__'.join(label)))
        content.close()
        labels.close()
        targetfile.close()
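# Illustrative output line (made-up tokens and label): with x_cut = ["今天", "天气", "好"]
# and label = ["weather"], the loop above writes "今天 天气 好 __label__weather\n",
# which is the plain-text format expected by fastText's supervised trainer.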
if __name__ == "__main__":
function()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Git grep wrapper for arguments re-ordering, that can use options after filenames
#
# Usage: git-gerp [<git-grep-argument>...]
#
# Copyright (c) 2018 htaketani <h.taketani@gmail.com>
# This software is released under the MIT License.
import sys
import re
import subprocess
# git grep command
COMMAND = ['git', 'grep']
# git grep options that require a parameter
OPTS_WITH_PARAM = ['--max-depth', '--open-files-in-pager', '--context', '--after-context', '--before-context', '--threads', '-O', '-C', '-A', '-B', '-f', '-e']
def is_double_dash(token):
return token == '--'
def is_opt(token):
return is_long_opt(token) or is_short_opt(token) or is_group_opt(token)
def is_long_opt(token):
return re.match(r'^--.+', token)
def is_short_opt(token):
return re.match(r'^-\w+$', token)
def is_group_opt(token):
return token in ['(', ')']
def is_long_opt_without_param(token):
return is_long_opt(token) and ('=' not in token)
def tail_short_opt(token):
return '-' + token[-1]
def requires_param(token):
opt = token if is_long_opt_without_param(token) else tail_short_opt(token) if is_short_opt(token) else None
return opt in OPTS_WITH_PARAM
def replace_args(args):
args = args[:]
opt_args = [] # option (and parameter) args
plain_args = [] # non-option args
while len(args):
token = args.pop(0)
if is_double_dash(token):
plain_args.append(token)
plain_args.extend(args)
            break
elif is_opt(token):
opt_args.append(token)
if requires_param(token) and len(args):
opt_args.append(args.pop(0))
else:
plain_args.append(token)
return opt_args + plain_args
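# Illustrative example: replace_args(['TODO', 'src/', '-n', '--context', '2'])
# returns ['-n', '--context', '2', 'TODO', 'src/']: options (and their
# parameters) are moved in front of the pattern and pathspecs, so git grep
# accepts them even when they were typed after the filenames.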
def git_gerp(args):
replaced_args = replace_args(args)
return subprocess.call(COMMAND + replaced_args)
def main():
args = sys.argv[1:]
rc = git_gerp(args)
sys.exit(rc)
if __name__ == '__main__':
main()
|
from .getDepends import getDepends
def importLib():
"""Load python dependent libraries dynamically"""
libList = getDepends()
from pip._internal import main as pip_main
import importlib
def install(package):
pip_main(['install', package])
createVar = locals()
for lib in libList:
print(lib)
try:
createVar[lib["name"]] = importlib.import_module(lib["name"])
except Exception as e:
try:
install(f'{lib["name"]}=={lib["version"]}')
createVar[lib["name"]] = importlib.import_module(lib["name"])
except Exception as e:
print(e)
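# Illustrative sketch of the expected dependency format (package name and version
# below are hypothetical): importLib() reads lib["name"] and lib["version"], so
# getDepends() is assumed to return entries shaped like
#   [{"name": "requests", "version": "2.25.1"}, ...]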
|
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
class Mock(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class MockConfig(Mock):
def __init__(self):
self.tweens = []
def add_tween(self, mpath):
self.tweens.append(mpath)
class MockHandler(object):
def __init__(self, content_type=None):
self.headers = {}
self.content_type = content_type
def __call__(self, request):
return Mock(content_type=self.content_type,
headers=self.headers)
class MockRequest(object):
def __init__(self):
self.environ = {}
class TestSetup(unittest.TestCase):
def test_it(self):
from eucaconsole.tweens import setup_tweens
config = MockConfig()
self.assertTrue(len(config.tweens) == 0)
setup_tweens(config)
self.assertTrue(len(config.tweens) > 1)
class TestCTHeaders(unittest.TestCase):
def test_factory(self):
from eucaconsole.tweens import \
CTHeadersTweenFactory as factory
tween = factory(None, None)
self.assertTrue(callable(tween))
def test_tween(self):
from eucaconsole.tweens import \
CTHeadersTweenFactory as factory
tween = factory(MockHandler('image/jpeg'), None)
res = tween(None)
for name, value in factory.header_map['text/html'].items():
            # make sure non-html resources are *not* getting the headers
self.assertFalse(name in res.headers)
tween = factory(MockHandler('text/html'), None)
res = tween(None)
for name, value in factory.header_map['text/html'].items():
# make sure html resources *are* getting header
self.assertTrue(name in res.headers)
self.assertTrue(res.headers[name] == value)
class TestHTTPSTween(unittest.TestCase):
def test_it(self):
from eucaconsole.tweens import \
https_tween_factory as factory
tween = factory(MockHandler(), None)
request = Mock(scheme=None, environ={})
tween(request)
self.assertTrue(request.scheme is None)
request = Mock(scheme=None,
environ={'HTTP_X_FORWARDED_PROTO': 'https'})
tween(request)
self.assertEqual(request.scheme, 'https')
class TestRequestIDTween(unittest.TestCase):
def test_it(self):
from eucaconsole.tweens import request_id_tween_factory as factory
tween = factory(MockHandler(), None)
request = Mock(id=None)
request.session = dict(account='foo')
tween(request)
self.assertFalse(request.id is None)
|
from auth0_client.commands import administration, tickets, tenants, stats, jobs, guardian, emails, email_templates, blacklists, users_by_email, users, user_blocks, rules_config, rules, resource_servers, logs, client_grants, clients, connections, custom_domains, device_credentials, grants
|
"""Integration tests for Constellix"""
from unittest import TestCase
from lexicon.tests.providers.integration_tests import IntegrationTestsV2
# Constellix does not currently have a sandbox and they enforce domain
# uniqueness across the service. You'll need your own production credentials
# and a unique domain name if you want to run these tests natively.
class ConstellixProviderTests(TestCase, IntegrationTestsV2):
"""TestCase for Constellix"""
provider_name = "constellix"
domain = "example.org"
def _filter_headers(self):
return ["x-cnsdns-apiKey", "x-cnsdns-hmac", "x-cnsdns-requestDate"]
|
# Collected error samples:
# {'errorMessage': 'Pair(s) not found', 'event': 'subscriptionStatus', 'status': 'error'}
# {'errorMessage': ['Unknown field.']}
# => currently handled in subscription status schema
|
"""Top-level package for zillionare-backtest."""
__author__ = """Aaron Yang"""
__email__ = "aaron_yang@jieyu.ai"
|
import iprofile
import math
import numpy as np
import scipy.cluster.hierarchy as sch
import navigate
import matplotlib.pyplot as plt
"""
Read file rms.matrix to fetch the pairwise centroid RMSD, compute the similarity in I(Q) for each
centroid pair from the sassena profiles, then compare RMSD against similarity.
"""
# Location of sassena files containing I(Q)
sId = '/SNS/CAMM/users/jbq/development/LDRDSANS/scrkinase/sassena'
cframe = tuple(str(int(l.strip()))
for l in open('centroids.names').readlines()) # trajectory frame for each centroid
ncentroids = len(cframe)
qp_pair = lambda idx: iprofile.profile('{}/frame{}.h5.gz'.format(sId, cframe[idx]))
# Populate rms and sim matrixes
rmsM = np.zeros(ncentroids ** 2).reshape(ncentroids, ncentroids)
simM = np.zeros(ncentroids ** 2).reshape(ncentroids, ncentroids)
counter = 0
for line in open('rms.matrix', 'r').readlines():
id1, id2, rms = list(i for i in line.strip().split())
id1 = int(id1)
id2 = int(id2)
if id1 < 0:
break
rmsM[id1][id2] = float(rms)
rmsM[id2][id1] = rmsM[id1][id2]
q, p1 = qp_pair(id1)
p2 = qp_pair(id2).i
simM[id1][id2] = math.sqrt(iprofile.similarity(p1, p2))
simM[id2][id1] = simM[id1][id2]
counter += 1
print(counter)
# Calculate average rms and average sim between clusters
rmsl = list()
siml = list()
max_cluster_level = 3
Z = np.loadtxt('./centroids.linkage_matrix')
root_node = sch.to_tree(Z)
cad = navigate.clusters_afloat_depth(root_node, depth=max_cluster_level)
for level in range(1, len(cad)):
clusters = cad[level] # list of clusters at a particular level from the root node
nclusters = len(clusters)
for i in range(nclusters-1):
for j in range(i+1, nclusters):
rms = 0.0
sim = 0.0
ileafs = [leaf.id for leaf in navigate.leafs_under_node(clusters[i])]
jleafs = [leaf.id for leaf in navigate.leafs_under_node(clusters[j])]
for il in ileafs:
for jl in jleafs:
rms += rmsM[il][jl]
sim += simM[il][jl]
rmsl.append(rms/(len(ileafs)*len(jleafs)))
siml.append(sim/(len(ileafs)*len(jleafs)))
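# The two values appended above are the mean pairwise quantities between the
# leaves of clusters i and j, i.e. avg = sum(M[il][jl] for il in i for jl in j) / (|i| * |j|).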
np.savetxt('correlate_avrms_avI.dat', np.array([rmsl, siml]).transpose())
plt.scatter(rmsl, siml)
plt.show()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo that runs hand tracking and object detection on camera frames using OpenCV. 2 EDGETPU
"""
import argparse
import collections
import common
import cv2
import numpy as np
import os
import math
from PIL import Image
import re
from edgetpu.detection.engine import DetectionEngine
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import time
import svgwrite
import gstreamer
from pose_engine import PoseEngine
import tflite_runtime.interpreter as tflite
Object = collections.namedtuple('Object', ['id', 'score', 'bbox'])
#==============================
EDGES = (
('nose', 'left eye'),
('nose', 'right eye'),
('nose', 'left ear'),
('nose', 'right ear'),
('left ear', 'left eye'),
('right ear', 'right eye'),
('left eye', 'right eye'),
('left shoulder', 'right shoulder'),
('left shoulder', 'left elbow'),
('left shoulder', 'left hip'),
('right shoulder', 'right elbow'),
('right shoulder', 'right hip'),
('left elbow', 'left wrist'),
('right elbow', 'right wrist'),
('left hip', 'right hip'),
('left hip', 'left knee'),
('right hip', 'right knee'),
('left knee', 'left ankle'),
('right knee', 'right ankle'),
)
HEADCHECK = ('nose', 'left eye','right eye' ,'left ear', 'right ear')
SHOULDERCHECK = ('left shoulder', 'right shoulder')
HIPCHECK = ('left hip','right hip')
KNEECHECK = ('left knee','right knee')
ANKLECHECK = ('left ankle','right ankle')
def shadow_text(cv2_im, x, y, text, font_size=16):
cv2_im = cv2.putText(cv2_im, text, (x + 1, y + 1),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
#dwg.add(dwg.text(text, insert=, fill='black',
# font_size=font_size, style='font-family:sans-serif'))
#dwg.add(dwg.text(text, insert=(x, y), fill='white',
# font_size=font_size, style='font-family:sans-serif'))
def draw_pose(cv2_im, cv2_sodidi, pose, numobject, src_size, color='yellow', threshold=0.2):
box_x = 0
box_y = 0
box_w = 641
box_h = 480
scale_x, scale_y = src_size[0] / box_w, src_size[1] / box_h
xys = {}
    #== variables used to compute distances between body parts ============
    pts_sodidi = []
    headarea = {}
    shoulderarea = {}
    elbow = {}
    lengbackbone = 60
    lengleg = 86
    lengface = 30
#=======================================================
for label, keypoint in pose.keypoints.items():
if keypoint.score < threshold: continue
# Offset and scale to source coordinate space.
kp_y = int((keypoint.yx[0] - box_y) * scale_y)
kp_x = int((keypoint.yx[1] - box_x) * scale_x)
cv2_im = cv2.putText(cv2_im, str(numobject),(kp_x + 1, kp_y + 1), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
xys[label] = (numobject,kp_x, kp_y)
cv2.circle(cv2_im,(int(kp_x),int(kp_y)),5,(0,255,255),-1)
return xys
'''
for a, b in EDGES:
if a not in xys or b not in xys: continue
anum,ax, ay = xys[a]
bnum,bx, by = xys[b]
print(numobject,a,xys[a],b,xys[b])
cv2.line(cv2_im,(ax, ay), (bx, by),(0,255,255))
'''
def mapcamto2dplane(pts_in):
# provide points from image 1
pts_src = np.array([[154, 174], [702, 349], [702, 572],[1, 572], [1, 191]])
# corresponding points from image 2 (i.e. (154, 174) matches (212, 80))
pts_dst = np.array([[154, 174], [702, 349], [702, 572],[1, 572], [1, 191]])#np.array([[212, 80],[489, 80],[505, 180],[367, 235], [144,153]])
# calculate matrix H
h, status = cv2.findHomography(pts_src, pts_dst)
# provide a point you wish to map from image 1 to image 2
#pts_in = np.array([[154, 174]], dtype='float32')
#pts_in = np.array([pts_in])
# finally, get the mapping
pointsOut = cv2.perspectiveTransform(pts_in, h)
pointsOut = np.array([pointsOut])
point_out = [b for a in pointsOut for b in a]
return point_out
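# Illustrative usage sketch: cv2.perspectiveTransform expects a float array shaped
# like (N, 1, 2), e.g. mapcamto2dplane(np.array([[[154.0, 174.0]]], dtype='float32')).
# Note that pts_src and pts_dst above are currently identical, so the computed
# homography is effectively the identity; with real calibration points this call
# would map camera coordinates onto the 2D floor plane.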
def check_distance(x1,y1,x2,y2):
dist = math.sqrt((x2-x1)**2 + (y2-y1)**2)
return dist
def avg_fps_counter(window_size):
window = collections.deque(maxlen=window_size)
prev = time.monotonic()
yield 0.0 # First fps value.
while True:
curr = time.monotonic()
window.append(curr - prev)
prev = curr
yield len(window) / sum(window)
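# Illustrative usage (this mirrors how main() consumes it below): after
# fps = avg_fps_counter(30), the first next(fps) primes the generator and yields 0.0,
# and each later next(fps) returns the frame rate averaged over the last 30 iterations.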
#==============================
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])):
"""Bounding box.
    Represents a rectangle whose sides are either vertical or horizontal, parallel
    to the x or y axis.
"""
__slots__ = ()
def get_output(interpreter, score_threshold, top_k, image_scale=1.0):
"""Returns list of detected objects."""
boxes = common.output_tensor(interpreter, 0)
class_ids = common.output_tensor(interpreter, 1)
scores = common.output_tensor(interpreter, 2)
count = int(common.output_tensor(interpreter, 3))
def make(i):
ymin, xmin, ymax, xmax = boxes[i]
return Object(
id=int(class_ids[i]),
score=scores[i],
bbox=BBox(xmin=np.maximum(0.0, xmin),
ymin=np.maximum(0.0, ymin),
xmax=np.minimum(1.0, xmax),
ymax=np.minimum(1.0, ymax)))
return [make(i) for i in range(top_k) if scores[i] >= score_threshold]
def main():
default_model_dir = '../all_models'
default_model = 'posenet/posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite'
default_labels = 'hand_label.txt'
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='.tflite model path',
default=os.path.join(default_model_dir,default_model))
parser.add_argument('--labels', help='label file path',
default=os.path.join(default_model_dir, default_labels))
parser.add_argument('--top_k', type=int, default=1,
help='number of categories with highest score to display')
    parser.add_argument('--camera_idx', type=int, help='Index of which video source to use.', default=0)
parser.add_argument('--threshold', type=float, default=0.5,
help='classifier score threshold')
args = parser.parse_args()
#print('Loading Handtracking model {} with {} labels.'.format(args.model, args.labels))
#engine = DetectionEngine(args.model)
#labels = load_labels(args.labels)
#=====================================================================
src_size = (640, 480)
print('Loading Pose model {}'.format(args.model))
engine = PoseEngine(args.model)
#engine = PoseEngine('models/mobilenet/posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite')
#=====================================================================
# for detection
print('Loading Detection model {} with {} labels.'.format('../all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '../all_models/coco_labels.txt'))
#interpreter2 = common.make_interpreter('../all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
#interpreter2.allocate_tensors()
engine2 = DetectionEngine('../all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
labels2 = load_labels('../all_models/coco_labels.txt')
cap = cv2.VideoCapture(args.camera_idx)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
#declare new window for show pose in 2d plane========================
h_cap, w_cap, _ = cv2_im.shape
cv2_sodidi = np.zeros((h_cap,w_cap,3), np.uint8)
#======================================pose processing=================================
poses, inference_time = engine.DetectPosesInImage(np.uint8(pil_im.resize((641, 481), Image.NEAREST)))
#print('Posese is',poses)
n = 0
sum_process_time = 0
sum_inference_time = 0
ctr = 0
fps_counter = avg_fps_counter(30)
input_shape = engine.get_input_tensor_shape()
inference_size = (input_shape[2], input_shape[1])
#print('Shape is',input_shape)
#print('inference size is:',inference_size)
start_time = time.monotonic()
end_time = time.monotonic()
n += 1
sum_process_time += 1000 * (end_time - start_time)
sum_inference_time += inference_time
avg_inference_time = sum_inference_time / n
text_line = 'PoseNet: %.1fms (%.2f fps) TrueFPS: %.2f' % (
avg_inference_time, 1000 / avg_inference_time, next(fps_counter)
)
shadow_text(cv2_im, 10, 20, text_line)
numobject = 0
xys={}
pts_sodidi_arr=[]
pts_xys_arr=[]
listwarning=[]
#draw_pose(cv2_im, poses, dis, src_size)
for pose in poses:
for i in range(len(poses)-1):
pose = poses[i]
for label, keypoint in pose.keypoints.items():
#print(label)
#print(keypoint)
if keypoint.score < 0.2: continue
if (label=='nose'):
print('yx0,',keypoint.yx)
for j in range(len(poses)):
pose1 = poses[j]
#print(pose.keypoints.items())
for label, keypoint in pose1.keypoints.items():
if keypoint.score < 0.2: continue
if (label=='nose'):
print('yx1,',keypoint.yx)
'''
pts_sodidi, xys = draw_pose(cv2_im,cv2_sodidi, pose, numobject, src_size)
#print(pts_sodidi)
pts_sodidi_arr.append(pts_sodidi)
pts_xys_arr.append(xys)
numobject += 1
#print('len coor_av',coor_ave)
#print(xys,coor_ave)kghkkgkgkgerg.hbjbbsbdbs
pts_sodidi_arr = np.array([pts_sodidi_arr])
v2 = [b for a in pts_sodidi_arr for b in a]
print(v2)
print(xys)
print(numobject)
#leng = coor_ave.length
#print(leng)
#for a in pts_sodidi_arr:
# for b in a:
# print(b[0])
for i in range(0,len(v2)):
a,x1,y1 = v2[i]
for j in range(1,len(v2)):
if i == j:
break
b,x2,y2 = v2[j]
distance = check_distance(x1,y1,x2,y2)
print('distance',distance)
if distance > 100:
cv2_sodidi = cv2.circle(cv2_sodidi,(int(x1),int(y1)),5,(0,0,255),-1)
cv2_sodidi = cv2.circle(cv2_sodidi,(int(x2),int(y2)),5,(0,0,255),-1)
listwarning.append(i)
listwarning.append(j)
else:
cv2_sodidi = cv2.circle(cv2_sodidi,(int(x1),int(y1)),5,(255,0,0),-1)
cv2_sodidi = cv2.circle(cv2_sodidi,(int(x2),int(y2)),5,(255,0,0),-1)
print('listwarning',listwarning)
for a, b in EDGES:
if a not in xys or b not in xys: continue
num,ax, ay = xys[a]
num,bx, by = xys[b]
if num in listwarning:
#print(numobject,a,xys[a],b,xys[b])
cv2.line(cv2_im,(ax, ay), (bx, by),(0,0,255))
else:
cv2.line(cv2_im,(ax, ay), (bx, by),(255,0,0))
        '''
        # (end of commented-out legacy pose-distance drawing code)
        #==============================================================================================
#cv2_im = append_objs_to_img(cv2_im, objs, labels)
# detection
#common.set_input(interpreter2, pil_im)
#interpreter2.invoke()
#objs = get_output(interpreter2, score_threshold=0.2, top_k=3)
objs = engine2.detect_with_image(pil_im,
threshold=0.2,
keep_aspect_ratio=True,
relative_coord=True,
top_k=3)
cv2_im = append_objs_to_img(cv2_im, objs, labels2)
cv2.imshow('frame', cv2_im)
cv2.imshow('1', cv2_sodidi)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def append_objs_to_img(cv2_im, objs, labels):
height, width, channels = cv2_im.shape
for obj in objs:
x0, y0, x1, y1 = obj.bounding_box.flatten().tolist() #list(obj.bbox)
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
percent = int(100 * obj.score)
label = '{}% {}'.format(percent, labels.get(obj.label_id, obj.label_id))
if(labels.get(obj.label_id, obj.label_id)=='person'):
cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
cv2_im = cv2.putText(cv2_im, label, (x0, y0+30),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
return cv2_im
'''
height, width, channels = cv2_im.shape
boxes_ob = []
confidences = []
classIDs = []
for obj in objs:
x0, y0, x1, y1 = obj.bounding_box.flatten().tolist() #list(obj.bbox)
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
percent = int(100 * obj.score)
confidence = obj.score
label = '{}% {}'.format(percent, labels.get(obj.label_id, obj.label_id))
if(labels.get(obj.label_id, obj.label_id)=='person'):
classIDs.append(0)
confidences.append(float(confidence))
boxes_ob.append([x0, y0, x1,y1])
#cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
#cv2_im = cv2.putText(cv2_im, label, (x0, y0+30),
# cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
idxs = cv2.dnn.NMSBoxes(boxes_ob, confidences, 0.5,0.3)
#print('idxs',idxs)
#print('classID',classIDs)
ind = []
for i in range(0,len(classIDs)):
if(classIDs[i]==0):
ind.append(i)
a = []
b = []
if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes_ob[i][0], boxes_ob[i][1])
(w, h) = (boxes_ob[i][2], boxes_ob[i][3])
a.append(x)
b.append(y)
distance=[]
nsd = []
for i in range(0,len(a)-1):
for k in range(1,len(a)):
if(k==i):
break
else:
x_dist = (a[k] - a[i])
y_dist = (b[k] - b[i])
d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
distance.append(d)
if(d <=1000):
nsd.append(i)
nsd.append(k)
nsd = list(dict.fromkeys(nsd))
print(nsd)
color = (0, 0, 255)
for i in nsd:
(x, y) = (boxes_ob[i][0], boxes_ob[i][1])
(w, h) = (boxes_ob[i][2], boxes_ob[i][3])
cv2_im=cv2.rectangle(cv2_im, (x, y), (x + w, y + h), color, 2)
text = "Alert"
cv2_im=cv2.putText(cv2_im, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)
color = (0, 255, 0)
if len(idxs) > 0:
for i in idxs.flatten():
if (i in nsd):
break
else:
(x, y) = (boxes_ob[i][0], boxes_ob[i][1])
(w, h) = (boxes_ob[i][2], boxes_ob[i][3])
cv2_im=cv2.rectangle(cv2_im, (x, y), (x + w, y + h), color, 2)
text = 'OK'
cv2_im=cv2.putText(cv2_im, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)
#cv2.imshow("Social Distancing Detector", image)
return cv2_im
'''
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import os
import django
def get_all_subjects():
"""
    Get all subjects, with their active period objects prefetched via prefetch_active_period_objects()
"""
from devilry.apps.core.models import Subject
subject_queryset = Subject.objects.all().prefetch_active_period_objects()
return subject_queryset
def get_all_assignments_in_period(period):
"""
Fetch all assignments for a given period
"""
return period.assignments.all()
def get_assignments_with_matching_shortname_in_previous_periods(subject, assignment):
"""
    Fetch the assignments in earlier periods of the same subject that have a matching short name.
E.g. oblig1 - 2019 matches oblig1 - 2020
"""
from devilry.apps.core.models import Assignment
short_name = assignment.short_name
assignments = Assignment.objects.none()
for period in subject.periods.exclude(id=assignment.period.id):
assignments = assignments.union(period.assignments.filter(short_name=short_name))
return assignments
def print_assignment_statistics(assignment):
"""
Fetch the statistics from the given assignments
- Assignment name
- The total number of assignment groups within the given assignment
- The total number of first attempts given within all feedbackset across all groups within assignment
- The total number of first attempts with no deliveries within all feedbackset across all groups within assignment
- The total number of first attempts with deliveries within all feedbackset across all groups within assignment
- The total number of new attempts given within all feedbackset across all groups within assignment
- The total number of new attempts with no deliveries within all feedbackset across all groups within assignment
- The total number of new attempts with deliveries within all feedbackset across all groups within assignment
- The total number of moved deadlines with no deliveries in feedbackset across all groups within assignment
"""
from devilry.devilry_group.models import (FeedbackSet,
FeedbackSetDeadlineHistory,
GroupComment)
from django.db.models import F
statistics = {
'assignment': assignment,
'number_of_groups': assignment.assignmentgroups.all().count(),
'number_of_firstattempts': 0,
'number_of_newattempts': 0,
'number_of_firstattempts_with_no_delivery': 0,
'number_of_newattempts_with_no_delivery': 0,
'number_of_firstattempts_with_deliveries': 0,
'number_of_newattempts_with_deliveries': 0,
'number_of_moved_deadlines_with_no_delivery': 0
}
for group in assignment.assignmentgroups.all():
feedbackset_queryset = FeedbackSet.objects\
.filter(group=group)
for feedbackset in feedbackset_queryset:
number_of_group_comment_from_student = GroupComment.objects\
.filter(user_role=GroupComment.USER_ROLE_STUDENT)\
.filter(feedback_set=feedbackset).count()
if feedbackset.feedbackset_type == FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT:
statistics['number_of_newattempts'] += 1
if number_of_group_comment_from_student == 0:
statistics['number_of_newattempts_with_no_delivery'] += 1
else:
statistics['number_of_newattempts_with_deliveries'] += 1
elif feedbackset.feedbackset_type == FeedbackSet.FEEDBACKSET_TYPE_FIRST_ATTEMPT:
statistics['number_of_firstattempts'] += 1
if number_of_group_comment_from_student == 0:
statistics['number_of_firstattempts_with_no_delivery'] += 1
else:
statistics['number_of_firstattempts_with_deliveries'] += 1
feedbackset_deadline_edit_count = FeedbackSetDeadlineHistory.objects.filter(
feedback_set=feedbackset).filter(deadline_old__lt=F('deadline_new')).count()
statistics['number_of_moved_deadlines_with_no_delivery'] += feedbackset_deadline_edit_count
print("{assignment},{number_of_groups},{number_of_firstattempts},{number_of_newattempts},{number_of_firstattempts_with_no_delivery},{number_of_newattempts_with_no_delivery},{number_of_firstattempts_with_deliveries},{number_of_newattempts_with_deliveries},{number_of_moved_deadlines_with_no_delivery}".format(
**statistics))
return statistics
if __name__ == "__main__":
# For development:
os.environ.setdefault('DJANGOENV', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devilry.project.settingsproxy")
django.setup()
# For production: Specify python path to your settings file here
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'devilry_settings')
# django.setup()
# Get number of deliveries
subject_set = get_all_subjects()
print("'assignment', 'number_of_groups', 'number_of_firstattempts', 'number_of_newattempts', 'number_of_firstattempts_with_no_delivery', 'number_of_newattempts_with_no_delivery', 'number_of_firstattempts_with_deliveries', 'number_of_newattempts_with_deliveries', 'number_of_moved_deadlines_with_no_delivery'")
for subject in subject_set:
for period in subject.active_period_objects:
for assignment in get_all_assignments_in_period(period):
print_assignment_statistics(assignment)
assignments_with_similar_shortname = get_assignments_with_matching_shortname_in_previous_periods(
subject, assignment)
for assignment_with_similar_shortname in assignments_with_similar_shortname:
print_assignment_statistics(assignment_with_similar_shortname)
|
from torch.nn.parallel import DataParallel, DistributedDataParallel
from rflib.utils import Registry
MODULE_WRAPPERS = Registry('module wrapper')
MODULE_WRAPPERS.register_module(module=DataParallel)
MODULE_WRAPPERS.register_module(module=DistributedDataParallel)
|
"""
Converting simple structured data from XML or JSON into authorityspoke objects.
These functions will usually be called by functions from the io.loaders module
after they import some data from a file.
"""
from typing import Any, NamedTuple
from typing import Dict, List, Optional, Tuple, Sequence, Union
from anchorpoint.textselectors import TextQuoteSelector
from legislice.download import Client
from legislice.enactments import AnchoredEnactmentPassage, RawEnactment
from nettlesome.entities import Entity
from authorityspoke.decisions import Decision, DecisionReading, RawDecision
from authorityspoke.facts import Fact, Exhibit, Evidence, Allegation, Pleading
from authorityspoke.holdings import Holding, RawHolding
from authorityspoke.opinions import (
AnchoredHoldings,
EnactmentWithAnchors,
TermWithAnchors,
HoldingWithAnchors,
)
from authorityspoke.facts import RawFactor
from authorityspoke.io.name_index import index_names, Mentioned, collect_enactments
from authorityspoke.io.text_expansion import expand_shorthand
RawSelector = Union[str, Dict[str, str]]
FACTOR_SUBCLASSES = {
class_obj.__name__: class_obj
for class_obj in (Allegation, Entity, Exhibit, Evidence, Fact, Pleading)
}
def read_fact(record: RawFactor) -> Fact:
r"""
Construct a :class:`Fact` after loading a dict from YAML.
:param record:
parameter values to pass to :class:`authorityspoke.schemas_yaml.FactSchema`\.
:returns:
a :class:`Fact`, with optional mentioned factors
"""
record = expand_shorthand(record)
record, mentioned = index_names(record)
expanded = expand_factor(record, mentioned)
return Fact(**expanded)
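# Illustrative usage sketch (hypothetical record; assumes the brace shorthand for
# Entities that expand_shorthand above is designed to expand):
#
#   fact = read_fact({"content": "{Alice} sent a letter to {Bob}"})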
class HoldingsIndexed(NamedTuple):
"""Lists :class:`.Holding` objects with corresponding text selectors."""
holdings: List[Holding]
mentioned: Mentioned
holding_anchors: List[List[TextQuoteSelector]]
def collect_anchors_from_index(object_index, field_name: str):
"""Get text anchors out of an index of terms or enactments."""
result = []
for key, value in object_index.items():
if value.get("anchors"):
anchored_object: Dict[str, Any] = {}
anchors = value.pop("anchors")
if isinstance(anchors, List):
anchors = [anchor for anchor in anchors if anchor != "|"]
anchored_object["anchors"] = anchors
anchored_object[field_name] = value
result.append(anchored_object)
return result, object_index
def read_holdings_with_anchors(
record: Dict[str, Union[List[RawHolding], List[RawSelector]]],
client: Optional[Client] = None,
) -> AnchoredHoldings:
r"""
Load a list of Holdings from JSON, with text links.
:param record:
a list of dicts representing holdings, in the JSON input format
:param client:
Legislice client for downloading missing fields from `record`
:returns:
a namedtuple listing :class:`.Holding` objects with
a list matching :class:`.Holding`\s to selectors and
an index matching :class:`.Factor`\s to selectors.
"""
(
holdings,
enactment_anchors,
factor_anchors,
holding_anchors,
) = extract_anchors_from_holding_record(record, client)
holdings_with_anchors = []
for i, holding in enumerate(holdings):
new = HoldingWithAnchors(holding=holding, anchors=holding_anchors[i])
holdings_with_anchors.append(new)
return AnchoredHoldings(
holdings=holdings_with_anchors,
named_anchors=factor_anchors,
enactment_anchors=enactment_anchors,
)
def expand_factor(
record: Union[str, RawFactor], factor_index: Mentioned
) -> Union[str, RawFactor]:
"""Expand fields of Factor from index of mentioned factors."""
to_expand = [
"statement",
"statement_attribution",
"fact",
"offered_by",
"exhibit",
"to_effect",
"filer",
"pleading",
]
expanded = (
factor_index.get_if_present(record) if isinstance(record, str) else record
)
if not isinstance(expanded, Dict):
return expanded
if "terms" in expanded:
expanded["terms"] = expand_names(expanded["terms"], factor_index)
for field in to_expand:
if field in expanded:
expanded[field] = expand_factor(expanded[field], factor_index)
return expanded
def expand_names(
record: List[Union[str, RawFactor]], factor_index: Mentioned
) -> List[RawFactor]:
r"""Expand a list of names into a list of factors."""
if isinstance(record, str):
record = [record]
if isinstance(record, bool):
return record
result = []
for name in record:
result.append(expand_factor(name, factor_index=factor_index))
return result
def expand_enactments(
record: List[Union[str, RawEnactment]], enactment_index: Mentioned
) -> List[RawEnactment]:
r"""
Expand a list of enactments into a list of dicts.
:param record:
a list of enactments, either as strings or dicts
:param enactment_index:
a dict of names to enactments
:returns:
a list of dicts representing enactments
"""
return [enactment_index.get_if_present(name) for name in record]
def walk_tree_and_expand(
obj: Union[Dict, List], mentioned: Mentioned, ignore: Sequence[str] = ()
) -> Union[Dict, List]:
"""
Traverse tree of dicts and lists, and modify each node.
:param obj: the object to traverse
    :param mentioned:
        index mapping names to the expanded objects they refer to
    :param ignore: the names of keys that should not be explored
    :returns: a version of the tree with every name reference expanded from `mentioned`
"""
if isinstance(obj, str):
obj = mentioned.get_if_present(obj)
if isinstance(obj, List):
obj = [mentioned.get_if_present(item) for item in obj]
return [walk_tree_and_expand(item, mentioned, ignore) for item in obj]
if isinstance(obj, Dict):
obj_dict: Dict = {}
for key, value in obj.items():
if key not in ignore:
obj_dict[key] = mentioned.get_if_present(value)
else:
obj_dict[key] = value
for key, value in obj_dict.items():
if isinstance(value, (Dict, List)) and key not in ignore:
obj_dict[key] = walk_tree_and_expand(value, mentioned, ignore)
return obj_dict
return obj
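# Illustrative sketch (assumes Mentioned.get_if_present returns the stored value
# for a known name and returns its argument unchanged otherwise). With
# mentioned = Mentioned({"alpha": {"type": "Entity", "name": "alpha"}}) and
# "name" ignored (as expand_holding does below), the call
# walk_tree_and_expand({"terms": ["alpha"]}, mentioned, ignore=("name",))
# would yield {"terms": [{"type": "Entity", "name": "alpha"}]}.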
def expand_holding(
record: RawHolding, factor_index: Mentioned, enactment_index: Mentioned
) -> RawHolding:
"""Expand one holding from index of expanded terms and enactments."""
new_index = Mentioned({**factor_index, **enactment_index})
return walk_tree_and_expand(
record,
mentioned=new_index,
ignore=["predicate", "enactment", "selection", "name"],
)
def expand_holdings(
record: List[Union[str, RawHolding]],
factor_index: Mentioned,
enactment_index: Mentioned,
) -> List[RawHolding]:
"""Expand holdings from index of expanded terms and enactments."""
if isinstance(record, dict):
record = [record]
holdings = [factor_index.get_if_present(holding) for holding in record]
holdings = [
expand_holding(
holding, factor_index=factor_index, enactment_index=enactment_index
)
for holding in holdings
]
return holdings
def extract_anchors_from_holding_record(
record: List[RawHolding], client: Optional[Client] = None
) -> Tuple[
List[RawHolding],
List[EnactmentWithAnchors],
List[TermWithAnchors],
List[Dict[str, str]],
]:
r"""
Load a list of Holdings from JSON, with text links.
:param record:
a list of dicts representing holdings, in the JSON input format
:param client:
Legislice client for downloading missing fields from `record`
:returns:
a tuple of four objects containing holdings, terms, enactments,
and anchors.
"""
record_post_enactments, enactment_index = collect_enactments(record)
if client:
enactment_index_post_client = client.update_entries_in_enactment_index(
enactment_index
)
else:
enactment_index_post_client = enactment_index
enactment_anchors, enactment_index_post_anchors = collect_anchors_from_index(
enactment_index_post_client, "passage"
)
enactment_result = []
for anchor in enactment_anchors:
anchor["passage"] = enactment_index_post_anchors.get_if_present(
anchor["passage"]
)
enactment_result.append(EnactmentWithAnchors(**anchor))
record_post_terms, factor_index = index_names(record_post_enactments)
factor_anchors, factor_index_post_anchors = collect_anchors_from_index(
factor_index, "term"
)
factor_result = []
for anchor in factor_anchors:
anchor["term"] = expand_holding(
anchor["term"],
factor_index=factor_index_post_anchors,
enactment_index=enactment_index_post_anchors,
)
factor_result.append(TermWithAnchors(**anchor))
factor_anchors = [TermWithAnchors(**anchor) for anchor in factor_anchors]
expanded = expand_holdings(
record_post_terms,
factor_index=factor_index_post_anchors,
enactment_index=enactment_index_post_anchors,
)
holding_anchors = [holding.pop("anchors", None) for holding in expanded]
result = []
for holding in expanded:
result.append(Holding(**holding))
return result, enactment_result, factor_result, holding_anchors
def read_holdings(
record: List[RawHolding], client: Optional[Client] = None
) -> List[Holding]:
r"""
Load a list of :class:`Holdings`\s from JSON.
:param record:
a list of dicts representing holdings, in the JSON input format
    :param client:
        Legislice client for downloading missing fields from `record`,
        such as the :class:`.Enactment`\s referenced by :class:`.Holding`\s.
:returns:
a list of :class:`.Holding` objects
"""
(
holdings,
enactment_anchors,
factor_anchors,
holding_anchors,
) = extract_anchors_from_holding_record(record, client)
return holdings
def read_decision(decision: Union[RawDecision, Decision]) -> DecisionReading:
r"""
Create and return a :class:`~authorityspoke.decisions.DecisionReading` from a dict API response.
Relies on the JSON format from the `Caselaw Access Project
API <https://api.case.law/v1/cases/>`_.
:param decision_dict:
A dict created from a Caselaw Access Project API response.
"""
if not isinstance(decision, Decision):
decision = Decision(**decision)
return DecisionReading(decision=decision)
|
from letra._helpers import _check_for_duplicate_templates
from letra import Label
from ..helpers import stub_labels
from pytest import raises
def test__check_for_duplicate_templates_raises_on_duplicates():
dup_label = Label(name="dup", description="oops", color="a24b2a")
dup_first_copy = Label(name="dup", description="big oops", color="ffffff")
dup_second_copy = Label(name="dup", description="so bad", color="aaaaaa")
unique_label = Label(name="unique", description="special", color="abcdef")
second_dup = Label(
name="repeated", description="not again", color="bbbbbb"
)
second_dup_copy = Label(
name="repeated", description="yikes", color="cccccc"
)
templates = [
dup_label,
second_dup,
dup_first_copy,
second_dup_copy,
unique_label,
dup_second_copy,
]
exp_err = (
"Found duplicative label templates. The `name` of each "
"label template must be unique.\nDuplicate templates found:\n"
f"{dup_label.name}: 3 instances\n"
f"{second_dup.name}: 2 instances\n"
)
with raises(ValueError) as err:
_check_for_duplicate_templates(templates)
assert (str(err.value)) == exp_err
def test__check_for_duplicate_templates_does_not_raise_for_unique():
_check_for_duplicate_templates(stub_labels)
|
# Generated by Django 2.2.10 on 2020-04-06 13:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('salespipes', '0029_auto_20200330_0700'),
('contacts', '0029_historicalcandidate_historicalclient'),
('commissions', '0011_auto_20200328_1143'),
]
operations = [
migrations.CreateModel(
name='HistoricalCommission',
fields=[
('is_deleted', models.BooleanField(blank=True, default=False)),
('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('date', models.DateField()),
('rate_role_type', models.CharField(max_length=100, null=True)),
('rate_used', models.DecimalField(decimal_places=2, max_digits=10)),
('amount', models.DecimalField(decimal_places=2, max_digits=10)),
('is_paid', models.BooleanField(default=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('employee', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contacts.Employee')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('pipeline', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='salespipes.Pipeline')),
],
options={
'verbose_name': 'historical commission',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
def binary_search(arr, ele):
first = 0
last = len(arr) - 1
found = False
while first <= last and found == False:
mid = (first + last) // 2
if arr[mid] == ele:
found = True
else:
if arr[mid] > ele:
last = mid - 1
else:
first = mid + 1
return found
print(binary_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4))
|
from django.forms import Form
from rest_framework.response import Response
from rest_framework.serializers import HyperlinkedModelSerializer
from backend.models import Arena, Card
def raise_error(message, status=500):
return Response({'error': {'message': message}}, status=status)
def refresh_error(message, status=403):
    return raise_error('Your request cannot be processed, ' + message, status=status)
def not_found_error(object, id, status=404):
return raise_error("%s %s was not found." % (object.title(), id), status=status)
def form_error(form: Form, status=403):
return Response({
'success': False,
'errors': dict(form.errors.items()),
}, status=status)
class ArenaSerializer(HyperlinkedModelSerializer):
class Meta:
model = Arena
fields = ('key', 'name', 'arena', 'min_trophy_limit', 'max_trophy_limit')
class CardSerializer(HyperlinkedModelSerializer):
class Meta:
model = Card
fields = ('key', 'name', 'rarity', 'arena', 'elixir', 'type', 'image')
|
import sys
import re
import pandas as pd
util_path = str ( sys.argv[1])
cpu_model_path = str ( sys.argv[2])
gpu_model_path = str ( sys.argv[3])
cpu_gpu_timeline_path = str ( sys.argv[4])
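# Expected input layouts, inferred from the regexes below (treat this as an
# assumption rather than a spec):
#   cpu_model_path : "freq,component_current" lines plus one "base," line
#                    giving the CPU base current
#   gpu_model_path : "freq,busy_current,idle_current" lines
#   util_path      : "  start end avg | (freq:util),... | (freq:busy:idle),..."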
cpu_model = {}
fp = open ( cpu_model_path, 'r')
for line in fp:
m = re.match ( '(\S+),(\S+)', line)
if m != None:
freq = int ( m.group ( 1))
comp = float ( m.group ( 2))
cpu_model[freq] = comp
continue
m = re.match ( '(\S+),$', line)
if m != None:
cpu_base = float ( m.group ( 1))
gpu_busy_model = {}
gpu_idle_model = {}
fp = open ( gpu_model_path, 'r')
for line in fp:
m = re.match ( '(\S+),(\S+),(\S+)', line)
if m != None:
freq = int ( m.group ( 1))
busy = float ( m.group ( 2))
        idle = float ( m.group ( 3))
gpu_busy_model[freq] = busy
gpu_idle_model[freq] = idle
df = pd.DataFrame ( columns=['Time','CPU','GPU'])
fp = open ( util_path, 'r')
for line in fp:
m = re.match ( '\s+(\S+)\s+(\S+)\s+(\S+)\s+\| (.*) \| (.*)', line)
if m != None:
start_time = float ( m.group ( 1))
end_time = float ( m.group ( 2))
avg_current = float ( m.group ( 3))
cpu_current = cpu_base
cpu_utils = m.group ( 4).split ( ',')[:-1]
for cpu_util in cpu_utils:
m_sub = re.match ( '\s*\((\S+):(\S+)\)', cpu_util)
if m_sub != None:
freq = int ( m_sub.group ( 1))
util = float ( m_sub.group ( 2))
sub_current = cpu_model[freq]*util
cpu_current += sub_current
gpu_current = 0
gpu_utils = m.group ( 5).split ( ',')[:-1]
for gpu_util in gpu_utils:
m_sub = re.match ( '\s*\((\S+):(\S+):(\S+)\)', gpu_util)
if m_sub != None:
freq = int ( m_sub.group ( 1))
busy_util = float ( m_sub.group ( 2))
idle_util = float ( m_sub.group ( 2))
sub_current = gpu_busy_model[freq]*busy_util + gpu_idle_model[freq]*idle_util
gpu_current += sub_current
df.loc[df.shape[0]] = [ end_time, cpu_current, gpu_current]
fp.close ()
df.to_csv ( cpu_gpu_timeline_path, index=False)
|
from django.contrib import admin
from .models import member
# Register your models here.
admin.site.index_title="DASHBOARD"
admin.site.site_title="ADMIN"
admin.site.site_header="Nidaghatta Family-ADMIN"
class memberAdmin(admin.ModelAdmin):
list_display = ('name',"image")
list_filter = ('is_requested_to_delete',)
admin.site.register(member,memberAdmin)
|
from sys import stdin,stdout
# stdin = open("/home/shiva/Learning/1.txt","r")
# stdout = open("/home/shiva/Learning/2.txt","w")
primelist = [2]
append = primelist.append
def mysieve(n):
primes = [True] * (n);
sqrtn = (int(n**.5)+1) | 1
for i in range(3,sqrtn,2):
if primes[i]:
append(i)
primes[i*i::2*i] = [False] * ((n-(i*i)-1)//(2*i)+1)
for i in range(sqrtn,n,2):
if primes[i]:
append(i)
return primelist
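# isSumTwoSquare() below uses the classical sum-of-two-squares criterion:
# n = a^2 + b^2 has a solution exactly when every prime p with p % 4 == 3
# divides n to an even power (e.g. 45 = 3^2 * 5 = 6^2 + 3^2).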
def isSumTwoSquare(n):
for i in primelist:
cnt = 0
while n%i==0:
            n //= i
cnt+=1
if i%4==3 and cnt%2==1:
return False
if n%4==3:
return False
return True
mysieve(10**6+1)
# stdout.write(str(primelist))
t = int(stdin.readline())
for i in range(t):
flag = False;
n = int(stdin.readline())
    if int(n**.5)**2 == n:  # perfect square: n = k*k + 0*0
stdout.write("Yes\n")
else:
if isSumTwoSquare(n):
stdout.write("Yes\n")
else:
stdout.write("No\n")
|
import numpy
from cupy import _core
from cupy._core import _fusion_interface
from cupy._core import fusion
from cupy._sorting import search
from cupy_backends.cuda.api import runtime
def copyto(dst, src, casting='same_kind', where=None):
"""Copies values from one array to another with broadcasting.
This function can be called for arrays on different devices. In this case,
    ``casting``, ``where``, and broadcasting are not supported, and an
    exception is raised if these are used.
Args:
dst (cupy.ndarray): Target array.
src (cupy.ndarray): Source array.
casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
where (cupy.ndarray of bool): If specified, this array acts as a mask,
and an element is copied only if the corresponding element of
``where`` is True.
.. seealso:: :func:`numpy.copyto`
"""
src_type = type(src)
src_is_python_scalar = src_type in (
int, bool, float, complex,
fusion._FusionVarScalar, _fusion_interface._ScalarProxy)
if src_is_python_scalar:
src_dtype = numpy.dtype(type(src))
can_cast = numpy.can_cast(src, dst.dtype, casting)
else:
src_dtype = src.dtype
can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)
if not can_cast:
raise TypeError('Cannot cast %s to %s in %s casting mode' %
(src_dtype, dst.dtype, casting))
if fusion._is_fusing():
if where is None:
_core.elementwise_copy(src, dst)
else:
fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
return
if where is not None:
_core.elementwise_copy(src, dst, _where=where)
return
if dst.size == 0:
return
if src_is_python_scalar:
dst.fill(src)
return
if _can_memcpy(dst, src):
dst.data.copy_from_async(src.data, src.nbytes)
return
device = dst.device
prev_device = runtime.getDevice()
try:
runtime.setDevice(device.id)
if src.device != device:
src = src.copy()
_core.elementwise_copy(src, dst)
finally:
runtime.setDevice(prev_device)
def _can_memcpy(dst, src):
c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous
f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous
return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \
dst.size == src.size
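# A minimal usage sketch (kept as a comment so the module stays import-only;
# it assumes a CUDA device is available):
#
#     import cupy
#     dst = cupy.zeros(4, dtype=cupy.float32)
#     src = cupy.arange(4, dtype=cupy.float32)
#     copyto(dst, src, where=cupy.asarray([True, False, True, False]))
#     # dst is now [0., 0., 2., 0.]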
|
# -*- coding: utf-8 -*-
from mamaprofile.myprofile import profile
|
# coding: utf-8
# BlackSmith mark.2
# exp_name = "allweb" # /code.py v.x28
# Id: 26~27c
# Code © (2011-2013) by WitcherGeralt [alkorgun@gmail.com]
class expansion_temp(expansion):
def __init__(self, name):
expansion.__init__(self, name)
UserAgents = UserAgents
import htmlentitydefs, json
UserAgent = ("User-Agent", "%s/%s" % (ProdName[:10], CapsVer))
UserAgent_Moz = (UserAgent[0], "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36".format(UserAgents.get(DefLANG, "en-US")))
Web.Opener.addheaders = [UserAgent_Moz]
edefs = dict()
for Name, Numb in htmlentitydefs.name2codepoint.iteritems():
edefs[Name] = unichr(Numb)
del Name, Numb
edefs["'"] = unichr(39)
TagsDesc = {
"<br>": chr(10),
"<br />": chr(10)
}
compile_st = compile__("<[^<>]+?>")
compile_ehtmls = compile__("&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
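	# sub_ehtmls() below resolves numeric ("&#NNN;" / "&#xHH;") and named
	# ("&amp;", "&nbsp;", ...) HTML entities via the edefs table built above;
	# decodeHTML() additionally strips tags and applies TagsDesc.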
def sub_ehtmls(self, data):
if data.count("&"):
def e_sb(co):
co = co.group(1)
if co.startswith("#"):
if chr(120) == co[1].lower():
Char, c06 = co[2:], 16
else:
Char, c06 = co[1:], 10
try:
Numb = int(Char, c06)
assert (-1 < Numb < 65535)
Char = unichr(Numb)
except Exception:
Char = self.edefs.get(Char, "&%s;" % co)
else:
Char = self.edefs.get(co, "&%s;" % co)
return Char
data = self.compile_ehtmls.sub(e_sb, data)
return data
def decodeHTML(self, data):
data = sub_desc(data, self.TagsDesc)
data = self.compile_st.sub("", data)
data = self.sub_ehtmls(data)
return data.strip()
def command_jc(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
cName = body.lower()
if cName.count("@conf"):
cName = (cName.split("@conf"))[0]
else:
cName = (source[1].split("@conf"))[0]
Opener = Web("http://jc.jabber.ru/search.html?", [("search", cName.encode("utf-8"))])
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8")
comp = compile__("<li>((?:.|\s)+?)</li>", 16)
list = comp.findall(data)
if list:
ls = []
for numb, line in enumerate(list, 1):
line = line.strip()
ls.append("%d) %s" % (numb, line))
answer = chr(10) + self.decodeHTML(str.join(chr(10)*2, ls))
else:
answer = self.AnsBase[5]
else:
answer = AnsBase[0]
Answer(answer, stype, source, disp)
gCache = []
sMark = 1
tMark = 2
def command_google(self, stype, source, body, disp):
if body:
if (chr(42) != body):
Opener = Web("http://ajax.googleapis.com/ajax/services/search/web?", [("v", "1.0"), ("q", body.encode("utf-8"))])
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
try:
list = data["responseData"]["results"]
desc = list.pop(0)
except (TypeError, LookupError):
answer = self.AnsBase[5]
else:
ls = []
ls.append(desc.get("title", ""))
ls.append(desc.get("content", ""))
ls.append(desc.get("unescapedUrl", ""))
answer = self.decodeHTML(str.join(chr(10), ls))
if list:
source_ = get_source(source[1], source[2])
if source_:
for ls in self.gCache:
if ls[:2] == (source_, self.sMark):
self.gCache.pop(self.gCache.index(ls))
break
Numb = (len(Clients.keys())*8)
while len(self.gCache) >= Numb:
self.gCache.pop(0)
self.gCache.append((source_, self.sMark, list))
answer += self.AnsBase[4] % len(list)
else:
source_ = get_source(source[1], source[2])
if source_:
list = []
for ls in self.gCache:
if ls[:2] == (source_, self.sMark):
list = self.gCache.pop(self.gCache.index(ls))[2]
break
if list:
desc = list.pop(0)
ls = []
ls.append(desc.get("title", ""))
ls.append(desc.get("content", ""))
ls.append(desc.get("unescapedUrl", ""))
answer = self.decodeHTML(str.join(chr(10), ls))
if list:
self.gCache.append((source_, self.sMark, list))
answer += self.AnsBase[4] % len(list)
else:
answer = self.AnsBase[2]
else:
answer = self.AnsBase[3]
else:
answer = AnsBase[1]
Answer(answer, stype, source, disp)
LangMap = LangMap
def command_google_translate(self, stype, source, body, disp):
if body:
if (chr(42) != body):
body = body.split(None, 2)
if len(body) == 3:
lang0, langX, body = body
if langX in self.LangMap and (lang0 in self.LangMap or lang0 == "auto"):
desc = (("client", "bs-2"),
("sl", lang0),
("tl", langX),
("text", body.encode("utf-8")))
Opener = Web("http://translate.google.com/translate_a/t?", desc, headers = {"Accept-Charset": "utf-8"})
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
try:
body = data["sentences"][0]["trans"]
except (TypeError, LookupError):
answer = self.AnsBase[1]
else:
if lang0 == "auto":
try:
lang0 = data["src"]
except KeyError:
pass
answer = "%s -> %s:\n%s" % (lang0, langX, body)
try:
list = data["dict"][0]["terms"]
except LookupError:
pass
else:
source_ = get_source(source[1], source[2])
if source_:
if body in list:
list.pop(list.index(body))
if list:
for ls in self.gCache:
if ls[:2] == (source_, self.tMark):
self.gCache.pop(self.gCache.index(ls))
break
Numb = (len(Clients.keys())*8)
while len(self.gCache) >= Numb:
self.gCache.pop(0)
self.gCache.append((source_, self.tMark, list))
answer += self.AnsBase[7] % len(list)
else:
answer = self.AnsBase[6]
else:
answer = AnsBase[2]
else:
source_ = get_source(source[1], source[2])
if source_:
list = []
for ls in self.gCache:
if ls[:2] == (source_, self.tMark):
list = self.gCache.pop(self.gCache.index(ls))[2]
break
if list:
answer = self.decodeHTML(list.pop(0))
if list:
self.gCache.append((source_, self.tMark, list))
answer += self.AnsBase[7] % len(list)
else:
answer = self.AnsBase[2]
else:
answer = self.AnsBase[3]
else:
answer = self.AnsBase[8] + str.join(chr(10), ["%s - %s" % (k, l) for k, l in sorted(self.LangMap.items())])
if stype == sBase[1]:
Message(source[0], answer, disp)
answer = AnsBase[11]
Answer(answer, stype, source, disp)
kinoHeaders = {
"Host": "m.kinopoisk.ru",
"Accept": "text/html",
"Accept-Charset": "cp1251",
"Accept-Language": "ru"
}
C3oP = "СЗоР"
def command_kino(self, stype, source, body, disp):
if body:
ls = body.split()
c1st = (ls.pop(0)).lower()
if c1st in ("top250", "топ250".decode("utf-8")):
if ls:
limit = apply(int, (ls.pop(0),))
if limit <= 5:
limit = 5
else:
limit = None
kinoHeaders = self.kinoHeaders.copy()
kinoHeaders["Host"] = "www.kinopoisk.ru"
Opener = Web("http://www.kinopoisk.ru/level/20/", headers = kinoHeaders)
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
list = get_text(data, "<tr height=25>", "</table>")
if list:
comp = compile__('<a href="/film/\d+?/" class="all">(.+?)</a>(?:.|\s)+' \
'?<a href="/film/\d+?/votes/" class="continue">(.+?)</a> <span.*?>(.+?)</span>', 16)
list = comp.findall(list)
if list:
ls = ["\n[#] [Name, Year] [Rating] (Votes)"]
for Number, (Name, Numb, Count) in enumerate(list, 1):
ls.append("%d) %s - %s %s" % (Number, self.sub_ehtmls(Name), Numb, sub_desc(Count, [" "])))
if limit and limit <= Number:
break
if not limit or limit > 25:
if stype == sBase[1]:
Answer(AnsBase[11], stype, source, disp)
Top250 = str.join(chr(10), ls)
Message(source[0], Top250, disp)
else:
answer = str.join(chr(10), ls)
elif data.count(self.C3oP):
answer = self.AnsBase[-1]
else:
answer = self.AnsBase[1]
elif isNumber(body):
Opener = Web("http://m.kinopoisk.ru/movie/%d" % int(body), headers = self.kinoHeaders.copy())
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
rslt = get_text(data, "<p class=\"title\">", "</div>")
if rslt:
rslt = self.decodeHTML(rslt)
ls = ["\->"]
for line in rslt.splitlines():
line = line.strip()
if line:
if line[0].islower():
line = "{1}{0}".format(line[1:], line[0].upper())
ls.append(line)
answer = str.join(chr(10), ls)
elif data.count(self.C3oP):
answer = self.AnsBase[-1]
else:
answer = self.AnsBase[5]
else:
body = (body if chr(42) != c1st else body[2:].strip())
if body:
body = body.encode("cp1251")
Opener = Web("http://m.kinopoisk.ru/search/%s" % Web.One.quote_plus(body), headers = self.kinoHeaders.copy())
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__("<a href=\"http://m.kinopoisk.ru/movie/(\d+?)/\">(.+?)</a>")
list = comp.findall(data)
if list:
ls = ["\n[#] [Name, Year] (#id)"]
for Number, (Numb, Name) in enumerate(list, 1):
ls.append("%d) %s (#%s)" % (Number, self.sub_ehtmls(Name), Numb))
answer = str.join(chr(10), ls)
elif data.count(self.C3oP):
answer = self.AnsBase[-1]
else:
answer = self.AnsBase[5]
else:
answer = AnsBase[2]
else:
answer = AnsBase[1]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
IMDbHeaders = {"Accept-Language": "%s,en" % UserAgents.get(DefLANG, "en-US")}
IMDbRequest = { # imdbapi.org
"type": "json",
# "id": "tt", # get info by ID
# "q": "any title", # for the search
# "limit": str(choice(range(1, 11))), # for the search
"plot": "none", # or "simple" or "full"
"episode": "0", # or "1"
"lang": UserAgents.get(DefLANG, "en-US"),
"aka": "simple", # or "full"
"release": "simple", # or "full"
}
def command_imdb(self, stype, source, body, disp):
if body:
ls = body.split()
c1st = (ls.pop(0)).lower()
if c1st in ("top250", "топ250".decode("utf-8")):
if ls:
limit = apply(int, (ls.pop(0),))
if limit <= 5:
limit = 5
else:
limit = None
Opener = Web("http://m.imdb.com/chart/top_json", headers = self.IMDbHeaders)
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
try:
data = data["list"]
except (TypeError, LookupError):
answer = self.AnsBase[1]
else:
ls = ["\n[#] [Name, Year] [Rating] (Votes)"]
comp = compile__("([\d\.,]+).*\s([\d\.,]+)")
try:
assert isinstance(data, list)
for Number, desc in enumerate(data, 1):
Name = desc["title"]
Year = desc["extra"]
Numb, Count = comp.search(desc["detail"]).groups()
ls.append("%s) %s %s - %s (%s)" % (Number, Name, Year, Numb, Count))
if limit and limit <= Number:
break
except (AssertionError, TypeError, LookupError):
answer = self.AnsBase[5]
else:
if not limit or limit > 25:
if stype == sBase[1]:
Answer(AnsBase[11], stype, source, disp)
Top250 = str.join(chr(10), ls)
Message(source[0], Top250, disp)
else:
answer = str.join(chr(10), ls)
elif isNumber(body):
IMDbRequest = self.IMDbRequest.copy()
IMDbRequest["id"] = ("tt" + body)
IMDbRequest["plot"] = "full"
Opener = Web("http://imdbapi.org/?", IMDbRequest.items())
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
ls = ["\->"]
try:
assert isinstance(data, dict)
ls.append("%s, %s, %s." % (data["title"], data["year"], str.join(chr(32), data.get("runtime", ("??",)))))
ls.append(", ".join(data["genres"]))
ls.append(", ".join(data["country"]))
temp = data.get("directors")
if temp:
ls.append("Directors: " + ", ".join(temp[:3]))
temp = data.get("writers")
if temp:
ls.append("Writers: " + ", ".join(temp[:3]))
temp = data.get("actors")
if temp:
ls.append("Stars: " + ", ".join(temp[:5]))
temp = data.get("plot") or data.get("plot_simple")
if temp:
ls.append(unichr(171) + temp + unichr(187))
temp = data.get("rating")
if temp:
ls.append("IMDb rating: %s (%s)" % (temp, data.get("rating_count", 0)))
except (AssertionError, TypeError, LookupError):
answer = self.AnsBase[5]
else:
answer = self.sub_ehtmls(str.join(chr(10), ls))
else:
body = (body if chr(42) != c1st else body[2:].strip())
if body:
body = body.encode("utf-8")
IMDbRequest = self.IMDbRequest.copy()
IMDbRequest["q"] = body
IMDbRequest["limit"] = "10"
Opener = Web("http://imdbapi.org/?", IMDbRequest.items())
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
try:
assert isinstance(data, list)
data = sorted([(desc.get("rating"),
desc["title"],
desc["year"],
desc["imdb_id"][2:]) for desc in data], reverse = True)
except (AssertionError, TypeError, LookupError):
answer = self.AnsBase[5]
else:
ls = ["\n[#] [Name, Year] (#id)"]
for Number, (Numb, Name, Year, ID) in enumerate(data, 1):
ls.append("%d) %s, %s (#%s)" % (Number, Name, Year, ID))
answer = self.sub_ehtmls(str.join(chr(10), ls))
else:
answer = AnsBase[2]
else:
answer = AnsBase[1]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_python(self, stype, source, body, disp):
Opener = Web("http://python.org/")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("koi8-r")
data = get_text(data, "<h2 class=\"news\">", "</div>")
if data:
data = self.decodeHTML(data)
ls = []
for line in data.splitlines():
if line.strip():
ls.append(line)
answer = str.join(chr(10), ls)
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_url_shorten(self, stype, source, body, disp):
if body:
Opener = Web("http://is.gd/create.php?", [("format", "json"), ("url", body.encode("utf-8"))])
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
try:
data = self.json.loads(data)
except Exception:
answer = self.AnsBase[1]
else:
try:
answer = data["shorturl"]
except KeyError:
try:
answer = data["errormessage"]
except KeyError:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
Answer(answer, stype, source, disp)
downloadLock = ithr.allocate_lock()
def download_process(self, info, blockNumb, blockSize, size, fb):
if not blockNumb:
Print("\n")
Print(str(info), color3)
elif size >= blockSize:
fb[3] += blockSize
if not fb[4]:
fb[4] = (size / 100)
if fb[4] in (0, 1):
fb[4] = 2
else:
residue = fb[4] % blockSize
if fb[4] == residue:
fb[4] = 2
while fb[4] < residue:
fb[4] *= 2
elif residue:
fb[4] -= residue
if fb[3] >= size:
Print("Done.", color3)
elif not fb[3] % fb[4]:
Pcts = fb[3] / fb[4]
if Pcts == 100:
Pcts = 99.95
Print("loaded - {0}%".format(Pcts), color4)
Time = time.time()
if Time - fb[1] >= 30:
fb[1] = Time
Message(fb[0], self.AnsBase[9].format(Pcts), fb[2])
def command_download(self, stype, source, body, disp):
if body:
if not self.downloadLock.locked():
with self.downloadLock:
body = body.split()
if len(body) == 1:
link = body.pop()
folder = None
filename = None
elif len(body) == 2:
link, folder = body
filename = None
else:
link, folder, filename = body[:3]
if not enough_access(source[1], source[2], 8):
folder = "Downloads"
if filename:
filename = os.path.basename(filename.rstrip("\\/"))
if folder:
folder = os.path.normpath(folder)
if AsciiSys:
folder = folder.encode("utf-8")
if not os.path.isdir(folder):
try:
os.makedirs(folder)
except Exception:
link = None
if AsciiSys:
folder = folder.decode("utf-8")
if link:
Message(source[0], self.AnsBase[10], disp)
Opener = Web(link)
try:
data = Opener.download(filename, folder, self.download_process, [source[0], time.time(), disp, 0, 0], self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except SelfExc as exc:
answer = "Error! %s." % exc[0].capitalize()
except Exception:
answer = self.AnsBase[0]
else:
answer = "Done.\nPath: %s\nSize: %s" % (data[0], Size2Text(data[2]))
else:
answer = AnsBase[2]
else:
answer = self.AnsBase[11]
else:
answer = AnsBase[1]
Answer(answer, stype, source, disp)
PasteLangs = PasteLangs
def command_paste(self, stype, source, body, disp):
if body:
args = body.split(None, 1)
arg0 = (args.pop(0)).lower()
if arg0 in self.PasteLangs:
if args:
body = args.pop()
else:
body = None
answer = AnsBase[2]
else:
arg0 = "text"
if body:
Opener = Web("http://paste.ubuntu.com/", data = Web.encode({"poster": ProdName, "syntax": arg0, "content": body.encode("utf-8")}))
try:
fp = Opener.open(self.UserAgent)
answer = fp.url
fp.close()
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[8] + str.join(chr(10), ["%s - %s" % (k, l) for k, l in sorted(self.PasteLangs.items())])
if stype == sBase[1]:
Message(source[0], answer, disp)
answer = AnsBase[11]
Answer(answer, stype, source, disp)
if DefLANG in ("RU", "UA"):
def command_chuck(self, stype, source, body, disp):
if body and isNumber(body):
Opener = Web("http://chucknorrisfacts.ru/quote/%d" % int(body))
else:
Opener = Web("http://chucknorrisfacts.ru/random")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__("<a href=/quote/(\d+?)>.+?<blockquote>(.+?)</blockquote>", 16)
data = comp.search(data)
if data:
answer = self.decodeHTML("#%s\n%s" % data.groups())
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_bash(self, stype, source, body, disp):
if body and isNumber(body):
Opener = Web("http://bash.im/quote/%d" % int(body))
else:
Opener = Web("http://bash.im/random")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__('<span id="v\d+?" class="rating">(.+?)</span>(?:.|\s)+?<a href="/quote/\d+?" class="id">#(\d+?)</a>\s*?</div>\s+?<div class="text">(.+?)</div>', 16)
data = comp.search(data)
if data:
answer = self.decodeHTML("#{1} +[{0}]-\n{2}".format(*data.groups()))
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
else:
def command_chuck(self, stype, source, body, disp):
Opener = Web("http://www.chucknorrisfacts.com/all-chuck-norris-facts?page=%d" % randrange(974)) # 04:12 09.11.2012 by UTC number of pages was 974
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8")
comp = compile__("<span class=\"field-content\"><a.*?>(.+?)</a></span>", 16)
list = comp.findall(data)
if list:
answer = self.decodeHTML(choice(list))
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_bash(self, stype, source, body, disp):
if body and isNumber(body):
Opener = Web("http://bash.org/?%d" % int(body))
else:
Opener = Web("http://bash.org/?random")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("iso-8859-1")
comp = compile__('<b>#(\d+?)</b></a>\s<a.*?>\+</a>\((.+?)\)<a.*?>-</a>\s<a.*?>\[X\]</a></p><p class="qt">(.+?)</p>', 16)
data = comp.search(data)
if data:
answer = self.decodeHTML("#%s +[%s]-\n%s" % data.groups())
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_currency(self, stype, source, body, disp):
if body:
ls = body.split()
Code = (ls.pop(0)).lower()
if Code in ("code", "аббревиатура".decode("utf-8")):
if ls:
Code = (ls.pop(0)).upper()
if Code in self.CurrencyDesc:
answer = self.CurrencyDesc[Code].decode("utf-8")
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[2]
elif Code in ("list", "список".decode("utf-8")):
if stype == sBase[1]:
Answer(AnsBase[11], stype, source, disp)
Curls = ["\->"] + ["%s: %s" % desc for desc in sorted(self.CurrencyDesc.items())]
Message(source[0], str.join(chr(10), Curls), disp)
elif Code in ("calc", "перевести".decode("utf-8")):
if len(ls) >= 2:
Number = ls.pop(0)
if isNumber(Number) and ls[0].isalpha():
Number = int(Number)
Code = (ls.pop(0)).upper()
if (Code == "RUB"):
answer = "%d %s" % (Number, Code)
elif Code in self.CurrencyDesc:
Opener = Web("http://www.cbr.ru/scripts/XML_daily.asp")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__("<CharCode>%s</CharCode>\s+?<Nominal>(.+?)</Nominal>\s+?<Name>.+?</Name>\s+?<Value>(.+?)</Value>" % (Code), 16)
data = comp.search(data)
if data:
No, Numb = data.groups()
Numb = Numb.replace(chr(44), chr(46))
No = No.replace(chr(44), chr(46))
try:
Numb = (Number*(float(Numb)/float(No)))
except Exception:
answer = AnsBase[7]
else:
answer = "%.2f RUB" % (Numb)
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[2]
else:
answer = AnsBase[2]
else:
answer = AnsBase[2]
elif (Code != "rub") and Code.isalpha():
Code = Code.upper()
if Code in self.CurrencyDesc:
Opener = Web("http://www.cbr.ru/scripts/XML_daily.asp")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__("<CharCode>%s</CharCode>\s+?<Nominal>(.+?)</Nominal>\s+?<Name>.+?</Name>\s+?<Value>(.+?)</Value>" % (Code), 16)
data = comp.search(data)
if data:
No, Numb = data.groups()
answer = "%s/RUB - %s/%s" % (Code, No, Numb)
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[2]
else:
answer = AnsBase[2]
else:
Opener = Web("http://www.cbr.ru/scripts/XML_daily.asp")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
comp = compile__("<CharCode>(.+?)</CharCode>\s+?<Nominal>(.+?)</Nominal>\s+?<Name>.+?</Name>\s+?<Value>(.+?)</Value>", 16)
list = comp.findall(data)
if list:
ls, Number = ["\->"], itypes.Number()
for Code, No, Numb in sorted(list):
ls.append("%d) %s/RUB - %s/%s" % (Number.plus(), Code, No, Numb))
if stype == sBase[1]:
Answer(AnsBase[11], stype, source, disp)
Curls = str.join(chr(10), ls)
Message(source[0], Curls, disp)
else:
answer = self.AnsBase[1]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_jquote(self, stype, source, body, disp):
if body and isNumber(body):
Opener = Web("http://jabber-quotes.ru/api/read/?id=%d" % int(body))
else:
Opener = Web("http://jabber-quotes.ru/api/read/?id=random")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8")
comp = compile__("<id>(\d+?)</id>\s+?<author>(.+?)</author>\s+?<quote>(.+?)</quote>", 16)
data = comp.search(data)
if data:
Numb, Name, Quote = data.groups()
lt = chr(10)*3
answer = self.decodeHTML("Quote: #%s | by %s\n%s" % (Numb, Name, Quote))
while (lt in answer):
answer = answer.replace(lt, lt[:2])
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_ithappens(self, stype, source, body, disp):
if body and isNumber(body):
Opener = Web("http://ithappens.ru/story/%d" % int(body))
else:
Opener = Web("http://ithappens.ru/random")
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("cp1251")
data = get_text(data, "<div class=\"text\">", "</p>")
if data:
answer = self.decodeHTML(sub_desc(data, {"<p class=\"date\">": chr(32)}))
else:
answer = self.AnsBase[1]
Answer(answer, stype, source, disp)
def command_gismeteo(self, stype, source, body, disp):
if body:
ls = body.split(None, 1)
Numb = ls.pop(0)
if ls and isNumber(Numb):
Numb = int(Numb)
City = ls.pop(0)
else:
Numb = None
City = body
if -1 < Numb < 13 or not Numb:
Opener = Web("http://m.gismeteo.ru/citysearch/by_name/?", [("gis_search", City.encode("utf-8"))])
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8")
data = get_text(data, "<a href=\"/weather/", "/(1/)*?\">", "\d+")
if data:
if Numb != None:
data = str.join(chr(47), [data, str(Numb) if Numb != 0 else "weekly"])
Opener = Web("http://m.gismeteo.ru/weather/%s/" % data)
try:
data = Opener.get_page(self.UserAgent)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8")
mark = get_text(data, "<th colspan=\"2\">", "</th>")
if Numb != 0:
comp = compile__('<tr class="tbody">\s+?<th.*?>(.+?)</th>\s+?<td.+?/></td>\s+?</tr>\s+?<tr>\s+?<td.+?>(.+?)</td>\s+?</tr>\s+?<tr class="dl">\s+?<td> </td>\s+?<td class="clpersp"><p>(.*?)</p></td>\s+?</tr>\s+?<tr class="dl"><td class="left">(.+?)</td><td>(.+?)</td></tr>\s+?<tr class="dl"><td class="left">(.+?)</td><td>(.+?)</td></tr>\s+?<tr class="dl bottom"><td class="left">(.+?)</td><td>(.+?)</td></tr>', 16)
list = comp.findall(data)
if list:
ls = [(self.decodeHTML(mark) if mark else "\->")]
for data in list:
ls.append("{0}:\n\t{2}, {1}\n\t{3} {4}\n\t{5} {6}\n\t{7} {8}".format(*data))
ls.append(self.AnsBase[-2])
answer = self.decodeHTML(str.join(chr(10), ls))
else:
answer = self.AnsBase[1]
else:
comp = compile__('<tr class="tbody">\s+?<td class="date" colspan="3"><a.+?>(.+?)</a></td>\s+?</tr>\s+?<tr>\s+?<td rowspan="2"><a.+?/></a></td>\s+?<td class="clpersp"><p>(.*?)</p></td>\s+?</tr>\s+?<tr>\s+?<td.+?>(.+?)</td>', 16)
list = comp.findall(data)
if list:
ls = [(self.decodeHTML(mark) if mark else "\->")]
for data in list:
ls.append("%s:\n\t%s, %s" % (data))
ls.append(self.AnsBase[-2])
answer = self.decodeHTML(str.join(chr(10), ls))
else:
answer = self.AnsBase[1]
else:
answer = self.AnsBase[5]
else:
answer = AnsBase[2]
else:
answer = AnsBase[1]
Answer(answer, stype, source, disp)
def command_yandex_market(self, stype, source, body, disp):
if body:
ls = body.split()
c1st = (ls.pop(0)).lower()
if isNumber(c1st):
if ls:
c2nd = ls.pop(0)
if isNumber(c2nd):
Opener = Web("http://m.market.yandex.ru/spec.xml?hid=%d&modelid=%d" % (int(c1st), int(c2nd)))
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8", "replace")
data = get_text(data, "<h2 class=\"b-subtitle\">", "</div>")
if data:
answer = self.decodeHTML(sub_desc(data, (chr(10), ("<li>", chr(10)), ("<h2 class=\"b-subtitle\">", chr(10)*2), ("</h2>", chr(10)))))
else:
answer = self.AnsBase[5]
else:
answer = AnsBase[30]
else:
answer = AnsBase[2]
else:
body = (body if chr(42) != c1st else body[2:].strip())
if body:
body = body.encode("utf-8")
Opener = Web("http://m.market.yandex.ru/search.xml?", [("nopreciser", "1"), ("text", body)])
try:
data = Opener.get_page(self.UserAgent_Moz)
except Web.Two.HTTPError as exc:
answer = str(exc)
except Exception:
answer = self.AnsBase[0]
else:
data = data.decode("utf-8", "replace")
comp = compile__("<a href=\"http://m\.market\.yandex\.ru/model\.xml\?hid=(\d+?)&modelid=(\d+?)&show-uid=\d+?\">(.+?)</a>", 16)
list = comp.findall(data)
if list:
Number = itypes.Number()
ls = ["\n[#] [Model Name] (hid & modelid)"]
for hid, modelid, name in list:
if not name.startswith("<img"):
ls.append("%d) %s (%s %s)" % (Number.plus(), self.sub_ehtmls(name), hid, modelid))
answer = str.join(chr(10), ls)
else:
answer = self.AnsBase[5]
else:
answer = AnsBase[2]
else:
answer = AnsBase[1]
Answer(answer, stype, source, disp)
commands = (
(command_jc, "jc", 2,),
(command_google, "google", 2,),
(command_google_translate, "tr", 2,),
(command_imdb, "imdb", 2,),
(command_python, "python", 2,),
(command_url_shorten, "shorten", 2,),
(command_download, "download", 7,),
(command_paste, "paste", 2,),
(command_chuck, "chuck", 2,),
(command_bash, "bash", 2,)
)
if DefLANG in ("RU", "UA"):
commands = commands.__add__((
(command_kino, "kino", 2,),
(command_currency, "currency", 2,),
(command_jquote, "jquote", 2,),
(command_ithappens, "ithappens", 2,),
(command_gismeteo, "gismeteo", 2,),
(command_yandex_market, "market", 2,)
))
CurrencyDesc = CurrencyDesc
else:
del kinoHeaders, C3oP, command_kino, command_currency, command_jquote, command_ithappens, command_gismeteo
if DefLANG in ("RU", "UA"):
del CurrencyDesc
del UserAgents, PasteLangs, LangMap
|
from ._filter import Filter
from ._registration._apply_transform import ApplyTransform
from ._registration._learn_transform import LearnTransform
from ._segmentation import Segmentation
|
from abc import ABC, abstractmethod, abstractproperty
from functools import wraps
from time import sleep
import pyvisa
def reject_nan(func):
@wraps(func)
def wrapped(*args, **kwargs):
for _ in range(10):
response = func(*args, **kwargs)
if response == response:
return response
sleep(0.2)
return None
return wrapped
def strip_str(func):
@wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
try:
response = response.strip()
except:
pass
return response
return wrapped
def to_float(func):
@wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
try:
response = float(response)
except:
pass
return response
return wrapped
class AbstractConnection(ABC):
@abstractmethod
def write(self, command):
pass
@abstractmethod
def read(self, command):
pass
@abstractmethod
def close(self):
pass
class SCPIConnection(AbstractConnection):
def __init__(self, comm_type, address, gpib_port=0, timeout=10000):
self.gpib_port = gpib_port
self.address = self._type_to_address(comm_type, address)
self.timeout = timeout
try:
rm = pyvisa.ResourceManager()
self.conn = rm.open_resource(self.address)
self.conn.timeout = timeout
except pyvisa.VisaIOError as e:
print(e)
def _type_to_address(self, comm_type, address):
if comm_type == "gpib":
return self._parse_gpib(address)
elif comm_type == "lan":
return self._parse_lan(address)
elif comm_type == "usb":
return self._parse_usb(address)
raise ValueError(f"SCPIConnection: Invalid address {address}")
def _parse_gpib(self, address):
return f"GPIB{self.gpib_port}::{address}::INSTR"
def _parse_lan(self, address):
return f"TCPIP::{address}::INSTR"
def _parse_usb(self, address):
pass
def write(self, command):
try:
self.conn.write(command)
except pyvisa.VisaIOError as e:
print(e)
except pyvisa.InvalidSession as e:
print(e)
@reject_nan
@to_float
@strip_str
def read(self, command):
response = None
try:
response = self.conn.query(command)
except pyvisa.VisaIOError as e:
print(e)
except pyvisa.InvalidSession as e:
print(e)
return response
def read_bytes(self, command):
response = None
try:
response = self.conn.query_binary_values(command)
except pyvisa.VisaIOError as e:
print(e)
except pyvisa.InvalidSession as e:
print(e)
return response
def close(self):
self.conn.close()
class SerialConnection(AbstractConnection):
pass
class NoConnection(AbstractConnection):
def __init__(self, *args, **kwargs):
pass
def write(self, command):
print(command)
def read(self, command):
print(command)
return 0
def close(self):
print("close")
|
import shutil
from pathlib import Path
from typing import IO
import pytest
from setup import _Package
_INIT_CONTENT = '''
"""Package stands for pytest plugin."""
__author__: str = 'Volodymyr Yahello'
__email__: str = 'vyahello@gmail.com'
__license__: str = 'MIT'
__version__: str = '0.0.0'
__package_name__: str = 'pytest-confluence-report'
'''
@pytest.fixture()
def package() -> _Package:
"""Returns a test package."""
report: Path = Path('test-report')
if not report.exists():
report.mkdir()
with (report / '__init__.py').open(mode='w') as init: # type: IO[str]
init.write(_INIT_CONTENT)
yield _Package(path=report)
shutil.rmtree(path=report)
def test_package_version(package: _Package) -> None:
assert package.version == '0.0.0', 'Package version is wrong'
def test_package_name(package: _Package) -> None:
assert package.name == 'pytest-confluence-report', 'Package name is wrong'
def test_package_author(package: _Package) -> None:
assert package.author == 'Volodymyr Yahello', 'Package author is wrong'
def test_package_email(package: _Package) -> None:
assert package.email == 'vyahello@gmail.com', 'Package email is wrong'
def test_package_license(package: _Package) -> None:
assert package.license_ == 'MIT', 'Package license is wrong'
def test_package_goal(package: _Package) -> None:
assert (
package.goal == 'Package stands for pytest plugin.'
), 'Package goal is wrong'
def test_package_read_property(package: _Package) -> None:
assert (
package._read(pattern=r"(?<=__license__: str = ')[\w-]+") == 'MIT'
), 'Package property is wrong'
|
# Time: O(m + n)
# Space: O(1)
class Solution(object):
def mergeAlternately(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: str
"""
result = []
i = 0
while i < len(word1) or i < len(word2):
if i < len(word1):
result.append(word1[i])
if i < len(word2):
result.append(word2[i])
i += 1
return "".join(result)
|
from sys import stdin, stdout
def coinFlip(I, N, Q):
if N % 2 == 0:
return N // 2
else:
if I == 1:
if Q == 1:
return N // 2
else:
return (N // 2) + 1
else:
if Q == 1:
return (N // 2) + 1
else:
return N // 2
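# Summary of the branches above, derived from the code itself rather than the
# original problem statement: for even N the answer is always N // 2; for odd N
# the extra coin is counted only when the queried side Q differs from the
# initial side I.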
for _ in range(int(stdin.readline())):
G = int(stdin.readline())
for _ in range(G):
        I, N, Q = map(int, stdin.readline().split())
stdout.write(str(coinFlip(I, N, Q)) + "\n")
|
import importlib
import traceback
info = {
"name": "reload",
"type": 1,
"description": "Reloads a command",
"id": "reload",
"options": [
{
"name": "command",
"description": "Command name",
"type": 3,
"required": True
},
{
"name": "send",
"description": "Update command JSON?",
"type": 5,
"required": False
}
],
"default_permission": False
}
async def command(WS, msg):
options = msg["data"]["options"][0]["value"]
try:
update = msg["data"]["options"][1]["value"]
print(update)
except:
update = False
try:
current = WS.cache.commands[options].module
try:
current.extras
except:
pass
else:
for m in current.extras:
importlib.reload(m)
importlib.reload(current)
WS.cache.commands[options].command = current.command
if update: #str(current.info) != str(WS.cache.commands[options].info):
resp = await WS.post(WS.cache.commands[options].url, headers = {"Authorization": "Bot " + WS.TOKENS.bot}, json = current.info)
print(resp)
WS.cache.commands[current.info["name"]] = WS.classify({
"command": current.command,
"module": current,
"info": current.info,
"data": resp,
"url": WS.cache.commands[options].url
})
WS.cache.commands[options].data
resp = await WS.post(WS.interaction(msg), data = WS.form({
"type": 4,
"data": {
"embeds": [
{
"title": "RELOADED ;]",
"description": f"Command `/{options}` reloaded",
"color": 0x00ff00
}
]
}
}))
except Exception as ex:
print(f"\x1b[91;1m{ex}\x1b[0m")
tb = "\n".join(traceback.format_tb(ex.__traceback__))
resp = await WS.post(WS.interaction(msg), data = WS.form({
"type": 4,
"data": {
"embeds": [
{
"title": f"{type(ex)} ;[",
"description": f'{ex}\n```{tb}```',
"color": 0xff0000
}
]
}
}))
#print(resp)
|
class Solution:
def numberOfArithmeticSlices(self, A):
"""
:type A: List[int]
:rtype: int
"""
if len(A) < 3:
return 0
left, right = 0,3
results = 0
while right <= len(A):
sli = A[left:right]
if sli[1] - sli[0] == sli[2] - sli[1]:
c = sli[1] - sli[0]
new = right
while new < len(A):
if A[new] - sli[-1] == c:
sli.append(A[new])
new += 1
else:
new -= 1
break
results += sum(range(1, len(sli)-1))
left = new
right = left+3
else:
left += 1
right += 1
return results
|
import rebound
import unittest
import ctypes
def getc(sim):
c = []
for i in range(sim.N):
        c.append(sim.particles[i].x)
        c.append(sim.particles[i].y)
        c.append(sim.particles[i].z)
        c.append(sim.particles[i].vx)
        c.append(sim.particles[i].vy)
        c.append(sim.particles[i].vz)
        c.append(sim.particles[i].m)
        c.append(sim.particles[i].r)
return c
class TestTransformations(unittest.TestCase):
def test_democratichelio(self):
sim = rebound.Simulation()
sim.add(m=1.2354)
sim.add(m=0.1,a=1.24,e=0.123,inc=0.14,omega=0.12,Omega=0.64,l=0.632)
sim.add(m=0.01,a=5.24,e=0.2123,inc=0.014,omega=0.012,Omega=0.0164,l=10.18632)
sim.add(m=1e-7,a=7.24,e=0.22123,inc=0.3014,omega=0.4012,Omega=0.110164,l=2.18632)
elems = (rebound.Particle * sim.N)()
p = ctypes.cast(elems,ctypes.POINTER(rebound.Particle))
c0 = getc(sim)
cl = rebound.clibrebound
cl.reb_transformations_inertial_to_democraticheliocentric_posvel(sim._particles,p,sim.N)
for i in range(sim.N):
sim.particles[i].x = 1234.
sim.particles[i].vx = 1234.
cl.reb_transformations_democraticheliocentric_to_inertial_posvel(sim._particles,p,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
for i in range(sim.N):
sim.particles[i].x = 1234.
cl.reb_transformations_democraticheliocentric_to_inertial_pos(sim._particles,p,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
def test_whds(self):
sim = rebound.Simulation()
sim.add(m=1.2354)
sim.add(m=0.1,a=1.24,e=0.123,inc=0.14,omega=0.12,Omega=0.64,l=0.632)
sim.add(m=0.01,a=5.24,e=0.2123,inc=0.014,omega=0.012,Omega=0.0164,l=10.18632)
sim.add(m=1e-7,a=7.24,e=0.22123,inc=0.3014,omega=0.4012,Omega=0.110164,l=2.18632)
elems = (rebound.Particle * sim.N)()
p = ctypes.cast(elems,ctypes.POINTER(rebound.Particle))
c0 = getc(sim)
cl = rebound.clibrebound
cl.reb_transformations_inertial_to_whds_posvel(sim._particles,p,sim.N)
for i in range(sim.N):
sim.particles[i].x = 1234.
sim.particles[i].vx = 1234.
cl.reb_transformations_whds_to_inertial_posvel(sim._particles,p,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
for i in range(sim.N):
sim.particles[i].x = 1234.
cl.reb_transformations_whds_to_inertial_pos(sim._particles,p,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
    def test_jacobi(self):
sim = rebound.Simulation()
sim.add(m=1.2354)
sim.add(m=0.1,a=1.24,e=0.123,inc=0.14,omega=0.12,Omega=0.64,l=0.632)
sim.add(m=0.01,a=5.24,e=0.2123,inc=0.014,omega=0.012,Omega=0.0164,l=10.18632)
sim.add(m=1e-7,a=7.24,e=0.22123,inc=0.3014,omega=0.4012,Omega=0.110164,l=2.18632)
elems = (rebound.Particle * sim.N)()
p = ctypes.cast(elems,ctypes.POINTER(rebound.Particle))
elemse = (ctypes.c_double * sim.N)()
c0 = getc(sim)
cl = rebound.clibrebound
cl.reb_transformations_inertial_to_jacobi_posvel(sim._particles,p,sim._particles,sim.N)
for i in range(sim.N):
sim.particles[i].x = 1234.
sim.particles[i].vx = 1234.
cl.reb_transformations_jacobi_to_inertial_posvel(sim._particles,p,sim._particles,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
for i in range(sim.N):
sim.particles[i].x = 1234.
cl.reb_transformations_jacobi_to_inertial_pos(sim._particles,p,sim._particles,sim.N)
c1 = getc(sim)
for i in range(len(c0)):
self.assertAlmostEqual(c0[i],c1[i],delta=1e-16)
if __name__ == "__main__":
unittest.main()
|
# SPDX-FileCopyrightText: 2018 ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2018 Michael Schroeder (sommersoft)
#
# SPDX-License-Identifier: MIT
# This is a library for the Adafruit Trellis w/HT16K33
#
# Designed specifically to work with the Adafruit Trellis
# ----> https://www.adafruit.com/products/1616
# ----> https://www.adafruit.com/products/1611
#
# These displays use I2C to communicate, 2 pins are required to
# interface
# Adafruit invests time and resources providing this open source code,
# please support Adafruit and open-source hardware by purchasing
# products from Adafruit!
#
# Also utilized functions from the CircuitPython HT16K33 library
# written by Radomir Dopieralski & Tony DiCola for Adafruit Industries
# https://github.com/adafruit/Adafruit_CircuitPython_HT16K33
"""
`adafruit_trellis` - Adafruit Trellis Monochrome 4x4 LED Backlit Keypad
=========================================================================
CircuitPython library to support Adafruit's Trellis Keypad.
* Author(s): Limor Fried, Radomir Dopieralski, Tony DiCola,
Scott Shawcroft, and Michael Schroeder
Implementation Notes
--------------------
**Hardware:**
* Adafruit `Trellis Monochrome 4x4 LED Backlit Keypad
<https://www.adafruit.com/product/1616>`_ (Product ID: 1616)
**Software and Dependencies:**
* Adafruit CircuitPython firmware (2.2.0+) for the ESP8266 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Trellis.git"
from micropython import const
from adafruit_bus_device import i2c_device
# HT16K33 Command Constants
_HT16K33_OSCILATOR_ON = const(0x21)
_HT16K33_BLINK_CMD = const(0x80)
_HT16K33_BLINK_DISPLAYON = const(0x01)
_HT16K33_CMD_BRIGHTNESS = const(0xE0)
_HT16K33_KEY_READ_CMD = const(0x40)
# LED Lookup Table
ledLUT = (
0x3A,
0x37,
0x35,
0x34,
0x28,
0x29,
0x23,
0x24,
0x16,
0x1B,
0x11,
0x10,
0x0E,
0x0D,
0x0C,
0x02,
)
# Button Lookup Table
buttonLUT = (
0x07,
0x04,
0x02,
0x22,
0x05,
0x06,
0x00,
0x01,
0x03,
0x10,
0x30,
0x21,
0x13,
0x12,
0x11,
0x31,
)
# pylint: disable=missing-docstring, protected-access
class TrellisLEDs:
def __init__(self, trellis_obj):
self._parent = trellis_obj
def __getitem__(self, x):
        if not 0 <= x < self._parent._num_leds:
raise ValueError(
("LED number must be between 0 -", self._parent._num_leds - 1)
)
led = ledLUT[x % 16] >> 4
mask = 1 << (ledLUT[x % 16] & 0x0F)
return bool(
(
(
self._parent._led_buffer[x // 16][(led * 2) + 1]
| self._parent._led_buffer[x // 16][(led * 2) + 2] << 8
)
& mask
)
> 0
)
def __setitem__(self, x, value):
        if not 0 <= x < self._parent._num_leds:
raise ValueError(
("LED number must be between 0 -", self._parent._num_leds - 1)
)
led = ledLUT[x % 16] >> 4
mask = 1 << (ledLUT[x % 16] & 0x0F)
if value:
self._parent._led_buffer[x // 16][(led * 2) + 1] |= mask & 0xFF
self._parent._led_buffer[x // 16][(led * 2) + 2] |= mask >> 8
elif not value:
self._parent._led_buffer[x // 16][(led * 2) + 1] &= ~mask
self._parent._led_buffer[x // 16][(led * 2) + 2] &= ~mask >> 8
else:
raise ValueError("LED value must be True or False")
if self._parent._auto_show:
self._parent.show()
# pylint: disable=invalid-name
def fill(self, on):
fill = 0xFF if on else 0x00
for buff in range(len(self._parent._i2c_devices)):
for i in range(1, 17):
self._parent._led_buffer[buff][i] = fill
if self._parent._auto_show:
self._parent.show()
# pylint: enable=missing-docstring, protected-access
class Trellis:
"""
Driver base for a single Trellis Board
:param ~busio.I2C i2c: The `busio.I2C` object to use. This is the only required parameter
when using a single Trellis board.
:param list addresses: The I2C address(es) of the Trellis board(s) you're using. Defaults
to ``[0x70]`` which is the default address for Trellis boards. See
Trellis product guide for using different/multiple I2C addresses.
https://learn.adafruit.com/adafruit-trellis-diy-open-source-led-keypad
.. literalinclude:: ../examples/trellis_simpletest.py
:caption: Usage Example
:linenos:
"""
def __init__(self, i2c, addresses=None):
if addresses is None:
addresses = [0x70]
self._i2c_devices = []
self._led_buffer = []
self._buttons = []
for i2c_address in addresses:
self._i2c_devices.append(i2c_device.I2CDevice(i2c, i2c_address))
self._led_buffer.append(bytearray(17))
self._buttons.append([bytearray(6), bytearray(6)])
self._num_leds = len(self._i2c_devices) * 16
self._temp = bytearray(1)
self._blink_rate = None
self._brightness = None
self._auto_show = True
self.led = TrellisLEDs(self)
"""
The LED object used to interact with Trellis LEDs.
- ``trellis.led[x]`` returns the current LED status of LED ``x`` (True/False)
- ``trellis.led[x] = True`` turns the LED at ``x`` on
- ``trellis.led[x] = False`` turns the LED at ``x`` off
- ``trellis.led.fill(bool)`` turns every LED on (True) or off (False)
"""
self.led.fill(False)
self._write_cmd(_HT16K33_OSCILATOR_ON)
self.blink_rate = 0
self.brightness = 15
def _write_cmd(self, byte):
self._temp[0] = byte
for device in self._i2c_devices:
with device:
device.write(self._temp)
@property
def blink_rate(self):
"""
The current blink rate as an integer range 0-3.
"""
return self._blink_rate
@blink_rate.setter
def blink_rate(self, rate):
if not 0 <= rate <= 3:
raise ValueError("Blink rate must be an integer in the range: 0-3")
rate = rate & 0x03
self._blink_rate = rate
self._write_cmd(_HT16K33_BLINK_CMD | _HT16K33_BLINK_DISPLAYON | rate << 1)
@property
def brightness(self):
"""
The current brightness as an integer range 0-15.
"""
return self._brightness
@brightness.setter
def brightness(self, brightness):
if not 0 <= brightness <= 15:
raise ValueError("Brightness must be an integer in the range: 0-15")
brightness = brightness & 0x0F
self._brightness = brightness
self._write_cmd(_HT16K33_CMD_BRIGHTNESS | brightness)
def show(self):
"""Refresh the LED buffer and show the changes."""
pos = 0
for device in self._i2c_devices:
temp_led_buffer = bytearray(self._led_buffer[pos])
with device:
device.write(temp_led_buffer)
pos += 1
@property
def auto_show(self):
"""
        Current state of sending LED updates directly to the Trellis board(s). ``True``
or ``False``.
"""
return self._auto_show
@auto_show.setter
def auto_show(self, value):
if value not in (True, False):
raise ValueError("Auto show value must be True or False")
self._auto_show = value
def read_buttons(self):
"""
Read the button matrix register on the Trellis board(s). Returns two
        lists: 1 for new button presses, 1 for button releases.
"""
for i in range(len(self._buttons)):
self._buttons[i][0] = bytearray(self._buttons[i][1])
self._write_cmd(_HT16K33_KEY_READ_CMD)
pos = 0
for device in self._i2c_devices:
with device:
device.readinto(self._buttons[pos][1])
pos += 1
pressed = []
released = []
for i in range(self._num_leds):
if self._just_pressed(i):
pressed.append(i)
elif self._just_released(i):
released.append(i)
return pressed, released
def _is_pressed(self, button):
mask = 1 << (buttonLUT[button % 16] & 0x0F)
return self._buttons[button // 16][1][(buttonLUT[button % 16] >> 4)] & mask
def _was_pressed(self, button):
mask = 1 << (buttonLUT[button % 16] & 0x0F)
return self._buttons[button // 16][0][(buttonLUT[button % 16] >> 4)] & mask
def _just_pressed(self, button):
# pylint: disable=invalid-unary-operand-type
return self._is_pressed(button) & ~self._was_pressed(button)
def _just_released(self, button):
# pylint: disable=invalid-unary-operand-type
return ~self._is_pressed(button) & self._was_pressed(button)
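# A minimal usage sketch (kept as a comment; it assumes CircuitPython hardware
# with a Trellis at the default address 0x70, as in examples/trellis_simpletest.py):
#
#     import board
#     import busio
#     i2c = busio.I2C(board.SCL, board.SDA)
#     trellis = Trellis(i2c)              # or Trellis(i2c, [0x70, 0x71])
#     trellis.led[0] = True
#     pressed, released = trellis.read_buttons()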
|
import argparse
import os
import sys
import preprocessing.util as util
import preprocessing.config as config
import traceback
def wikidump_to_new_format():
doc_cnt = 0
hyperlink2EntityId = util.EntityNameIdMap()
hyperlink2EntityId.init_hyperlink2id()
if args.debug:
infilepath = config.base_folder + "data/mydata/tokenized_toy_wiki_dump2.txt"
outfilepath = args.out_folder+"toy_wikidump.txt"
else:
infilepath = config.base_folder+"data/basic_data/tokenizedWiki.txt"
outfilepath = args.out_folder+"wikidump.txt"
with open(infilepath) as fin,\
open(outfilepath, "w") as fout:
in_mention = False
for line in fin:
line = line.rstrip() # omit the '\n' character
if line.startswith('<doc\xa0id="'):
docid = line[9:line.find('"', 9)]
doctitle = line[line.rfind('="') + 2:-2]
fout.write("DOCSTART_" + docid + "_" + doctitle.replace(' ', '_') + "\n")
elif line.startswith('<a\xa0href="'):
ent_id = hyperlink2EntityId.hyperlink2id(line)
if ent_id != config.unk_ent_id:
in_mention = True
fout.write("MMSTART_"+ent_id+"\n")
elif line == '</doc>':
fout.write("DOCEND\n")
doc_cnt += 1
if doc_cnt % 5000 == 0:
print("document counter: ", doc_cnt)
elif line == '</a>':
if in_mention:
fout.write("MMEND\n")
in_mention = False
else:
fout.write(line+"\n")
def subset_wikidump_only_relevant_mentions():
# consider only the RLTD entities (484048). take them from the files
entities_universe = set()
with open("/home/other_projects/deep_ed/data/generated/nick/"
"wikiid2nnid.txt") as fin:
for line in fin:
ent_id = line.split('\t')[0]
entities_universe.add(ent_id)
# filter wikidump
doc_cnt = 0
mention_errors = 0
if args.debug:
infilepath = args.out_folder+"toy_wikidump.txt"
outfilepath = args.out_folder+"toy_wikidumpRLTD.txt"
else:
infilepath = args.out_folder+"wikidump.txt"
outfilepath = args.out_folder+"wikidumpRLTD.txt"
with open(infilepath) as fin, open(outfilepath, "w") as fout:
in_mention_acc = []
for line in fin:
if line.startswith('DOCSTART_'):
document_acc = [line]
paragraph_acc = []
paragraph_relevant = False
in_mention_acc = []
elif line == '*NL*\n':
# there is always a *NL* before DOCEND, so this marks the end of a
# paragraph; check below whether the paragraph is relevant
if in_mention_acc:
in_mention_acc.append(line)
else:
paragraph_acc.append(line) # normal word
if in_mention_acc: # parsing error: we entered a mention but never
#print("in_mention_acc", in_mention_acc)
mention_errors += 1 # detected its end, so we are still inside the mention
paragraph_acc.extend(in_mention_acc[1:])
#print("paragraph_acc", paragraph_acc)
in_mention_acc = []
if paragraph_relevant:
try:
assert(len(paragraph_acc) >= 4) # MMSTART, word, MMEND, *NL* or DOCEND
assert(len(document_acc) >= 1)
document_acc.extend(paragraph_acc)
except AssertionError:
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print('An error occurred on line {} in statement {}'.format(line, text))
print("in_mention_acc", in_mention_acc)
print("paragraph_acc", paragraph_acc)
paragraph_acc = []
paragraph_relevant = False
elif line == "DOCEND\n":
assert(in_mention_acc == []) # because there is always an *NL* before DOCEND
if len(document_acc) > 1:
document_acc.append(line)
fout.write(''.join(document_acc))
document_acc = [] # those 3 commands are not necessary
paragraph_acc = []
paragraph_relevant = False
doc_cnt += 1
if doc_cnt % 5000 == 0:
print("document counter: ", doc_cnt)
elif line.startswith('MMSTART_'):
if in_mention_acc: # parsing error: we entered a mention but never
#print("in_mention_acc", in_mention_acc)
mention_errors += 1 # detected its end, so we are still inside the mention
paragraph_acc.extend(in_mention_acc[1:])
#print("paragraph_acc", paragraph_acc)
in_mention_acc = []
ent_id = line.rstrip()[8:] # assert that ent_id in wiki_name_id_map
if ent_id in entities_universe:
paragraph_relevant = True
in_mention_acc.append(line)
elif line == 'MMEND\n':
if in_mention_acc:
in_mention_acc.append(line)
paragraph_acc.extend(in_mention_acc)
in_mention_acc = []
# else this mention is not in our universe so we don't accumulate it.
else:
if in_mention_acc:
in_mention_acc.append(line)
else:
paragraph_acc.append(line) # normal word
print("mention_errors =", mention_errors)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--entities_universe_file",
default="/home/master_thesis_share/data/entities/entities_universe.txt")
parser.add_argument("--out_folder", default="/home/master_thesis_share/data/new_datasets/wikidump/")
parser.add_argument("--debug", type=bool, default=False)
return parser.parse_args()
if __name__ == "__main__":
args = _parse_args()
#if args.debug:
# wikidump_to_new_format()
subset_wikidump_only_relevant_mentions()
|
#C1949699
#
#
import math
import random
import re
import time
from cv2 import imread, imshow, waitKey
import numpy as np
import cv2
import os
from multiprocessing import Process, Manager
data_path = os.getcwd()
dataset_path = data_path+"\\cross_validation\\"
imageDataLocation = dataset_path+"\\images\\"
truthDataLocation = dataset_path+"\\truth\\"
LOG = []
datasetTrain = []
datasetVal = []
runTimeNumber = int(1)
smallRangeWorker = np.arange(55,101,5)
LongRangeWorker = np.arange(3,31,3)
confidenceList = np.divide(np.subtract(np.arange(15,85,15),5),100)
#REGION functions
# Threshold to detect object
def calculateSTD(evaluationList, evaluationMean):
n = 1
if len(evaluationList)!=1:
n = len(evaluationList)-1
sumX = 0
for score in evaluationList:
sumX+=(score-evaluationMean)**2
standardDeviation = sumX / n
standardDeviation = math.sqrt(standardDeviation)
return standardDeviation
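# Worked example (sample standard deviation, i.e. Bessel's n-1 correction):
#   calculateSTD([2, 4, 4, 4, 5, 5, 7, 9], 5.0)
#   = sqrt((9+1+1+1+0+0+4+16) / 7) = sqrt(32/7) ~= 2.138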
def runTimeCount():
i = 1
fileList = os.listdir(data_path)
while "log"+str(i)+".txt" in fileList:
i+=1
return int(i)
def saveLog():
i = 1
fileList = os.listdir(data_path)
if "log1.txt" not in fileList:
with open("log"+str(i)+".txt","a") as f:
for line in LOG:
f.write(line+"\n")
else:
while "log"+str(i)+".txt" in fileList:
i+=1
with open("log"+str(i)+".txt","a") as f:
for line in LOG:
f.write(line+"\n")
LOG.clear()
def addToLog(line,varname):
# print("Line",line)
if varname == "BinaryMasks":
LOG.append(str(varname)+" "+str(line)) # list.append() takes a single argument
elif isinstance(line, list):
# log.append(varname)
LOG.append(str(varname)+" "+f'{line}')
elif isinstance(line, (str, int)): # isinstance() needs a tuple of types here
# log.append(varname)
LOG.append(str(varname)+" "+str(line))
elif isinstance(line, float):
LOG.append(str(varname)+" "+str(line))
def calc_IoU(mask1, mask2): # From the question.
mask1_area = np.count_nonzero(mask1)
mask2_area = np.count_nonzero(mask2)
# print(mask1_area, " : ", mask2_area)
intersection = np.count_nonzero(np.logical_and( mask1, mask2))
# print("intersection",intersection)
iou = intersection/(mask1_area+mask2_area-intersection)
return iou
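# Worked example with two tiny binary masks:
#   mask1 = np.array([[1, 1], [0, 0]])
#   mask2 = np.array([[1, 0], [1, 0]])
# areas are 2 and 2, the intersection is 1, so
#   calc_IoU(mask1, mask2) = 1 / (2 + 2 - 1) = 1/3 ~= 0.333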
# def runTest():
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
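# Example: natural (human) ordering of numbered file names:
#   sorted(["img10.jpg", "img2.jpg", "img1.jpg"], key=natural_keys)
#   -> ["img1.jpg", "img2.jpg", "img10.jpg"]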
def select_new_dataset():
datasetTrain.clear()
datasetVal.clear()
files = os.listdir(imageDataLocation)
for i in range(14):
file = random.choice(files)
while file in datasetTrain:
file = random.choice(files)
datasetTrain.append(file)
for file in files:
if file not in datasetTrain:
datasetVal.append(file)
#ENDREGION
# configPath = 'frozen_inference_graph.pb'
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net4 = cv2.dnn_DetectionModel(weightsPath,configPath)
net4.setInputSize(320,320)
net4.setInputScale(1.0/127.5)
net4.setInputMean((127.5,127.5,127.5))
net4.setInputSwapRB(True)
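# The preprocessing above matches the 320x320 SSD MobileNet input: scaling by
# 1/127.5 after subtracting a mean of 127.5 maps pixel values from [0, 255]
# to roughly [-1, 1], and setInputSwapRB converts OpenCV's BGR order to RGB.
# Detection itself happens later via net4.detect(img, confThreshold=...).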
# test_data_path = os.path.dirname(os.getcwd())+"\\data\\new_test_data\\"
def selectParameters4():#selecting the parameters that will be swapped for each iteration
PARAMETERS = list()
PARAMETERS.append(random.choice(confidenceList))
PARAMETERS.append(random.choice(smallRangeWorker))
PARAMETERS.append(random.choice(LongRangeWorker))
return PARAMETERS
def workFunc(img, workerRange,x,y):
"""Count in how many of the four cardinal directions a non-zero pixel is
found within workerRange steps of (x, y)."""
count = 0
down = 0
up = 0
left = 0
right = 0
tempFalse = False
for i in range(x+1,min(x+workerRange,len(img))):
if img[i][y] != 0:
if tempFalse!= True:
count+=1
down+=1
tempFalse=True
break
tempFalse = False
for i in range(x-1,max(x-workerRange,-1),-1):
if img[i][y] != 0:
if tempFalse != True:
count+=1
up+=1
tempFalse=True
break
tempFalse = False
for i in range(y+1,min(y+workerRange,len(img[0])-1)):
if img[x][i] != 0:
if tempFalse!= True:
count+=1
right+=1
tempFalse=True
break
tempFalse = False
for i in range(y-1,max(y-workerRange,-1),-1):
if img[x][i] != 0:
if tempFalse != True:
count+=1
left+=1
tempFalse=True
break
return count
# return up,down,left,right
def fillSmallSpace(img, workerRange,x,y):
"""Like workFunc, but checks up/down first and returns 2 immediately when
both hit a non-zero pixel within workerRange; otherwise it counts only
the left/right hits."""
count = 0
down = 0
up = 0
left = 0
right = 0
tempFalse = False
for i in range(x+1,min(x+workerRange,len(img))):
if img[i][y] != 0:
if tempFalse!= True:
count+=1
down+=1
tempFalse=True
break
tempFalse = False
for i in range(x-1,max(x-workerRange,-1),-1):
if img[i][y] != 0:
if tempFalse != True:
count+=1
up+=1
tempFalse=True
break
if count == 2:
return count
else:
count = 0
tempFalse = False
for i in range(y+1,min(y+workerRange,len(img[0])-1)):
if img[x][i] != 0:
if tempFalse!= True:
count+=1
right+=1
tempFalse=True
break
tempFalse = False
for i in range(y-1,max(y-workerRange,-1),-1):
if img[x][i] != 0:
if tempFalse != True:
count+=1
left+=1
tempFalse=True
break
return count
# return up,down,left,right
def find_optimal_lines(ogSlice,numb):
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (13,13), cv2.BORDER_CONSTANT)
# ogSlice = cv2.Canny(ogSlice,125,150)
temp = cv2.Canny(temp,100,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%1",numb)
# while (compactness < 6 or compactness > 9):
if compactness < 3:
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (5,5), cv2.BORDER_CONSTANT)
temp = cv2.Canny(temp,100,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%2",numb)
if compactness < 3.5:
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (5,5), cv2.BORDER_CONSTANT)
# threshold
temp = cv2.Canny(temp,100,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%5",numb)
if compactness > 8:
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (7,7), cv2.BORDER_REFLECT)
temp = cv2.Canny(temp,100,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%3",numb)
if compactness > 9:
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (9,9), cv2.BORDER_CONSTANT)
temp = cv2.Canny(temp,100,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%4",numb)
if compactness < 6:
temp = ogSlice.copy()
temp = cv2.GaussianBlur(ogSlice, (5,5), cv2.BORDER_CONSTANT)
# threshold
temp = cv2.Canny(temp,150,175)
size = np.size(temp)
whiteCount = np.count_nonzero(temp)
compactness = (whiteCount/size)*100
print("Compactness ",compactness,"%5",numb)
print("Compactness ",compactness,"%",numb)
return temp
def v4(img,numb,PARAMETERS,dictOfBinaryMask):
thres = PARAMETERS[0]
classIds, confs, bbox = net4.detect(img,confThreshold = thres)
# print("BOX: ",bbox[0])
# x1 = bbox[0][0]
# y1 = bbox[0][1]
# x2 = bbox[0][2]
# y2 = bbox[0][3]
boxes = []
confidenceList = []
imgNew = img.copy()
imgNew.fill(0)
if not isinstance(classIds,tuple):
for classId,confidence,box in zip(classIds.flatten(),confs.flatten(),bbox):
x1 = box[0]
y1 = box[1]
x2 = box[2]
y2 = box[3]
boxes.append([x1,y1,x2,y2])
confidenceList.append(confidence)
# cv2.rectangle(img,[x1,y1,x2,y2],color=(0,0,255),thickness=2)
maxX = 0
maxY = 0
minX = len(img)*len(img[0])
minY = len(img)*len(img[0])
for b in boxes:
for i in range(b[0],b[2]+b[0]):
for j in range(b[1],b[3]+b[1]):
imgNew[j][i] = img[j][i]
for b in boxes:
maxX = max(maxX,b[0]+b[2])
maxY = max(maxY,b[1]+b[3])
minX = min(minX,b[0])
minY = min(minY,b[1])
fMask = np.copy(img)
fMask.fill(0)
ogSlice = img[minY:maxY,minX:maxX]
ogSlice = find_optimal_lines(ogSlice,numb)
ogSlice = cv2.dilate(ogSlice,(7,7))
newSlice = ogSlice.copy()
newSlice.fill(0)
for b in boxes:
for i in range(b[0]-minX,(b[2]+b[0])-minX):
for j in range(b[1]-minY,(b[3]+b[1])-minY):
newSlice[j][i] = ogSlice[j][i]
ogSlice = newSlice
cpSlice = ogSlice.copy()
smallRange = int(max(len(ogSlice)/PARAMETERS[1],len(ogSlice[0])/PARAMETERS[1]))
longRange = int(max(len(ogSlice)/PARAMETERS[2],len(ogSlice[0])/PARAMETERS[2]))
workVal = 0
ogSlice = cpSlice.copy()
for i in range(len(ogSlice)):
for j in range(len(ogSlice[0])):
workVal = fillSmallSpace(ogSlice,smallRange,i,j)
if(ogSlice[i][j] != 255):
if workVal == 2:
cpSlice[i][j] = 255
ogSlice = cpSlice.copy()
for i in range(len(ogSlice)):
for j in range(len(ogSlice[0])):
workVal = workFunc(ogSlice,longRange,i,j)
if(ogSlice[i][j] != 255):
if workVal > 3:
cpSlice[i][j] = 255
ogSlice = cpSlice.copy()
# print(tempRange)
for i in range(len(ogSlice)):
for j in range(len(ogSlice[0])):
workVal = fillSmallSpace(ogSlice,smallRange,i,j)
if(ogSlice[i][j] != 255):
if workVal == 2:
cpSlice[i][j] = 255
ogSlice[i][j] = 255
# imshow("ogSlice",ogSlice)
# imshow("cpSlice",cpSlice)
# waitKey(0)
ogSlice = cpSlice.copy()
for i in range(len(ogSlice)):
for j in range(len(ogSlice[0])):
workVal = workFunc(ogSlice,smallRange,i,j)
if(ogSlice[i][j] != 255):
if workVal == 4:
cpSlice[i][j] = 255
elif(ogSlice[i][j] != 0):
if workVal < 2:
# ogSlice[i][j] = 0
cpSlice[i][j] = 0
# imshow("ogSlice",ogSlice)
# imshow("cpSlice",cpSlice)
# waitKey(0)
rmSlice = cpSlice.copy()
drcontours = rmSlice.copy()
drcontours = cv2.cvtColor(drcontours, cv2.COLOR_GRAY2RGB)
removeIslands = cv2.pyrDown(rmSlice)
_, threshed = cv2.threshold(rmSlice, 0, 255, cv2.THRESH_BINARY)
contours,_ = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print("Cont",contours)
#find maximum contour and draw
# print("NUMB",numb)
if len(contours) > 0:
cmax = max(contours, key = cv2.contourArea)
epsilon = 0.002 * cv2.arcLength(cmax, True)
approx = cv2.approxPolyDP(cmax, epsilon, True)
cv2.drawContours(drcontours, [approx], -1, (0, 255, 0), 2)
width, height = rmSlice.shape
# imshow("Contour", drcontours)
# waitKey(0)
#fill maximum contour and draw
removeIslands = np.zeros( [width, height, 3],dtype=np.uint8 )
cv2.fillPoly(removeIslands, pts =[cmax], color=(255,255,255))
cpSlice = cv2.cvtColor(removeIslands, cv2.COLOR_BGR2GRAY)
# print("Cont",cmax)
for i in range(len(ogSlice)):
for j in range(len(ogSlice[0])):
fMask[minY+i][minX+j] = cpSlice[i][j]
fMask = cv2.cvtColor(fMask, cv2.COLOR_BGR2GRAY)
dictOfBinaryMask[numb] = fMask
else:
fMask = img.copy()
fMask = cv2.cvtColor(fMask, cv2.COLOR_BGR2GRAY)
fMask.fill(0)
dictOfBinaryMask[numb] = fMask
def findOptimalParams(listOfEvaluations,evaluationParameters,dictOfBinaryMask,PARAMETERS,datasetTrain):
for i in range(1):
PARAMETERS = selectParameters4()
print("PARAMS ",PARAMETERS)
if PARAMETERS in evaluationParameters:
break
evaluationParameters.append(PARAMETERS)#[PARAMETERS[0],PARAMETERS[1],PARAMETERS[2]]
# print(evaluationParameters)
# addToLog(PARAMETERS.copy(),"Parameters")
jobs = []
for image in datasetTrain:
current = imread(imageDataLocation+image)
p1 = Process(target=v4,args=[current,image,PARAMETERS,dictOfBinaryMask])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
averageIOU=0
for image in datasetTrain:
imageNumber = int(re.compile(r'\d+(?:\.\d+)?').findall(image)[0])
max_IoU = 0
for i in range (1,6):
mask2 = str(imageNumber)+"_gt"+str(i)+".jpg"
mask2 = imread(truthDataLocation+mask2)
mask2 = cv2.cvtColor(mask2,cv2.COLOR_BGR2GRAY)
max_IoU = max(max_IoU,calc_IoU(dictOfBinaryMask[image],mask2))
averageIOU+=max_IoU
averageIOU /= len(datasetTrain)
# print(averageIOU)
listOfEvaluations.append(averageIOU)
def runTestV4():
listTestEvaluations = []
numRange = 50
for i in range(numRange):
if i % 10 == 0 and numRange > 9:
print(i*2,"% Done")
elif numRange < 10:
print(i,"iteration")
addToLog(i,"Loop: ")
select_new_dataset()
addToLog(datasetTrain,f'{datasetTrain=}'.split('=')[0])
addToLog(datasetVal,f'{datasetVal=}'.split('=')[0])
# listOfBinaryMask = []
# for i in range(10):
listOfEvaluations = []
evaluationParameters = []
averageIOU = 0
optimalParams = []
with Manager() as manager:
dictOfBinaryMask = manager.dict()
evaluationParameters = manager.list()
listOfEvaluations = manager.list()
PARAMETERS = manager.list()
jobs = []
for i in range(6):
p1 = Process(target=findOptimalParams,args=[listOfEvaluations,evaluationParameters,dictOfBinaryMask,PARAMETERS,datasetTrain])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
jobs.clear()
for i in range(6):
p1 = Process(target=findOptimalParams,args=[listOfEvaluations,evaluationParameters,dictOfBinaryMask,PARAMETERS,datasetTrain])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
jobs.clear()
for i in range(6):
p1 = Process(target=findOptimalParams,args=[listOfEvaluations,evaluationParameters,dictOfBinaryMask,PARAMETERS,datasetTrain])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
jobs.clear()
for i in range(6):
p1 = Process(target=findOptimalParams,args=[listOfEvaluations,evaluationParameters,dictOfBinaryMask,PARAMETERS,datasetTrain])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
jobs.clear()
print("Length of listOfEvaluations",len(listOfEvaluations))
print("Length of evaluationParameters",len(evaluationParameters))
print("Length of listTestEvaluations",len(listTestEvaluations))
optimalParamsResults = listOfEvaluations[listOfEvaluations.index(max(listOfEvaluations))]
optimalParams = evaluationParameters[listOfEvaluations.index(max(listOfEvaluations))]
addToLog(optimalParamsResults,"Evaluation Score Train Set")
print("Second part")
for image in datasetVal:
current = imread(imageDataLocation+image)
p1 = Process(target=v4,args=[current,image,optimalParams,dictOfBinaryMask])
p1.start()
jobs.append(p1)
for job in jobs:
job.join()
averageIOU=0
for image in datasetVal:
imageNumber = int(re.compile(r'\d+(?:\.\d+)?').findall(image)[0])
max_IoU = 0
for i in range (1,6):
mask2 = str(imageNumber)+"_gt"+str(i)+".jpg"
mask2 = imread(truthDataLocation+mask2)
mask2 = cv2.cvtColor(mask2,cv2.COLOR_BGR2GRAY)
max_IoU = max(max_IoU,calc_IoU(dictOfBinaryMask[image],mask2))
averageIOU+=max_IoU
averageIOU /= len(datasetVal)
addToLog(optimalParams,"Optimal Parameters")
addToLog(averageIOU,"Evaluation Score Validation Set")
listTestEvaluations.append(averageIOU)
averageTestScore = sum(listTestEvaluations)/len(listTestEvaluations)
addToLog(averageTestScore,"Average Validation Score")
addToLog(calculateSTD(listTestEvaluations,averageTestScore), "Standard Deviation")
addToLog("V4","Pipeline: ")
if __name__ == "__main__":
start_time = time.time()
runTestV4()
print("--- %s seconds ---" % (time.time() - start_time))
runTimeNumber = str(runTimeCount())
saveLog()
print("Run time number ",runTimeNumber)
|
"""
This is a dedicated editor for specifying camera set rigs. It allows
the artist to define multi-rig presets. They can then use the
create menus to quickly create complicated rigs.
"""
class BaseUI:
"""
Each region of the editor UI is abstracted into a UI class that
contains all of the widgets for that object and the relevant callbacks.
This base class should not be instantiated directly; it is only a
container for common code.
"""
def __init__(self, parent):
"""
Class initialization. We simply hold onto the parent layout.
"""
pass
def control(self):
"""
Return the master control for this UI.
"""
pass
def parent(self):
"""
Return the parent instance of this UI. This can be None.
"""
pass
def setControl(self, control):
"""
Set the pointer to the control handle for this UI. This is
the main control that parents or children can reference
for UI updates.
"""
pass
class NewCameraSetUI(BaseUI):
"""
UI for adding 'new' camera set.
"""
def __init__(self, parent):
"""
Class constructor
"""
pass
def buildLayout(self):
"""
Construct the UI for this class.
"""
pass
def name(self):
"""
Name of this UI component.
"""
pass
def newTemplate(self, *args):
"""
Create a new template and add it to the
UI layout for user manipulation.
"""
pass
def rebuild(self):
"""
Rebuild the UI. We find the parent class and tell it
to kickstart the rebuild.
"""
pass
def resetSettings(self, *args):
"""
Reset to the default settings and rebuild the UI.
"""
pass
def resetUI(self):
"""
Tell all UI templates to reset their UI handles. This
is done because these templates hold onto some
local data that must be cleared out before rebuilding.
pass
def saveSettings(self, *args):
"""
Call the template manager to store its current settings
"""
pass
class NamingTemplateUI(BaseUI):
"""
This class encapsulates all of the UI around multi rig naming templates.
"""
def __init__(self, parent, mgr, template):
"""
Class initializer.
"""
pass
def addLayer(self, *args):
"""
Add a new layer to this object.
"""
pass
def autoCreateCkboxChange(self, args, layer='0'):
"""
Called when the check box is changed.
"""
pass
def buildLayout(self):
"""
Build a new multi-rig template UI.
"""
pass
def cameraSetNameChanged(self, arg):
pass
def createIt(self, *args):
"""
Function called when the user clicks the 'Create' button.
This will force the creation of a new rig.
"""
pass
def deleteLayer(self, args, layer='0'):
"""
Called when the delete layer button is clicked.
"""
pass
def layerCameraChanged(self, args, layer='0'):
"""
Called when the option menu group changes.
"""
pass
def layerPrefixChanged(self, args, layer='0'):
"""
Called when the prefix changes for a layer.
"""
pass
def layoutForLayer(self, layer):
"""
Build the UI for the specified layer. We need to access the
UI data later in callbacks, so we store the data inside
a dictionary keyed by layer.
"""
pass
def multiRigNameChanged(self, ui):
"""
Called when the user changes the name of this multi-rig
using the supplied text box.
"""
pass
def namingPrefixChanged(self, arg):
"""
Called when the user changes the prefix name used for the
multi-rig.
"""
pass
def removeDef(self, *args):
"""
Remove this object from the list.
"""
pass
def resetUI(self):
pass
class CameraSetEditor(BaseUI):
"""
Main class for the camera set editor.
"""
def __init__(self, parent='None'):
"""
Class constructor.
"""
pass
def buildLayout(self):
"""
Build the main layout for the class. This will kickstart all
UI creation for the class. You should have a window instance
created for the layouts to parent under.
"""
pass
def create(self):
"""
Create a new instance of the window. If there is already an instance
then show it instead of creating a new instance.
"""
pass
def name(self):
"""
Return the name for this editor.
"""
pass
def rebuild(self):
"""
Force the rebuild of the UI. This happens when
users create new templates.
"""
pass
def createIt():
"""
Create a new window instance of the Camera Set Editor.
"""
pass
gEditorWindowInstance = None
|
################################################################################
# file: settings.py
# description: energydash settings
################################################################################
# Copyright 2013 Chris Linstid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
################################################################################
# Local settings
################################################################################
DEBUG=True
LOCAL_TIMEZONE='America/New_York'
################################################################################
# MongoDB settings
################################################################################
MONGO_DATABASE_NAME='energydash'
# Host name, user name and password are defined in a file NOT in revision
# control.
import mongodb_secret
MONGO_USER=mongodb_secret.MONGO_USER
MONGO_PASSWORD=mongodb_secret.MONGO_PASSWORD
MONGO_HOST=mongodb_secret.MONGO_HOST
MONGO_REPLICA_SET=mongodb_secret.MONGO_REPLICA_SET
# The XML provided by the EnviR includes "days since birth" so we need to know
# the "birth date" so we can calculate the actual timestamp for each message.
ENVIR_BIRTH_YEAR = 2013
ENVIR_BIRTH_MONTH = 5
ENVIR_BIRTH_DAY = 18
ENVIR_BIRTH_TZ_NAME = LOCAL_TIMEZONE
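# Example: a message reporting 42 days since birth and a time of 14:30:05
# resolves to 2013-06-29 14:30:05 in LOCAL_TIMEZONE (birth date + 42 days).
# Illustrative only; the conversion itself is done by the code that parses
# the EnviR XML, not in this settings file.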
# Serial port options
ENVIR_SERIAL_PORT = '/dev/ttyUSB0'
ENVIR_SERIAL_BAUDRATE = 57600
import serial
ENVIR_SERIAL_BYTESIZE = serial.EIGHTBITS
ENVIR_SERIAL_PARITY = serial.PARITY_NONE
ENVIR_SERIAL_STOPBITS = serial.STOPBITS_ONE
ENVIR_SERIAL_TIMEOUT = 1
|
import os
import json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from segmenter.visualizers.BaseVisualizer import BaseVisualizer
class BoostVisualizer(BaseVisualizer):
def boxplot(self, clazz, results):
results = results[["loss", "fold", "boost_fold"]]
results = results.groupby(['fold', 'boost_fold']).min().reset_index()
results = results.sort_values(["fold", "boost_fold"])
results["loss"] = pd.to_numeric(results["loss"], errors='coerce')
results["baseline"] = results.apply(lambda x: results[
(results["fold"] == x["fold"]) &
(results["boost_fold"] == 0)].iloc[0].loss,
axis=1)
results["improvement"] = results.apply(
lambda x: 100 * (x["baseline"] - x["loss"]) / x["baseline"],
axis=1)
results = results[results["boost_fold"] > 0]
plot = results.boxplot(column=['improvement'],
by='boost_fold',
grid=False)
title = "Loss Improvement by Number of Boost Folds"
subtitle = "{} - Class {}".format(self.label, clazz)
fig = plot.get_figure()
plt.title('')
fig.suptitle(title, y=1.05, fontsize=14)
plt.figtext(.5, .96, subtitle, fontsize=12, ha='center')
plot.set_ylabel('Improvement Over Baseline Loss (%)')
plot.set_xlabel('Number of Boost Folds')
outfile = os.path.join(self.data_dir, "boost_boxplot.png")
fig.savefig(outfile, dpi=150, bbox_inches='tight', pad_inches=0.5)
plt.close()
def lineplot(self, clazz, results):
results = results[["epoch", "loss", "boost_fold"]]
results = results.rename(columns={'boost_fold': 'Boost Fold'})
results["loss"] = pd.to_numeric(results["loss"], errors='coerce')
results = results[results['loss'].notna()]
results = results.groupby(["epoch", 'Boost Fold']).mean().unstack()
plot = results["loss"].plot.line()
title = "Mean Validation Loss by Training Epoch"
subtitle = "{} - Class {}".format(self.label, clazz)
fig = plot.get_figure()
plt.title('')
fig.suptitle(title, y=1.05, fontsize=14)
plt.figtext(.5, .96, subtitle, fontsize=12, ha='center')
plt.ylabel('Validation Loss')
plt.xlabel('Training Epoch')
outfile = os.path.join(self.data_dir, "boost_lineplot.png")
fig.savefig(outfile, dpi=150, bbox_inches='tight', pad_inches=0.5)
plt.close()
def execute(self):
csv_file = os.path.join(self.data_dir, "train_results.csv")
clazz = self.data_dir.split("/")[-2]
if not os.path.exists(csv_file):
print("CSV file does not exist {}".format(csv_file))
return
results = pd.read_csv(csv_file)
self.boxplot(clazz, results.copy())
self.lineplot(clazz, results.copy())
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from os import link
from itemadapter import ItemAdapter
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from scrapy.exceptions import DropItem
import time
class AmazonbotPipeline:
def process_item(self, item, spider):
return item
class AmazonbotSellersRankFilterPipeline:
def process_item(self, item, spider):
asin = item['asin']
total_tr_price = item['total_tr_price']
total_us_price = item['total_us_price']
us_sellers_rank = item['us_sellers_rank']
if not us_sellers_rank or us_sellers_rank > spider.max_sellers_rank:
raise DropItem(f"DROPPED - ASIN: {asin} - Sellers rank is not defined or not below {spider.max_sellers_rank}")
else:
return {
"asin": asin,
"total_tr_price": total_tr_price,
"total_us_price": total_us_price,
"us_sellers_rank": us_sellers_rank
}
class AmazonbotFBAProfitabilityFilterPipeline:
INCH_TO_CM = 2.54
USD_TO_TRY = 8.5
POUND_TO_KG = 0.4535923
AIR_COST_PER_W = 5.5 # Cost per weight as USD
SEA_COST_PER_W = 3 # Cost per weight as USD
FBA_SELLING_FEE_RATE = 0.15
MINIMUM_ALLOWED_NET_PROFIT = -2 # as USD
def __init__(self):
self.driver = webdriver.Chrome()
self.fba_calculator_url = "https://sellercentral.amazon.com/hz/fba/profitabilitycalculator/index?lang=en_US"
def process_item(self, item, spider):
asin = item['asin']
total_tr_price = item['total_tr_price']
if not total_tr_price:
raise DropItem(f"DROPPED - ASIN: {asin} - Missing total_tr_price")
total_us_price = item['total_us_price']
if not total_us_price:
raise DropItem(f"DROPPED - ASIN: {asin} - Missing total_us_price")
us_sellers_rank = item['us_sellers_rank']
self.driver.get(self.fba_calculator_url)
time.sleep(5)
# If link continue popup occurs, click that
try_count = 0
while try_count < 5:
link_continue = self.driver.find_elements_by_xpath("//*[@id='link_continue']")
if link_continue:
link_continue[0].click()
time.sleep(5)
break
try_count += 1
time.sleep(3)
# Enter asin
search_string = self.driver.find_element_by_xpath("//*[@id='search-string']")
for s in asin:
search_string.send_keys(s)
time.sleep(0.5)
search_string.send_keys(Keys.ENTER)
time.sleep(5)
# Product dimensions and weight
product_length = float(self.driver.find_element_by_xpath("//*[@id='product-info-length']").text)
product_width = float(self.driver.find_element_by_xpath("//*[@id='product-info-width']").text)
product_height = float(self.driver.find_element_by_xpath("//*[@id='product-info-height']").text)
dimensional_weight = (product_length * self.INCH_TO_CM) * (product_width * self.INCH_TO_CM) * (product_height * self.INCH_TO_CM) / 5000
product_weight = float(self.driver.find_element_by_xpath("//*[@id='product-info-weight']").text)
max_product_weight = max(dimensional_weight, product_weight)
# Product revenue
revenue = total_us_price # Revenue thought as minimum of the us sellers prices
# Click Calculate button to see fulfillment fee
self.driver.find_element_by_xpath("//*[@id='update-fees-link-announce']").click()
time.sleep(5)
# Fees
fba_fulfillment_fee = float(self.driver.find_element_by_xpath("//*[@id='afn-amazon-fulfillment-fees']").text)
air_ship_to_amazon_cost = self.AIR_COST_PER_W * max_product_weight
sea_ship_to_amazon_cost = self.SEA_COST_PER_W * max_product_weight
fba_charge = max(0.3, total_us_price*self.FBA_SELLING_FEE_RATE)
air_total_fulfillment_cost = air_ship_to_amazon_cost + fba_charge
sea_total_fulfillment_cost = sea_ship_to_amazon_cost + fba_charge
# Storage cost
storage_cost = 0
# Product cost
product_cost = total_tr_price / self.USD_TO_TRY
# Calculate Profitability
air_net_profit = revenue - fba_fulfillment_fee - air_total_fulfillment_cost - storage_cost - product_cost
sea_net_profit = revenue - fba_fulfillment_fee - sea_total_fulfillment_cost - storage_cost - product_cost
air_roi = round(air_net_profit / (air_ship_to_amazon_cost+product_cost),2)
sea_roi = round(sea_net_profit / (sea_ship_to_amazon_cost+product_cost),2)
if air_net_profit > self.MINIMUM_ALLOWED_NET_PROFIT:
return {
"asin": asin,
"total_tr_price": total_tr_price,
"total_us_price": total_us_price,
"us_sellers_rank": us_sellers_rank,
"net_profit": air_net_profit,
"roi": air_roi,
"profitable_with": 'AIR_SHIPPING'
}
elif sea_net_profit > self.MINIMUM_ALLOWED_NET_PROFIT:
return {
"asin": asin,
"total_tr_price": total_tr_price,
"total_us_price": total_us_price,
"us_sellers_rank": us_sellers_rank,
"net_profit": sea_net_profit,
"roi": sea_roi,
"profitable_with": 'SEA_SHIPPING'
}
else:
raise DropItem(f"DROPPED - ASIN: {asin} - Product is not profitable, Air Net Profit: {air_net_profit}, Sea Net Profit: {sea_net_profit}")
|
# *-* coding: utf-8 *-*
import sys
import os
from email.mime.application import MIMEApplication
from asn1crypto import cms, core
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.asymmetric import padding
from endesive import signer
class EncryptedData(object):
def email(self, data):
msg = MIMEApplication(data)
del msg['Content-Type']
msg['Content-Disposition'] = 'attachment; filename="smime.p7m"'
msg['Content-Type'] = 'application/x-pkcs7-mime; smime-type=enveloped-data; name="smime.p7m"'
data = msg.as_string()
return data
def pad(self, s, block_size):
# PKCS#7 padding: always append 1..block_size bytes, even when the data is
# already block aligned, so the padding can be stripped unambiguously.
n = block_size - len(s) % block_size
n = bytes([n] * n)
return s + n
def recipient_info(self, cert, session_key):
public_key = cert.get_pubkey().to_cryptography_key()
encrypted_key = public_key.encrypt(session_key, padding.PKCS1v15())
cert = signer.cert2asn(cert.to_cryptography())
tbs_cert = cert['tbs_certificate']
# TODO: use subject_key_identifier when available
return cms.RecipientInfo(
name=u'ktri',
value={
'version': u'v0',
'rid': cms.RecipientIdentifier(
name=u'issuer_and_serial_number',
value={
'issuer': tbs_cert['issuer'],
'serial_number': tbs_cert['serial_number']
}
),
'key_encryption_algorithm': {
'algorithm': u'rsa',
},
'encrypted_key': core.OctetString(encrypted_key)
}
)
def build(self, data, certs, algo):
key_size = {
'aes128': 16,
'aes192': 24,
'aes256': 32,
}[algo.split('_', 1)[0]]
block_size = 16
session_key = os.urandom(key_size)
iv = os.urandom(block_size)
cipher = Cipher(algorithms.AES(session_key), getattr(modes, algo.split('_', 1)[1].upper())(iv), default_backend())
data = self.pad(data, block_size)
encryptor = cipher.encryptor()
data = encryptor.update(data) + encryptor.finalize()
recipient_infos = []
for cert in certs:
recipient_info = self.recipient_info(cert, session_key)
recipient_infos.append(recipient_info)
algo = unicode(algo) if sys.version[0] < '3' else algo
enveloped_data = cms.ContentInfo({
'content_type': u'enveloped_data',
'content': {
'version': u'v0',
'recipient_infos': recipient_infos,
'encrypted_content_info': {
'content_type': u'data',
'content_encryption_algorithm': {
'algorithm': algo,
'parameters': iv
},
'encrypted_content': data
}
}
})
data = self.email(enveloped_data.dump())
return data
def encrypt(data, certs, algo=u'aes256_cbc'):
assert algo[:3] == 'aes' and algo.split('_', 1)[1] in ('cbc', 'ofb')
cls = EncryptedData()
return cls.build(data, certs, algo)
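# Usage sketch (file names are hypothetical; recipient certificates are
# pyOpenSSL X509 objects, e.g. loaded with OpenSSL.crypto):
#
#   from OpenSSL import crypto
#   with open('recipient.pem', 'rb') as fp:
#       cert = crypto.load_certificate(crypto.FILETYPE_PEM, fp.read())
#   smime = encrypt(b'hello world', [cert], algo=u'aes256_cbc')
#   with open('smime.p7m.eml', 'w') as fp:
#       fp.write(smime)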
|
# Here is the list of modules we need to import
# from . import authorizations
from authorizations import at,at_url,headers_at,encoded_u,encoded_u_td,td_base_url,get_headers
import atws
import atws.monkeypatch.attributes
import pandas as pd
import requests
def get_notes_for_list_of_note_ids(id_=[]):
query_notes=atws.Query('TicketNote')
query_notes.open_bracket('AND')
if len(id_)==1:
query_notes.WHERE('id',query_notes.Equals,id_[0])
# query_notes.AND('NoteType',query_notes.Equals,3) #at.picklist['TicketNote']['NoteType']['Task Notes']
else:
query_notes.WHERE('id',query_notes.Equals,id_[0])
# query_notes.AND('NoteType',query_notes.Equals,3) #at.picklist['TicketNote']['NoteType']['Task Notes']
for element in id_[1:]:
query_notes.OR('id',query_notes.Equals,element)
query_notes.close_bracket()
query_notes.open_bracket('AND')
query_notes.AND('NoteType',query_notes.NotEqual,13)
query_notes.AND('Publish',query_notes.Equals,1)
query_notes.close_bracket()
notes = at.query(query_notes).fetch_all()
df = pd.DataFrame([dict(note) for note in notes])
return df,notes
# def get_ticket_notes_at(id_=[0]):
# """
# Returns all notes, belonging to the tickets from the given list.
# Parameters:
# id_ [list]: list of Autotask ticket ids
# at [Autotask connect object] : Autotask atws.connect object
# Returns:
# Tuple: (Python DataFrame, list of notes)
# """
# query_notes=atws.Query('TicketNote')
# query_notes.AND('NoteType',query_notes.Equals,3)
# if len(id_)==1:
# query_notes.WHERE('TicketID',query_notes.Equals,id_[0])
# else:
# query_notes.WHERE('TicketID',query_notes.Equals,id_[0])
# for element in id_[1:]:
# query_notes.OR('TicketID',query_notes.Equals,element)
# notes = at.query(query_notes).fetch_all()
# df = pd.DataFrame([dict(note) for note in notes])
# return df,notes
def get_ticket_notes_at(id_=[0]):
"""
Return all notes belonging to the tickets in the given list.
Parameters:
id_ [list]: list of Autotask ticket ids
at [Autotask connect object] : Autotask atws.connect object
Returns:
Tuple: (Python DataFrame, list of notes)
"""
query_notes=atws.Query('TicketNote')
# query_notes.WHERE('NoteType',query_notes.Equals,3)
# query.open_bracket('AND')
query_notes.open_bracket('AND')
if len(id_)==1:
query_notes.WHERE('TicketID',query_notes.Equals,id_[0])
else:
query_notes.WHERE('TicketID',query_notes.Equals,id_[0])
for element in id_[1:]:
query_notes.OR('TicketID',query_notes.Equals,element)
query_notes.close_bracket()
query_notes.open_bracket('AND')
query_notes.AND('NoteType',query_notes.NotEqual,13)
query_notes.AND('Publish',query_notes.Equals,1)
query_notes.close_bracket()
notes = at.query(query_notes).fetch_all()
df = pd.DataFrame([dict(note) for note in notes])
return df,notes
def make_note_in_at(title='Title',descr='Long description 3200 chars',note_type=3,ticket_id=0):
note = at.new('TicketNote')
note.Title = title
note.Description = descr
note.NoteType = note_type # 3 corresponds to 'Task Notes' (see picklist comment above)
note.TicketID= ticket_id
note.Publish = 1
note.create()
return note
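# Usage sketch (ticket ids below are hypothetical; `at` is the Autotask
# connection imported from authorizations, and the DataFrame columns follow
# the TicketNote entity fields):
#
#   notes_df, notes = get_ticket_notes_at([12345, 12346])
#   print(notes_df.head())
#
#   new_note = make_note_in_at(title='Follow-up',
#                              descr='Called the customer back.',
#                              note_type=3,
#                              ticket_id=12345)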
|
import os
import gzip
import cPickle
import numpy as np
from scipy.io import loadmat
import pyhsmm_spiketrains.models
reload(pyhsmm_spiketrains.models)
from pyhsmm_spiketrains.internals.utils import split_train_test
def load_synth_data(T, K, N, T_test=1000,
model='hdp-hmm',
version=1,
alpha_obs=1.0, beta_obs=1.0):
"""
Make a synthetic HMM dataset
:param T: Number of time bins
:param K: Number of latent states (or max number in the HDP case)
:param N: Number of neurons
:return:
"""
data_dir = "data"
file_name = 'synth_%s_K%d_T%d_N%d_v%d.pkl' % (model, K, T, N, version)
data_file = os.path.join(data_dir, file_name)
if os.path.exists(data_file):
with gzip.open(data_file, "r") as f:
res = cPickle.load(f)
else:
if model == 'hmm':
hmm = pyhsmm_spiketrains.models.PoissonHMM(
N=N, K=K,
alpha_obs=alpha_obs, beta_obs=beta_obs,
alpha=12.0, gamma=12.0,
init_state_concentration=1.0
)
elif model == 'hdp-hmm':
hmm = pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K,
alpha_obs=alpha_obs, beta_obs=beta_obs,
alpha=12.0, gamma=12.0,
init_state_concentration=1.0,
)
else:
raise Exception('Unrecognized model')
S_train, Z_train = hmm.generate(T)
S_test, Z_test = hmm.generate(T_test)
print "Num used states: ", len(np.unique(Z_train))
res = hmm, S_train, Z_train, S_test, Z_test
with gzip.open(data_file, "w") as f:
cPickle.dump(res, f, protocol=-1)
return res
def load_hipp_data(dataname="hipp_2dtrack_a", trainfrac=0.8):
raw_data = loadmat("data/%s.mat" % dataname)
S = raw_data['S'].astype(np.int).copy("C")
# Get the time stamps
T,N = S.shape
dt = 0.25
ts = np.arange(T) * dt
# Get the corresponding position
pos = raw_data['pos']
S_train, pos_train, S_test, pos_test = split_train_test(S, pos, trainfrac=trainfrac)
if "cp" in raw_data and "r" in raw_data:
center = raw_data['cp'].ravel()
radius = np.float(raw_data['r'])
else:
center = radius = None
return N, S_train, pos_train, S_test, pos_test, center, radius
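# Usage sketch (expects data/hipp_2dtrack_a.mat to be present, matching the
# default argument above):
#
#   N, S_train, pos_train, S_test, pos_test, center, radius = \
#       load_hipp_data(dataname="hipp_2dtrack_a", trainfrac=0.8)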
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 claviering <claviering@sunn>
#
# Distributed under terms of the WTFPL license.
from sys import stdout
# Print every perfect number between 2 and 1000 (a number equal to the sum
# of its proper divisors), followed by the divisors themselves.
for j in range(2,1001):
k = []
n = -1
s = j
for i in range(1,j):
if j % i == 0:
n += 1
s -= i
k.append(i)
if s == 0:
print j
for i in range(n):
stdout.write(str(k[i]))
stdout.write(' ')
print k[n]
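# Expected output for the 2..1000 range (the perfect numbers below 1000):
#   6
#   1 2 3
#   28
#   1 2 4 7 14
#   496
#   1 2 4 8 16 31 62 124 248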
|
import mock
import unittest2
import urllib
import urllib2
from mlabns.util import constants
from mlabns.util import message
from mlabns.util import prometheus_status
class ParseSliverToolStatusTest(unittest2.TestCase):
def test_parse_sliver_tool_status_returns_successfully_parsed_tuple(self):
status = {
"metric": {
"experiment": "ndt.iupui",
"machine": "mlab1-abc01.mlab-oti.measurement-lab.org"
},
"value": [1522782427.81, "1"]
}
expected_parsed_status = (
'ndt-iupui-mlab1-abc01.mlab-oti.measurement-lab.org', '1',
constants.PROMETHEUS_TOOL_EXTRA)
actual_parsed_status = prometheus_status.parse_sliver_tool_status(
status)
self.assertTupleEqual(expected_parsed_status, actual_parsed_status)
def test_parse_sliver_tool_status_raises_PrometheusStatusUnparseableError_because_of_illformatted_status(
self):
status = 'mock status'
with self.assertRaises(
prometheus_status.PrometheusStatusUnparseableError):
prometheus_status.parse_sliver_tool_status(status)
class GetSliceInfoTest(unittest2.TestCase):
def setUp(self):
self.prometheus_base_url = 'https://prom.mock.mlab.net/api/?query='
def test_get_slice_info_returns_none_with_nonexistent_tool(self):
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'nonexistent_tool', '')
self.assertIsNone(retrieved)
def test_get_slice_info_returns_valid_objects_when_tools_stored(self):
ndt_url_ipv4 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['ndt'])
ndt_url_ipv6 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['ndt_ipv6'])
neubot_url_ipv4 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['neubot'])
neubot_url_ipv6 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['neubot_ipv6'])
expected_slice_data = {
'ndt': {
'info':
prometheus_status.PrometheusSliceInfo(ndt_url_ipv4, 'ndt', ''),
'info_ipv6': prometheus_status.PrometheusSliceInfo(
ndt_url_ipv6, 'ndt', '_ipv6'),
},
'neubot': {
'info': prometheus_status.PrometheusSliceInfo(neubot_url_ipv4,
'neubot', ''),
'info_ipv6': prometheus_status.PrometheusSliceInfo(
neubot_url_ipv6, 'neubot', '_ipv6'),
}
}
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'ndt', '')
self.assertEqual(expected_slice_data['ndt']['info'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'ndt', '_ipv6')
self.assertEqual(expected_slice_data['ndt']['info_ipv6'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'neubot', '')
self.assertEqual(expected_slice_data['neubot']['info'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'neubot', '_ipv6')
self.assertEqual(expected_slice_data['neubot']['info_ipv6'], retrieved)
class StatusUpdateHandlerTest(unittest2.TestCase):
def setUp(self):
self.mock_response = mock.Mock()
self.mock_response.msg = 'mock message'
self.mock_response.code = '200'
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
def test_get_slice_status_returns_none_with_invalid_json(self, mock_open):
self.mock_response.read.return_value = '{lol, not valid json'
mock_open.return_value = self.mock_response
result = prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector())
self.assertIsNone(result)
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
@mock.patch.object(prometheus_status, 'parse_sliver_tool_status')
def test_get_slice_status_returns_populated_dictionary_when_it_gets_valid_statuses(
self, mock_parse_sliver_tool_status, mock_open):
self.mock_response.read.return_value = """
{
"status": "success",
"data": {
"resultType": "vector",
"result": [
{ "metric": {
"experiment": "mock",
"machine": "mlab1-xyz01.mlab-oti.measurement-lab.org" },
"value": [1522782427.81, "1"]
},
{ "metric": {
"experiment": "mock",
"machine": "mlab2-xyz01.mlab-oti.measurement-lab.org" },
"value": [1522773427.51, "0"]
}
]
}
}"""
mock_open.return_value = self.mock_response
mock_parse_sliver_tool_status.side_effect = [
('mock-mlab1-xyz01.mlab-oti.measurement-lab.org', '1',
constants.PROMETHEUS_TOOL_EXTRA),
('mock-mlab2-xyz01.mlab-oti.measurement-lab.org', '0',
constants.PROMETHEUS_TOOL_EXTRA)
]
expected_status = {
'mock-mlab1-xyz01.mlab-oti.measurement-lab.org': {
'status': message.STATUS_ONLINE,
'tool_extra': constants.PROMETHEUS_TOOL_EXTRA
},
'mock-mlab2-xyz01.mlab-oti.measurement-lab.org': {
'status': message.STATUS_OFFLINE,
'tool_extra': constants.PROMETHEUS_TOOL_EXTRA
}
}
actual_status = prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector())
self.assertDictEqual(actual_status, expected_status)
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
def test_get_slice_status_returns_none_when_a_HTTPError_is_raised_by_urlopen(
self, mock_open):
# urllib2.HTTPError() requires 6 arguments. Subclassing to override
# __init__ makes instantiating this easier.
class MockHttpError(urllib2.HTTPError):
def __init__(self, cause):
self.cause = cause
self.mock_response.read.side_effect = MockHttpError('mock http error')
mock_open.return_value = self.mock_response
self.assertIsNone(prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector()))
if __name__ == '__main__':
unittest2.main()
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from magnumclient.common.apiclient import exceptions
from magnumclient.tests.v1 import shell_test_base
from magnumclient.v1.baymodels import BayModel
class FakeBayModel(BayModel):
def __init__(self, manager=None, info={}, **kwargs):
BayModel.__init__(self, manager=manager, info=info)
self.apiserver_port = kwargs.get('apiserver_port', None)
self.uuid = kwargs.get('uuid', 'x')
self.links = kwargs.get('links', [])
self.server_type = kwargs.get('server_type', 'vm')
self.image_id = kwargs.get('image_id', 'x')
self.tls_disabled = kwargs.get('tls_disabled', False)
self.registry_enabled = kwargs.get('registry_enabled', False)
self.coe = kwargs.get('coe', 'x')
self.public = kwargs.get('public', False)
self.name = kwargs.get('name', 'x')
class ShellTest(shell_test_base.TestCommandLineArgument):
def _get_expected_args_list(self, limit=None, sort_dir=None,
sort_key=None, detail=False):
expected_args = {}
expected_args['limit'] = limit
expected_args['sort_dir'] = sort_dir
expected_args['sort_key'] = sort_key
expected_args['detail'] = detail
return expected_args
def _get_expected_args(self, image_id, external_network_id, coe,
master_flavor_id=None, name=None,
keypair_id=None, fixed_network=None,
fixed_subnet=None, network_driver=None,
volume_driver=None, dns_nameserver='8.8.8.8',
flavor_id='m1.medium',
docker_storage_driver='devicemapper',
docker_volume_size=None, http_proxy=None,
https_proxy=None, no_proxy=None, labels={},
tls_disabled=False, public=False,
master_lb_enabled=False, server_type='vm',
registry_enabled=False, floating_ip_enabled=None):
expected_args = {}
expected_args['image_id'] = image_id
expected_args['external_network_id'] = external_network_id
expected_args['coe'] = coe
expected_args['master_flavor_id'] = master_flavor_id
expected_args['name'] = name
expected_args['keypair_id'] = keypair_id
expected_args['fixed_network'] = fixed_network
expected_args['fixed_subnet'] = fixed_subnet
expected_args['network_driver'] = network_driver
expected_args['volume_driver'] = volume_driver
expected_args['dns_nameserver'] = dns_nameserver
expected_args['flavor_id'] = flavor_id
expected_args['docker_volume_size'] = docker_volume_size
expected_args['docker_storage_driver'] = docker_storage_driver
expected_args['http_proxy'] = http_proxy
expected_args['https_proxy'] = https_proxy
expected_args['no_proxy'] = no_proxy
expected_args['labels'] = labels
expected_args['tls_disabled'] = tls_disabled
expected_args['public'] = public
expected_args['master_lb_enabled'] = master_lb_enabled
expected_args['server_type'] = server_type
expected_args['registry_enabled'] = registry_enabled
return expected_args
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--flavor-id test_flavor '
'--fixed-network private '
'--fixed-subnet private-subnet '
'--volume-driver test_volume '
'--network-driver test_driver '
'--labels key=val '
'--master-flavor-id test_flavor '
'--docker-volume-size 10 '
'--docker-storage-driver devicemapper '
'--public '
'--server-type vm '
'--master-lb-enabled '
'--floating-ip-enabled ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns', public=True,
flavor_id='test_flavor',
master_flavor_id='test_flavor',
fixed_network='private',
fixed_subnet='private-subnet',
server_type='vm',
network_driver='test_driver',
volume_driver='test_volume',
docker_storage_driver='devicemapper',
docker_volume_size=10,
master_lb_enabled=True,
labels={'key': 'val'})
expected_args['floating_ip_enabled'] = True
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net',
server_type='vm')
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test '
'--server-type vm '
'--floating-ip-disabled ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net',
server_type='vm',
floating_ip_enabled=False)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_no_servertype(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--flavor-id test_flavor '
'--fixed-network public '
'--network-driver test_driver '
'--labels key=val '
'--master-flavor-id test_flavor '
'--docker-volume-size 10 '
'--docker-storage-driver devicemapper '
'--public ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns', public=True,
flavor_id='test_flavor',
master_flavor_id='test_flavor',
fixed_network='public',
network_driver='test_driver',
docker_storage_driver='devicemapper',
docker_volume_size=10,
labels={'key': 'val'})
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_with_registry_enabled(
self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--registry-enabled')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
network_driver='test_driver',
registry_enabled=True)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_public_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--public '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
public=True, server_type='vm',
network_driver='test_driver')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_with_master_flavor(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--master-flavor-id test_flavor')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns',
master_flavor_id='test_flavor')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_docker_vol_size_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --docker-volume-size 4514 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
docker_volume_size=4514)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_docker_storage_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--docker-storage-driver devicemapper '
'--coe swarm'
)
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
docker_storage_driver='devicemapper')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_fixed_network_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
fixed_network='private',
external_network_id='test_net',
server_type='vm')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_network_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
network_driver='test_driver')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_volume_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --volume-driver test_volume '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
volume_driver='test_volume')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_http_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--http-proxy http_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
http_proxy='http_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_https_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--https-proxy https_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
https_proxy='https_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_no_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--no-proxy no_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
no_proxy='no_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key=val '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key': 'val'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_separate_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key1=val1 '
'--labels key2=val2 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key1': 'val1', 'key2': 'val2'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_combined_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key1=val1,key2=val2 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key1': 'val1', 'key2': 'val2'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_failure_few_arg(self, mock_create):
self._test_arg_failure('baymodel-create '
'--name test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--image-id test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--keypair-id test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--external-network-id test',
self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--coe test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--server-type test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create', self._mandatory_arg_error)
mock_create.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.get')
def test_baymodel_show_success(self, mock_show):
self._test_arg_success('baymodel-show xxx')
mock_show.assert_called_once_with('xxx')
@mock.patch('magnumclient.v1.baymodels.BayModelManager.get')
def test_baymodel_show_failure_no_arg(self, mock_show):
self._test_arg_failure('baymodel-show', self._few_argument_error)
mock_show.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_success(self, mock_delete):
self._test_arg_success('baymodel-delete xxx')
mock_delete.assert_called_once_with('xxx')
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_multiple_id_success(self, mock_delete):
self._test_arg_success('baymodel-delete xxx xyz')
calls = [mock.call('xxx'), mock.call('xyz')]
mock_delete.assert_has_calls(calls)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_failure_no_arg(self, mock_delete):
self._test_arg_failure('baymodel-delete', self._few_argument_error)
mock_delete.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_success(self, mock_update):
self._test_arg_success('baymodel-update test add test=test')
patch = [{'op': 'add', 'path': '/test', 'value': 'test'}]
mock_update.assert_called_once_with('test', patch)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_success_many_attribute(self, mock_update):
self._test_arg_success('baymodel-update test '
'add test=test test1=test1')
patch = [{'op': 'add', 'path': '/test', 'value': 'test'},
{'op': 'add', 'path': '/test1', 'value': 'test1'}]
mock_update.assert_called_once_with('test', patch)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_failure_wrong_op(self, mock_update):
_error_msg = [
'.*?^usage: magnum baymodel-update ',
'.*?^error: argument <op>: invalid choice: ',
".*?^Try 'magnum help baymodel-update' for more information."
]
self._test_arg_failure('baymodel-update test wrong test=test',
_error_msg)
mock_update.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_failure_few_args(self, mock_update):
_error_msg = [
'.*?^usage: magnum baymodel-update ',
'.*?^error: (the following arguments|too few arguments)',
".*?^Try 'magnum help baymodel-update' for more information."
]
self._test_arg_failure('baymodel-update', _error_msg)
mock_update.assert_not_called()
self._test_arg_failure('baymodel-update test', _error_msg)
mock_update.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success(self, mock_list):
self._test_arg_success('baymodel-list')
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success_with_arg(self, mock_list):
self._test_arg_success('baymodel-list '
'--limit 1 '
'--sort-dir asc '
'--sort-key uuid')
expected_args = self._get_expected_args_list(1, 'asc', 'uuid')
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success_detailed(self, mock_list):
self._test_arg_success('baymodel-list '
'--detail')
expected_args = self._get_expected_args_list(detail=True)
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_ignored_duplicated_field(self, mock_list):
mock_list.return_value = [FakeBayModel()]
self._test_arg_success('baymodel-list --fields coe,coe,coe,name,name',
keyword='\n| uuid | name | Coe |\n')
# Output should be
# +------+------+-----+
# | uuid | name | Coe |
# +------+------+-----+
# | x | x | x |
# +------+------+-----+
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure_with_invalid_field(self, mock_list):
mock_list.return_value = [FakeBayModel()]
_error_msg = [".*?^Non-existent fields are specified: ['xxx','zzz']"]
self.assertRaises(exceptions.CommandError,
self._test_arg_failure,
'baymodel-list --fields xxx,coe,zzz',
_error_msg)
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure_invalid_arg(self, mock_list):
_error_msg = [
'.*?^usage: magnum baymodel-list ',
'.*?^error: argument --sort-dir: invalid choice: ',
".*?^Try 'magnum help baymodel-list' for more information."
]
self._test_arg_failure('baymodel-list --sort-dir aaa', _error_msg)
mock_list.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure(self, mock_list):
self._test_arg_failure('baymodel-list --wrong',
self._unrecognized_arg_error)
mock_list.assert_not_called()
|
import time
import os
# for organization, encode parameters in dir name
def setOutDir(params):
timestamp = str(int(time.time()))
try:
jobid = os.environ['SLURM_JOBID']
    except KeyError:
jobid = 'NOID'
if params['root'] is None:
root = os.path.join(os.environ['HOME'], "STM", "experiments")
else:
root = params['root']
if os.environ['IS_INTERACTIVE'] == 'true':
vers = "tmp"
else:
vers = params['version']
out_dir = os.path.abspath(os.path.join(
root,
params.get("type", "unknown_type"), # classifier/agent
"runs",
vers,
jobid + "_" + timestamp))
if params.get("type", "unknown_type") == "agent":
out_dir += "_mbs" + str(params['miniBatchSize'])
out_dir += "_tau" + str(params['tau'])
out_dir += "_es" + str(params['epsilonStart'])
out_dir += "_en" + str(params['epsilonStop'])
out_dir += "_opt" + str(params['optimizer'])
out_dir += "_lr" + str(params['learning-rate'])
if params['batchnorm']:
out_dir += "_bn"
else:
out_dir += "_noBn"
out_dir += "_rew" + str(params['reward'])
print("Number of random episodes: ", params['randomEps'])
out_dir += "_randEps" + str(params['randomEps'])
print("gamma: ", params['gamma'])
out_dir += "_gamma" + str(params['gamma'])
if params['dqnNN'] is not None:
try:
out_dir += "_init" + params['dqnNN'].split("/")[8].split("_")[0]
            except IndexError:
pass
elif params['useClassNN'] is not None:
try:
out_dir += "_init" + params['classNN'].split("/")[8].split("_")[0]
            except IndexError:
pass
else:
out_dir += "_initRand"
return out_dir
if params.get("type", "unknown_type") == "classifier":
print("Number of training steps: ", params['numTrainSteps'])
out_dir += "_" + str(params['numTrainSteps'])
print("miniBatchSize: ", params['miniBatchSize'])
out_dir += "_" + str(params['miniBatchSize'])
print("dropout", params['dropout'])
if params['dropout']:
out_dir += "_" + "drp" + str(params['dropout'])
else:
out_dir += "_" + "noDrp"
if params['distortBrightnessRelative'] or params['distortContrast']:
params['distorted'] = True
print("distorted", params['distorted'])
if params['distorted']:
out_dir += "_" + "augm"
delta = params['distortBrightnessRelative']
factor = params['distortContrast']
stddev = params['distortGaussian']
fracSP = params['distortSaltPepper']
if delta != 0:
out_dir += "_Br" + str(delta)
if factor != 0:
out_dir += "_Cntr-" + str(factor)
if stddev != 0:
out_dir += "_Gau-" + str(stddev)
if fracSP != 0:
out_dir += "_SP-" + str(fracSP)
else:
out_dir += "_" + "noAugm"
print("batchnorm", params['batchnorm'])
if params['batchnorm']:
out_dir += "_" + "bn-" + str(params['batchnorm-decay'])
else:
out_dir += "_" + "noBn"
out_dir += "_" + "cSz" + str(params['pxRes'])
print("weight decay", params['weight-decay'])
out_dir += "_wd" + str(params['weight-decay'])
print("learning rate", params['learning-rate'])
out_dir += "_lr" + str(params['learning-rate'])
if params["lr-decay"]:
out_dir += "Dc"
print("momentum", params['momentum'])
out_dir += "_mom" + str(params['momentum'])
print("optimizer", params['optimizer'])
out_dir += "_opt" + params['optimizer']
if params['in_dir'] is not None:
print("reading data from: ", params['in_dir'])
out_dir += params['in_dir'].split("/")[-1]
if params['aucLoss']:
out_dir += "_auc"
if params['penalizeFP']:
out_dir += "_penFP"
if params['relWeightPosSamples'] is not None:
out_dir += "_wPos" + str(params['relWeightPosSamples'])
if params['RANSAC']:
out_dir += "_ransac"
else:
out_dir += "_noRansac"
return out_dir
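# Minimal usage sketch (illustrative only): the parameter keys and values below are
# assumptions about what a classifier run might pass in; the real dict is built elsewhere.
# Note that setOutDir() also reads the IS_INTERACTIVE environment variable.
if __name__ == "__main__":
    os.environ.setdefault('IS_INTERACTIVE', 'true')
    example_params = {
        'root': '/tmp/experiments', 'version': 'v1', 'type': 'classifier',
        'numTrainSteps': 1000, 'miniBatchSize': 32, 'dropout': 0.5,
        'distortBrightnessRelative': 0, 'distortContrast': 0,
        'distortGaussian': 0, 'distortSaltPepper': 0, 'distorted': False,
        'batchnorm': True, 'batchnorm-decay': 0.9, 'pxRes': 64,
        'weight-decay': 1e-4, 'learning-rate': 1e-3, 'lr-decay': False,
        'momentum': 0.9, 'optimizer': 'adam', 'in_dir': None,
        'aucLoss': False, 'penalizeFP': False, 'relWeightPosSamples': None,
        'RANSAC': False,
    }
    # e.g. /tmp/experiments/classifier/runs/tmp/NOID_<timestamp>_1000_32_drp0.5_noAugm_...
    print(setOutDir(example_params))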
|
# -*- coding: utf-8 -*-
import os, sys, pdb
import argparse
import torch
from torch.utils import data
import torchvision.transforms as standard_transforms
import numpy as np
FILE_PATH = os.path.abspath(__file__)
PRJ_PATH = os.path.dirname(os.path.dirname(FILE_PATH))
sys.path.append(PRJ_PATH)
from yolo_v2.proj_utils.local_utils import mkdirs
from yolo_v2.cfgs.config_knee import cfg
from yolo_v2.darknet import Darknet19
from yolo_v2.datasets.knee import Knee
from yolo_v2.train_yolo import train_eng
def set_args():
    # Argument settings
parser = argparse.ArgumentParser(description="Knee Bone Detection")
parser.add_argument('--batch_size', type=int, default=8, help='batch size.')
parser.add_argument('--maxepoch', type=int, default=500, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=2.0e-4, help='learning rate')
    parser.add_argument('--lr_decay', type=float, default=0.8, help='learning rate decay factor')
    parser.add_argument('--lr_decay_epochs', type=int, nargs='+', default=[60, 120, 180, 240, 300, 360, 420, 480],
                        help='decay the learning rate at these epochs')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay for training')
parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum (default: 0.9)')
    parser.add_argument('--display_freq', type=int, default=10, help='plot the results every N batches')
    parser.add_argument('--save_freq', type=int, default=10, help='how frequently to save the model')
parser.add_argument('--device-id', type=int, default=0)
parser.add_argument('--model-name', type=str, default='kneedet')
parser.add_argument('--seed', type=int, default=1234)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = set_args()
np.random.seed(args.seed)
# Data and Model settings
data_root = "../../data/DetKneeData"
model_root = os.path.join(data_root, args.model_name)
mkdirs(model_root, erase=True)
    # Normalize with the dataset mean and std (cfg.rgb_mean, cfg.rgb_var)
input_transform = standard_transforms.Compose([
standard_transforms.ToTensor(),
standard_transforms.Normalize(cfg.rgb_mean, cfg.rgb_var)])
train_dataset = Knee(data_root, "train", transform=input_transform)
train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = Knee(data_root, "val", transform=input_transform)
val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size)
# Set Darknet
net = Darknet19(cfg)
# CUDA Settings
cuda_avail = torch.cuda.is_available()
print("\n==== Starting training ====\n" + "===="*20)
if cuda_avail:
print("CUDA {} in use".format(args.device_id))
net.cuda(args.device_id)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
else:
print("CPU in use")
# print ('>> START training ')
train_eng(train_dataloader, val_dataloader, model_root, net, args)
|
#
# cogs/messages.py
#
# mawabot - Maware's selfbot
# Copyright (c) 2017 Ma-wa-re, Ammon Smith
#
# mawabot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
''' Has several commands that deal with messages '''
import asyncio
import json
import logging
import discord
from discord.ext import commands
MAX_DELETE_POSTS = 80
logger = logging.getLogger(__name__)
class Messages:
__slots__ = (
'bot',
)
def __init__(self, bot):
self.bot = bot
# Helper methods
@staticmethod
async def _get_messages(channel, ids):
''' Gets a list of the messages with the given IDs, in that order '''
messages = []
async for msg in channel.history():
try:
messages.append((ids.index(msg.id), msg))
except ValueError:
pass
return map(lambda t: t[1], sorted(messages))
@staticmethod
async def _hit(ctx, content):
if content is not None:
await ctx.send(content=content, delete_after=0)
# Commands
@commands.command()
async def hit(self, ctx, *, content: str = None):
''' Sends the given message and then immediately deletes it '''
await asyncio.gather(
self._hit(ctx, content),
ctx.message.delete(),
)
@commands.command()
async def delay(self, ctx, seconds: float, *, content: str):
''' Sends the given message after the specified number of seconds '''
logger.info(f'Queued up delayed message for {seconds} seconds from now')
await asyncio.gather(
asyncio.sleep(seconds),
ctx.message.delete(),
)
logger.info(f'Posting delayed message: {content}')
await ctx.send(content=content)
@commands.command()
async def embed(self, ctx, *, content: str):
''' Inserts the given message into an embed. '''
try:
args = json.loads(content)
embed = discord.Embed(**args)
        except (ValueError, TypeError):
embed = discord.Embed(type='rich', description=content)
await asyncio.gather(
ctx.send(embed=embed),
ctx.message.delete(),
)
@commands.command()
async def quote(self, ctx, id: int, cid: int = 0):
''' Quotes the given post(s) '''
tasks = [ctx.message.delete()]
if cid:
channel = self.bot.get_channel(cid)
if channel is None:
logger.warning(f'Cannot find the channel with ID {cid}')
return
else:
channel = ctx.channel
to_quote = await self._get_messages(channel, (id,))
for msg in to_quote:
embed = discord.Embed(type='rich', description=msg.content)
embed.set_author(name=msg.author.display_name, icon_url=msg.author.avatar_url)
embed.timestamp = msg.created_at
if msg.attachments:
urls = '\n'.join(attach.url for attach in msg.attachments)
embed.add_field(name='Attachments:', value=urls)
tasks.append(ctx.send(embed=embed))
tasks.append(self.bot.output_send(embed=embed))
await asyncio.gather(*tasks)
@commands.command()
async def dump(self, ctx, *ids: int):
''' Outputs the literal contents of the given post(s) '''
tasks = [ctx.message.delete()]
to_copy = await self._get_messages(ctx.channel, ids)
for msg in to_copy:
if msg.content:
content = '\n'.join((
'Plain:',
'```',
msg.content.replace("`", "'"),
'```',
'',
'Chars:',
'```',
' '.join(f'{ord(c):02x}' for c in msg.content),
'```',
))
else:
content = '(Message is empty)'
embed = discord.Embed(type='rich', description=content)
embed.set_author(name=msg.author.display_name, icon_url=msg.author.avatar_url)
embed.timestamp = msg.edited_at or msg.created_at
if msg.attachments:
urls = '\n'.join(attach.url for attach in msg.attachments)
embed.add_field(name='Attachments:', value=urls)
tasks.append(self.bot.output_send(embed=embed))
tasks += [self.bot.send(embed=embed) for embed in msg.embeds]
await asyncio.gather(*tasks)
@commands.command(aliases=['delet'])
async def delete(self, ctx, posts: int = 1):
''' Deletes the last X posts you made, including the trigger '''
if posts > MAX_DELETE_POSTS:
logger.error((f'Asked to delete {posts} posts which is greater than '
f'the self-imposed limit of {MAX_DELETE_POSTS}'))
return
tasks = [ctx.message.delete()]
deleted = 0
async for msg in ctx.channel.history():
if msg.author == self.bot.user:
tasks.append(msg.delete())
deleted += 1
if deleted >= posts + 1:
break
await asyncio.gather(*tasks)
@commands.command()
async def purge(self, ctx, posts: int = 1):
''' Deletes the last X posts in the channel '''
if posts > MAX_DELETE_POSTS:
logger.error((f'Asked to delete {posts} posts which is greater than '
f'the self-imposed limit of {MAX_DELETE_POSTS}'))
return
tasks = []
async for msg in ctx.channel.history(limit=posts + 1):
tasks.append(msg.delete())
await asyncio.gather(*tasks)
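# Note (sketch): in discord.py a cog like Messages is normally registered elsewhere via
# something like bot.add_cog(Messages(bot)); how mawabot wires this up is outside this file.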
|
# Write a program with a function called voto() that receives a person's birth year as a
# parameter and returns a string indicating whether that person's vote is DENIED, OPTIONAL
# or MANDATORY in the elections.
# my answer
def voto(ano):
from datetime import date
atual = date.today().year
idade = atual - ano
if idade < 16:
return f'Com {idade} anos: NÃO VOTA'
if 16 <= idade < 18 or idade > 65:
return f'Com {idade} anos: VOTO OPCIONAL'
else:
return f'Com {idade} anos: VOTO OBRIGATORIO'
# Main program
nasc = int(input('Em que ano voce nasceu? '))
print(voto(nasc))
|
def octal_to_string(octal):
result = ""
value_letters = [(4,"r"),(2,"w"),(1,"x")]
for octet in [int(n) for n in str(octal)]:
b = format(octet, '03b')
for n,v in enumerate(b):
if (int(v) > 0):
result += value_letters[n][-1]
else:
result += '-'
return result
print(octal_to_string(755)) # Should be rwxr-xr-x
print(octal_to_string(644)) # Should be rw-r--r--
print(octal_to_string(750)) # Should be rwxr-x---
print(octal_to_string(600)) # Should be rw-------
|
# -*- coding: utf-8 -*
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import numpy as np
import random
from time import sleep
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
from common import face_image
import cv2
import time
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def main(args):
# facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = face_image.get_dataset('lfw', args.input_dir)
print('dataset size', 'lfw', len(dataset))
output_filename = os.path.join(args.output_dir, 'train.lst')
with open(output_filename, "w") as text_file:
nrof_images_total = 0
nrof = np.zeros((5,), dtype=np.int32)
for fimage in dataset:
if nrof_images_total % 50000 == 0:
print("Processing %d, (%s)" % (nrof_images_total, nrof))
nrof_images_total += 1
_paths = fimage.image_path.split('/')
a, b = _paths[-2], _paths[-1]
target_dir = os.path.join(args.input_dir, a)
# if not os.path.exists(target_dir):
# os.makedirs(target_dir)
target_file = os.path.join(target_dir, b)
oline = '%d\t%s\t%d\n' % (1, target_file, int(fimage.classname))
text_file.write(oline)
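# Each line written to train.lst has the form "<flag>\t<image path>\t<class id>", e.g.
# "1\t/data1t/mask/glint-mask/person_a/img_001.jpg\t42" (this path and label are purely
# illustrative, not taken from the dataset).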
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input-dir', type=str, help='Directory with unaligned images.',
default='/data1t/mask/glint-mask')
parser.add_argument('--output-dir', type=str, help='Directory with aligned face thumbnails.',
default='/data1t/mask/mask-output')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
import requests
import lxml.html as lh
import pandas as pd
url = 'https://www.bphc.org/onlinenewsroom/Blog/Lists/Posts/Post.aspx?List=24ee0d58%2D2a85%2D4a4a%2D855b%2Df5af9d781627&ID=1282&RootFolder=%2Fonlinenewsroom%2FBlog%2FLists%2FPosts&Source=https%3A%2F%2Fwww%2Ebphc%2Eorg%2FPages%2Fdefault%2Easpx&Web=03126e14%2D4972%2D4333%2Db8a3%2D800cbc1cafce'
page = requests.get(url)
doc = lh.fromstring(page.content)
table = doc.xpath('//tbody')[-1]
for row in table.iterchildren():
for col in row.iterchildren():
print(col.text_content().strip())
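# pandas is imported above but unused; a minimal sketch of collecting the same cells into a
# DataFrame instead of printing them (column names are not recovered here, this assumes the
# raw rows are what you want to keep):
rows = [[col.text_content().strip() for col in row.iterchildren()]
        for row in table.iterchildren()]
df = pd.DataFrame(rows)
print(df.head())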
|
from pathlib import Path
import json
from commiter.src.backend.tasks import Task1, AbstractTask
from typing import *
import pandas as pd
class Backend:
def __init__(self, path: Path, tasks_parsers: Sequence[AbstractTask]):
self.path = path
self.tasks_parsers = tasks_parsers
if not self.path.is_file():
self.data = self.get_default_project_content()
self.write()
self.read()
def get_default_project_content(self) -> dict:
return {
"project_properties": {},
"tasks": [],
}
def read(self):
with open(self.path) as fp:
data = json.load(fp)
self.data = data
tasks = []
for dico in data["tasks"]:
parsed = False
for parser in self.tasks_parsers:
if parser.can_parse_dico(dico):
try:
tasks.append(parser.parse_dico(dico))
parsed = True
except Exception:
pass
break
if not parsed:
raise Exception(f"Unable to parse task {dico}")
self.data["tasks"] = tasks
def write(self):
tasks = []
for task in self.get_tasks():
tasks.append(task.get_dico())
self.data["tasks"] = tasks
with open(self.path, "w") as fp:
json.dump(self.data, fp)
def get_tasks_dataframe(self):
l_tasks_formatted = [t.get_dico() for t in self.get_tasks()]
return pd.DataFrame(l_tasks_formatted)
def from_tasks_dataframe(self, df: pd.DataFrame):
for dico in df.to_dict('records'):
parsed = False
for task_parser in self.tasks_parsers:
if task_parser.can_parse_dico(dico):
self.data["tasks"].append(task_parser.parse_dico(dico))
parsed = True
break
if not parsed:
print(
f"Cannot parse task {dico} with parsers {self.tasks_parsers}"
)
def add_task(self, tasks: Sequence[AbstractTask]):
""":warning: This method add as it the objects (potential mutability problems)"""
self.data["tasks"].extend(tasks)
def delete_task(self, tasks: Sequence[int]):
new_tasks = [t for i, t in enumerate(
self.get_tasks()) if i not in tasks]
self.data["tasks"] = new_tasks
def get_tasks(self) -> List[AbstractTask]:
return self.data["tasks"]
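# Minimal usage sketch (illustrative only). "DummyTask" is a hypothetical stand-in that
# implements just the three methods Backend relies on (can_parse_dico, parse_dico,
# get_dico); the real task classes live in commiter.src.backend.tasks, and the file name
# "project.json" below is an arbitrary example.
if __name__ == "__main__":
    class DummyTask:
        def __init__(self, dico=None):
            self.dico = dico or {}
        @staticmethod
        def can_parse_dico(dico):
            return True
        @staticmethod
        def parse_dico(dico):
            return DummyTask(dico)
        def get_dico(self):
            return self.dico
    backend = Backend(Path("project.json"), tasks_parsers=[DummyTask])
    backend.add_task([DummyTask({"name": "write docs"})])
    backend.write()
    print(backend.get_tasks_dataframe())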
|
import os
import math
import re
from collections import OrderedDict
import numpy as np
import sys
FWD_ALGO_list=[
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED"]
BWD_ALGO_DATA_list= [
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_0",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED"]
BWD_ALGO_FILTER_list=["CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3",
"CUDNN_CONVOLUTION_BWD_FILTER_WINOGRAD_NONFUSED",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING"]
FWD_ALGO_TENSORCORE=["CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED"
]
BWD_ALGO_DATA_TENSORCORE=["CUDNN_CONVOLUTION_BWD_DATA_ALGO_1",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED"]
BWD_ALGO_FILTER_TENSORCORE=["CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED"]
MATH_OPS_list= ['CUDNN_TENSOR_OP_MATH', 'CUDNN_DEFAULT_MATH']
def todict(LIST):
return OrderedDict([(itm, [re.compile(itm), 0]) for itm in LIST])
def count_occurences(filepath, line_bounds, ord_dict_list):
line_lb, line_ub = line_bounds
with open(filepath,'r') as f:
for (num_line,line) in enumerate(f):
if num_line > line_lb and num_line < line_ub:
for ord_dict in ord_dict_list:
for key, itm in ord_dict.items():
if itm[0].search(line):
ord_dict[key][1] += 1
def ceil_lg2(n):
return math.ceil(math.log(n) / math.log(2))
def extract_conv_ops(filepath, line_bounds, steps, times, debug=False):
line_lb, line_ub = line_bounds
total_flops = 0.0
flops = 0.0
dims = []
num_line = 0
with open(filepath, 'r') as f:
for _ in range(line_lb):
f.readline()
num_line += 1
lines = f.__iter__()
l = lines.readline()
num_line += 1
while 'xfunction cudnnConvolutionBackward' not in l:
if 'function cudnnConvolutionForward' not in l:
#if 'function cudnnConvolutionBackwardFilter' not in l:
try:
l = lines.readline()
num_line += 1
except StopIteration:
break
continue
dims = []
for i in range(3):
while 'dimA' not in l:
l = lines.readline()
num_line += 1
m = re.search(r'dimA: type=int; val=\[(\d+),(\d+),(\d+),(\d+)\]', l)
assert m is not None
dims.append(tuple(int(d) for d in m.groups()))
l = lines.readline()
num_line += 1
while 'dataType' not in l:
l = lines.readline()
num_line += 1
dtype = re.search(r'val=CUDNN_DATA_(\S+)', l).group(1)
while 'mathType' not in l:
l = lines.readline()
num_line += 1
mtype = re.search(r'val=CUDNN_(\S+)_MATH', l).group(1)
while 'algo' not in l:
l = lines.readline()
num_line += 1
#algo = re.search(r'val=CUDNN_CONVOLUTION_FWD_ALGO_(\S+)', l).group(1)
algo = re.search(r'val=CUDNN_CONVOLUTION_(\S+)', l).group(1)
t_in, t_filt, t_out = dims
if 'ALGO_FFT' in l:
# fft size needs to be sum of input and filter dimensions to
# allow for zero padding
fft_h = t_in[2] + t_filt[2]
fft_w = t_in[3] + t_filt[3]
fft_flops = (5 * fft_h * fft_w *
ceil_lg2(fft_h) * ceil_lg2(fft_w))
# we do NC + KC forward ffts and NK backwards ffts
num_ffts = ((t_in[0] * t_in[1]) +
(t_filt[0] * t_filt[1]) +
(t_out[0] * t_out[1]))
# and finally we need NKC element-wise products in frequency space
freq_mults = (t_in[0] * t_filt[0] * t_filt[1] *
fft_h * fft_w)
flops = fft_flops * num_ffts + freq_mults
else:
flops = (2 *
t_out[0] * # N
t_out[2] * # H
t_out[3] * # W
t_in[1] * # C
t_out[1] * # K
t_filt[2] * # R
t_filt[3]) # S
if num_line >= line_lb and num_line <= line_ub:
total_flops += flops
if debug:
print('in={} filt={} out={} flops={} algo={} dtype={} mtype={}'.format(t_in, t_filt, t_out, flops, algo, dtype, mtype))
if num_line > line_ub:
break
print('Trace from training step=%d to step=%d' %(steps[0], steps[1]))
print('Training Step = %2.3e FLOP (floating-point operations)' % (3*total_flops/(steps[-1] - steps[0])))
if debug:
print('inference = %2.2e FLOP' % (total_flops/(steps[-1] - steps[0])))
print('Training Step = %2.3e FLOPS (floating-point operations per sec)\n' % (3*total_flops/(times[-1] - times[0])))
if debug:
print('inference = %2.2e FLOPS' % (total_flops/(times[-1] - times[0])))
return total_flops
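# Worked sanity check for the direct-convolution FLOP formula used above (the tensor
# sizes below are made up for illustration, not taken from any trace):
def _example_direct_conv_flops():
    N, C, H, W = 32, 64, 56, 56   # hypothetical input tensor, NCHW layout
    K, R, S = 128, 3, 3           # hypothetical filter tensor, KCRS layout
    H_out, W_out = H, W           # assume 'same' padding with stride 1
    # 2 * N * H_out * W_out * C * K * R * S ~= 1.48e10 FLOPs for these dimensions
    return 2 * N * H_out * W_out * C * K * R * S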
def rank_entries(ord_dict_list, steps):
    # Reuses the module-level FWD/BWD *_TENSORCORE lists defined near the top of the file.
for ord_dict in ord_dict_list:
arr_counts = np.array([itm[1] for _, itm in ord_dict.items()])
indices = np.argsort(arr_counts)[::-1]
keys = list(ord_dict.keys())
print('Trace from training step=%d to step=%d' %(steps[0], steps[1]))
print('CUDA FUNCTION, # CUDA CALLS, TENSORCORES USAGE')
for ind in indices:
algo_name = keys[ind]
if algo_name in FWD_ALGO_TENSORCORE+BWD_ALGO_DATA_TENSORCORE+BWD_ALGO_FILTER_TENSORCORE:
tensorcore_usage = "YES"
else:
tensorcore_usage = "NO"
print('%s, %d ,%s ' %(algo_name, ord_dict[algo_name][1], tensorcore_usage))
print('\n')
def get_step_timing(logfile, step_start=90, step_end=100):
step_1 = re.compile('step= %d' %step_start)
step_2 = re.compile('step= %d' %step_end)
times, steps = [], []
with open(logfile, mode='r') as f:
for line in f:
if step_1.search(line) or step_2.search(line):
stream = line.split(',')
time = stream[0].split('=')[-1]
step = stream[1].split('=')[-1]
times.append(float(time))
steps.append(int(step))
return times, steps
def get_lines_bounds(times, logfile):
pattern = re.compile('Time:')
lines = []
cudnn_times = []
#print(times)
with open(logfile, mode='r') as f:
for i,line in enumerate(f):
if pattern.search(line):
                time_list = re.findall(r'\d+', line)
if len(time_list) == 11:
# assert len(time_list) == 11, print('Time format is not as expected in Line %d: Found %s, Expected: len(Time)=11. Results may be wrong.'
# % (i, format(time_list)))
hour,minute,sec,millsec = time_list[3:7]
total_time = int(hour) * 3600
total_time += int(minute) * 60
total_time += int(sec)
total_time += int(millsec) * 10 ** (-len(millsec))
cudnn_times.append(total_time)
if len(cudnn_times) > 1:
total_time -= cudnn_times[0] # assume that first printed step lines up with cudnn start of trace
if total_time >= times[0] and total_time <= times[1]:
lines.append(i)
return lines[0], lines[-1]
def main(argv):
if len(argv) == 1:
print('Usage: python cudnn_parser.py cudnn_logfile train_logfile step_start step_end.')
else:
cudnn_logfile, train_logfile, step_start, step_end = argv[1:]
# Dictionaries
FWD_ALGO = todict(FWD_ALGO_list)
BWD_DATA_ALGO = todict(BWD_ALGO_DATA_list)
BWD_FILTER_ALGO = todict(BWD_ALGO_FILTER_list)
MATH_OPS = todict(MATH_OPS_list)
ord_dict_list = [FWD_ALGO, BWD_DATA_ALGO, BWD_FILTER_ALGO, MATH_OPS]
# parsing
times, steps = get_step_timing(train_logfile,step_start=int(step_start), step_end=int(step_end))
print(times, steps)
line_lb, line_ub = get_lines_bounds(times, cudnn_logfile)
extract_conv_ops(cudnn_logfile, [line_lb, line_ub], steps, times)
count_occurences(cudnn_logfile, [line_lb, line_ub], ord_dict_list)
rank_entries(ord_dict_list, steps)
if __name__ == "__main__":
main(sys.argv)
|
from .datagroup import DataGroup
__all__ = ["DataGroup"]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 08:06:31 2021
@author: bcamc
"""
#%% Import Packages
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes
from matplotlib.lines import Line2D
import pandas as pd
import numpy as np
import scipy
import scipy.io  # explicitly import the submodules used below (loadmat)
import scipy.optimize  # curve_fit
from scipy.stats import pearsonr, spearmanr
import cartopy
import cartopy.crs as ccrs
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xarray as xr
from sklearn.decomposition import IncrementalPCA
import os
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn import metrics  # metrics.mean_squared_error is used in the validation section below
from datetime import datetime
from sklearn import linear_model
import datetime
import cartopy.io.shapereader as shpreader
from cartopy.feature import ShapelyFeature
import shapely.geometry as sgeom
from shapely.ops import unary_union
from shapely.prepared import prep
import joblib
from joblib import Parallel, delayed
from obspy.geodetics import kilometers2degrees, degrees2kilometers
import cmocean
import seaborn as sns
from tabulate import tabulate
# Progress bar package
from tqdm import tqdm
# Gibbs seawater properties packages
import gsw
# Import pre-built mapping functions
from SO_mapping_templates import haversine, South_1ax_map, South_1ax_flat_map
# Import function to calculate fluxes
from Fluxes import calculate_fluxes
# Import taylor diagram script
from taylorDiagram import TaylorDiagram
#%% Define directories
front_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/'
lana_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/dmsclimatology/'
jarnikova_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/Jarnikova_SO_files/'
#%% Set working directories
dir_ = 'C:\\Users\\bcamc\\OneDrive\\Desktop\\Python\\Projects\\sulfur\\southern_ocean\\Scripts'
if os.getcwd() != dir_:
os.chdir(dir_)
#%% Read in data (optional)
export_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/export_data/'
models_combined = pd.read_csv(export_dir+'models_combined.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
X_full_plus = pd.read_csv(export_dir+'X_full_plus.csv').set_index(['datetime','latbins','lonbins'])
# ANN_y_pred = pd.read_csv(export_dir+'ANN_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# RFR_y_pred = pd.read_csv(export_dir+'RFR_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# y = pd.read_csv(export_dir+'y.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# X = pd.read_csv(export_dir+'X.csv').set_index(['datetime','latbins','lonbins'])
# X_full = X_full_plus.drop(['dSSHA','currents','SRD'],axis=1)
#%% Post-processing
# ***** Load in models/data using "SO_DMS_build_models.py" *****
# for plotting
reordered_months = np.array([10.,11.,12.,1.,2.,3.,4.])
# Average predictions
RFR_y_pred_mean = np.sinh(RFR_y_pred).groupby(['latbins','lonbins']).mean()
ANN_y_pred_mean = np.sinh(ANN_y_pred).groupby(['latbins','lonbins']).mean()
# calculate Si*
Si_star = (X_full.loc[:,'Si']-X_full.loc[:,'SSN']).squeeze()
X_full_plus['Si_star'] = Si_star
#------------------------------------------------------------------------------
# Import ACC front locations
front_data = xr.open_dataset(front_dir+'Park_durand_fronts.nc')
fronts = dict()
to_bin = lambda x: np.round(x /grid) * grid
#------------------------------------------------------------------------------
# Build each front (NB, SAF, PF, SACCF, SB) the same way: stack the lat/lon tracks into a
# DataFrame, bin the coordinates onto the model grid, and drop duplicate grid cells.
for name in ['NB', 'SAF', 'PF', 'SACCF', 'SB']:
    fronts[name] = pd.DataFrame(np.stack([front_data['Lat'+name].values,
                                          front_data['Lon'+name].values,
                                          np.ones(front_data['Lon'+name].values.shape)], axis=1),
                                columns=['latbins','lonbins','locs'])
    fronts[name] = fronts[name].sort_values('lonbins')
    fronts[name]['latbins'] = fronts[name]['latbins'].map(to_bin).round(3)
    fronts[name]['lonbins'] = fronts[name]['lonbins'].map(to_bin).round(3)
    fronts[name] = fronts[name].set_index(['latbins','lonbins']).squeeze()
    fronts[name] = fronts[name][~fronts[name].index.duplicated(keep='first')]
    # fronts[name] = fronts[name].reindex_like(models_combined.loc[1])
# front_data.close(); del front_data
#------------------------------------------------------------------------------
SA = gsw.SA_from_SP(SP=X_full.loc[:,'SAL'].values, p=1, lon=X_full.index.get_level_values('lonbins').values, lat=X_full.index.get_level_values('latbins').values)
CT = gsw.CT_from_t(SA=SA, t=X_full.loc[:,'SST'].values, p=1)
density = gsw.density.rho(SA=SA,CT=CT,p=1)
density = pd.Series(density, index=X_full.loc[:,'chl'].index)
#%% Model Sea-Air Fluxes
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# ===================
# RFR Model
# ===================
#-----------------------------------------------------------------------------
# Fluxes (umol m^-2 d^-1):
RFR_flux = dict()
k_dms, RFR_flux['GM12'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, RFR_flux['SD02'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
RFR_flux['GM12'] = pd.Series(RFR_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
RFR_flux['GM12'] = RFR_flux['GM12'][(RFR_flux['GM12'] >= 0) & (RFR_flux['GM12'].notna())].reindex_like(RFR_y_pred)
RFR_flux['SD02'] = pd.Series(RFR_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# ANN Model
# ===================
#-----------------------------------------------------------------------------
ANN_flux = dict()
_, ANN_flux['GM12'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, ANN_flux['SD02'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
ANN_flux['GM12'] = pd.Series(ANN_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
ANN_flux['GM12'] = ANN_flux['GM12'][(ANN_flux['GM12'] >= 0) & (ANN_flux['GM12'].notna())].reindex_like(ANN_y_pred)
ANN_flux['SD02'] = pd.Series(ANN_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Actual
# ===================
#-----------------------------------------------------------------------------
obs_flux = dict()
_, obs_flux['GM12'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='GM12')
_, obs_flux['SD02'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
obs_flux['GM12'] = pd.Series(obs_flux['GM12'], index=X.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
obs_flux['GM12'] = obs_flux['GM12'][(obs_flux['GM12'] >= 0) & (obs_flux['GM12'].notna())].reindex_like(y)
obs_flux['SD02'] = pd.Series(obs_flux['SD02'], index=X.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Regional Fluxes
# ===================
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Constants:
A = ((max_lat-min_lat)*111*1000)*((max_lon-min_lon)*111*1000) # total regional area
A_ocean = A*frac_ocean # fraction of total area covered by ocean
S_mol_mass = 32.06 # molar mass of sulfur
num_days = np.sum(np.array([31,30,31,31,28,31,30])) # number of total days in the dataset
#-----------------------------------------------------------------------------
# Regional modelled flux (convert to Tg over total days)
RFR_flux_reg = (RFR_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
ANN_flux_reg = (ANN_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
obs_flux_reg = (obs_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
fluxes_combined = pd.concat([RFR_flux['GM12'], ANN_flux['GM12']], axis=1).mean(axis=1)
#%% Lana Climatology Sea-air Fluxes
files = os.listdir(lana_dir)
# Set 1x1o coords
lana_coords = dict()
lana_coords['lat'] = pd.Series(np.arange(-89,91,1), name='latbins')
lana_coords['lon'] = pd.Series(np.arange(-179,181,1), name='lonbins')
time_match = {'OCT':10,'NOV':11,'DEC':12,'JAN':1,'FEB':2,'MAR':3,'APR':4}
# Retrieve DMS climatology values, adding lats/lons to dataframes
lana_clim = []
for file in files:
frame = pd.DataFrame(np.flipud(pd.read_csv(lana_dir+file, header=None)),
index=lana_coords['lat'], columns=lana_coords['lon'])
frame = frame.stack(dropna=False)
frame = frame.reset_index()
frame['datetime'] = np.tile(float(time_match[file.split('.')[0][-3:]]), len(frame))
frame = frame.set_index(['datetime','latbins','lonbins']).squeeze()
frame.name = 'DMS'
lana_clim.append(frame)
lana_clim = pd.concat(lana_clim)
# Regrid variables to compute sea-air fluxes
lana = dict()
for var in ['wind','ice','SST']:
lana[var] = X_full.loc[:,var].copy()
lana[var] = lana[var].reset_index()
lana[var] = lana[var].rename(columns={'lonbins':'lon','latbins':'lat'})
# regrid to nearest degree (i.e. 1x1o grid)
lana[var]['latbins'] = lana[var].lat.round(0).astype('int32')
lana[var]['lonbins'] = lana[var].lon.round(0).astype('int32')
lana[var] = lana[var].set_index(['datetime','latbins','lonbins'])
lana[var] = lana[var].drop(columns=['lat','lon'])
lana[var] = lana[var].groupby(['datetime','latbins','lonbins']).mean().squeeze()
lana[var] = lana[var].sort_index().reindex_like(lana_clim)
print(var+' regrid complete')
# Compute sea-air flux
#-----------------------------------------------------------------------------
lana_flux = dict()
_, lana_flux['GM12'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='GM12')
_, lana_flux['SD02'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
lana_flux['GM12'] = pd.Series(lana_flux['GM12'], index=lana['SST'].index, name='DMS flux')
# filter out negative estimates
lana_flux['GM12'] = lana_flux['GM12'][(lana_flux['GM12'] >= 0) & (lana_flux['GM12'].notna())].reindex_like(lana_clim)
lana_flux['SD02'] = pd.Series(lana_flux['SD02'], index=lana['SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
del frame
#%% Jarnikova Climatology Sea-air Fluxes
# This climatology is from Dec to Feb (Jarnikova & Tortell, 2016)
mat = scipy.io.loadmat(jarnikova_dir+'nov26product.mat')
tj_dms = mat['structname'][0,1]['barnessmooth'][0,0]
tj_lats = mat['structname'][0,1]['latvec'][0,0][0,:]
tj_lons = mat['structname'][0,1]['lonvec'][0,0][0,:]
jarnikova_clim = pd.DataFrame(tj_dms, index=tj_lats, columns=tj_lons)
jarnikova_clim.index = jarnikova_clim.index.rename('latbins')
jarnikova_clim.columns = jarnikova_clim.columns.rename('lonbins')
jarnikova_clim = jarnikova_clim.stack()
# Reindex like lana et al. climatology
jarnikova_clim = jarnikova_clim.reindex_like(lana_clim.loc[[12,1,2]].groupby(['latbins','lonbins']).mean())
# Calculate the fluxes
#-----------------------------------------------------------------------------
jarnikova_flux = dict()
_, jarnikova_flux['GM12'] = calculate_fluxes(data=jarnikova_clim,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
parameterization='GM12')
_, jarnikova_flux['SD02'] = calculate_fluxes(data=jarnikova_clim.values,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
jarnikova_flux['GM12'] = pd.Series(jarnikova_flux['GM12'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
# filter out negative estimates
jarnikova_flux['GM12'] = jarnikova_flux['GM12'][(jarnikova_flux['GM12'] >= 0) & (jarnikova_flux['GM12'].notna())].reindex_like(jarnikova_clim)
jarnikova_flux['SD02'] = pd.Series(jarnikova_flux['SD02'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
#-----------------------------------------------------------------------------
del mat
#%% Compute KDEs for fluxes
def KDE(y):
"""
    A modified wrapper function pulled from the Pandas source code
    (https://github.com/pandas-dev/pandas/blob/0.21.x/pandas/plotting/_core.py#L1381-L1430)
    that returns the kernel density estimates of a Pandas Series/sliced DataFrame
    using scipy's gaussian_kde function. It is efficient like the pandas native
    plotting function (the fitted KDE is evaluated at only 1000 points across the
    distribution) but it returns the actual values instead of an axes handle.
Parameters
----------
y : Series or sliced Dataframe
Input data.
Returns
-------
evals : Series or Dataframe
        col1: evaluation points (1000 samples spanning the data range, padded by
              half the sample range on each side);
        col2: kernel density estimates evaluated at each point.
"""
from scipy.stats import gaussian_kde
y = y.dropna()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
kde = gaussian_kde(y.dropna())
vals = kde.evaluate(ind)
evals = pd.concat([pd.Series(ind, name='ind'), pd.Series(vals, name='kde')],axis=1)
return evals
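# Example usage (sketch): KDE() takes a Series, e.g.
#   evals = KDE(RFR_flux['GM12'])
#   plt.plot(evals['ind'], evals['kde'])
# which is how the flux distributions computed below are summarized.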
# Function speeds up computation, but its still faster to load up the data
# rather than rerun the function:
if first_run is True:
# Calculate the KDEs
lana_kde = KDE(lana_flux['GM12'])
jarnikova_kde = KDE(jarnikova_flux['GM12'])
RFR_kde = KDE(RFR_flux['GM12'])
RFR_kde_3mon = KDE(RFR_flux['GM12'].loc[[12,1,2],:,:])
ANN_kde = KDE(ANN_flux['GM12'])
ANN_kde_3mon = KDE(ANN_flux['GM12'].loc[[12,1,2],:,:])
# Write each to a csv files
lana_kde.to_csv(write_dir[:-14]+'lana_kde.csv')
jarnikova_kde.to_csv(write_dir[:-14]+'jarnikova_kde.csv')
RFR_kde.to_csv(write_dir[:-14]+'RFR_kde.csv')
RFR_kde_3mon.to_csv(write_dir[:-14]+'RFR_kde_3mon.csv')
ANN_kde.to_csv(write_dir[:-14]+'ANN_kde.csv')
ANN_kde_3mon.to_csv(write_dir[:-14]+'ANN_kde_3mon.csv')
else:
# load up the csv files
lana_kde = pd.read_csv(write_dir[:-14]+'lana_kde.csv')
jarnikova_kde = pd.read_csv(write_dir[:-14]+'jarnikova_kde.csv')
RFR_kde = pd.read_csv(write_dir[:-14]+'RFR_kde.csv')
RFR_kde_3mon = pd.read_csv(write_dir[:-14]+'RFR_kde_3mon.csv')
ANN_kde = pd.read_csv(write_dir[:-14]+'ANN_kde.csv')
ANN_kde_3mon = pd.read_csv(write_dir[:-14]+'ANN_kde_3mon.csv')
#%% Convert fluxes to Tg S
bounds=[max_lat, min_lat, max_lon, min_lon]
def convert_fluxes(fluxes, to_mask, unique_dates, bounds):
from calendar import monthrange
max_lat, min_lat, max_lon, min_lon = bounds[0], bounds[1], bounds[2], bounds[3]
frac_ocean = to_mask.dropna().size/to_mask.size # calculate the fraction of the total bounded area that is ocean
A = ((max_lat-min_lat)*111*1000)*((max_lon-min_lon)*111*1000) # total regional area
A_ocean = A*frac_ocean # fraction of total area covered by ocean
S_mol_mass = 32.06 # molar mass of sulfur
# Get unique dates - average across leap years in the climatology
dates = pd.DataFrame(np.array([list(monthrange(int(i[:4]),int(i[5:]))) for i in unique_dates]))
dates = dates.set_index(0)
num_days = dates.groupby(0).mean().sum().values
return (fluxes*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
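# Unit bookkeeping for the conversion above (sketch with round, made-up numbers): fluxes are
# in umol S m^-2 d^-1, so flux * molar mass (g mol^-1) * ocean area (m^2) * days gives
# micrograms of S; the 1e6 converts micrograms to grams and the 1e12 converts grams to Tg.
# E.g. a flux of 10 umol m^-2 d^-1 over 1e13 m^2 for 210 days is roughly
#   10 * 32.06 * 1e13 * 210 / (1e6 * 1e12) ~= 0.7 Tg S.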
print('RFR')
print(f'{convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).mean():.1f}',
'+/-',
f'{convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).std():.1f}')
print(f'{convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).mean():.1f}',
'+/-',
f'{convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).std():.1f}')
print('\nANN')
print(f'{convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).mean():.1f}',
'+/-',
f'{convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).std():.1f}')
print(f'{convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).mean():.1f}',
'+/-',
f'{convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).std():.1f}')
print('\nCombined')
print(f'{np.nanmean([convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds), convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds)]):.1f}',
'+/-',
f'{np.nanstd([convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds), convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds)]):.1f}')
print(f'{np.nanmean([convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds), convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds)]):.1f}',
'+/-',
f'{np.nanstd([convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds), convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds)]):.1f}')
print('\nCumulative Combined')
print(f'{np.nanmean([convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(), convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum()]):.1f}',
'+/-',
f'{np.std([convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(), convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum()]):.1f}')
print(f'{np.nanmean([convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(), convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum()]):.1f}',
'+/-',
f'{np.std([convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(), convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum()]):.1f}')
print('\nPropagated uncertainty between cumulative combined flux')
print(np.nanmean([convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(),
convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(),
convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum(),
convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum()]))
print('+/-')
print(np.sqrt(np.sum([np.nanstd(convert_fluxes(RFR_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum())**2,
np.nanstd(convert_fluxes(ANN_flux["GM12"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum())**2,
np.nanstd(convert_fluxes(RFR_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum())**2,
np.nanstd(convert_fluxes(ANN_flux["SD02"], to_mask, unique_dates, bounds).groupby(["latbins","lonbins"]).sum())**2]))*0.5)
#%% MLR, LM & LIT models - run to validate models
#-----------------------------------------------------------------------------
#### Linear regression models (run 1 for each variable)
LM_preds = np.empty([y_test.shape[0],np.shape(X_test.columns)[0]])
LM_R2 = np.empty([np.shape(X_test.columns)[0],1])
LM_coef = np.empty([np.shape(X_test.columns)[0],1])
LM_RMSE = np.empty([np.shape(X_test.columns)[0],1])
lm = linear_model.LinearRegression()
for i, var_ in enumerate(X_test.columns.values):
LM_model = lm.fit(X_test.loc[:,[var_]],np.sinh(y_test))
ypred_LM = lm.predict(X_test.loc[:,[var_]])
LM_preds[:,i] = ypred_LM
LM_R2[i,:] = lm.score(X_test.loc[:,[var_]],np.sinh(y_test))
LM_coef[i,:] = lm.coef_
LM_RMSE[i,:] = np.sqrt(metrics.mean_squared_error(np.sinh(y_test), ypred_LM))
#-----------------------------------------------------------------------------
#### Calculate stds, pearson correlations for linear regression models:
LM_stds = np.std(np.arcsinh(LM_preds), axis=0)
LM_corrcoefs = np.empty([LM_preds.shape[1]])
for i in range(LM_preds.shape[1]):
rs = pearsonr(np.arcsinh(LM_preds[:,i]),y_test)
LM_corrcoefs[i] = rs[0]
R2_LM = np.empty([LM_preds.shape[1]])
for i in range(LM_preds.shape[1]):
R2_LM[i] = r2_score(y_test, np.arcsinh(LM_preds[:,i]))
print()
print('Linear Regression Results: ')
d = {'Variable':[x for x in X_test.columns.values],'Coefs':LM_coef[:,0],'R2':LM_R2[:,0],'RMSE':LM_RMSE[:,0]}
LM_results = pd.DataFrame(data=d).sort_values('RMSE')
print(LM_results)
print()
#-----------------------------------------------------------------------------
#### MLR
lm_MLR = linear_model.LinearRegression()
MLR_model = lm_MLR.fit(X_train,np.sinh(y_train))
ypred_MLR_train = lm_MLR.predict(X_train) #y predicted by MLR
lm_MLR = linear_model.LinearRegression()
MLR_model = lm_MLR.fit(X_test,np.sinh(y_test))
ypred_MLR = lm_MLR.predict(X_test) #y predicted by MLR
intercept_MLR = lm_MLR.intercept_ #intercept predicted by MLR
coef_MLR = lm_MLR.coef_ #regression coefficients in MLR model
R2_MLR = lm_MLR.score(X_test,np.sinh(y_test)) #R-squared value from MLR model
RMSE_MLR = np.sqrt(metrics.mean_squared_error(np.sinh(y_test), ypred_MLR))
#-----------------------------------------------------------------------------
#### Calculate stds, pearson correlations for multiple linear regression model:
MLR_stds = np.std(np.arcsinh(ypred_MLR))
MLR_corrcoefs = pearsonr(np.arcsinh(ypred_MLR), y_test)[0]
print('MLR results:')
print('a0 (intercept) = ' + str(intercept_MLR)[:5])
for i, val in enumerate(coef_MLR):
print('a%.0f = %.3f' % (i,val)+' ('+X.columns.values[i]+')')
print('')
print('R^2 = ' + str(R2_MLR)[:4])
print('RMSE = ' + str(RMSE_MLR)[:4])
print('')
#-----------------------------------------------------------------------------
# literature algorithms
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# First descale X values:
X_test_orig = pd.DataFrame(scaler.inverse_transform(X_test), index=X_test.index, columns=X_test.columns)
#-----------------------------------------------------------------------------
#### SD02 - Simo & Dachs (2002)
# First run model with global coefs from paper:
global_coefs = np.array([5.7, 55.8, 0.6])
def SD02_model(X, a,b,c):
coefs = np.array([a,b,c])
# Chl = X.loc[:,['Chlorophyll a']].values
Chl = X.loc[:,['chl']].values
MLD = X.loc[:,['MLD']].values
Chl_MLD = Chl/MLD
SD02 = np.empty([Chl.shape[0],Chl.shape[1]])
for i, val in enumerate(Chl_MLD):
if val < 0.02:
SD02[i,0] = -np.log(MLD[i])+coefs[0]
elif val >= 0.02:
SD02[i,0] = coefs[1]*(Chl_MLD[i])+coefs[2]
SD02 = SD02[:,0]
return SD02
SD02 = SD02_model(X_test_orig, global_coefs[0], global_coefs[1], global_coefs[2])
# Now regionally optimize using least squares:
w, _ = scipy.optimize.curve_fit(SD02_model, X_test_orig, np.sinh(y_test), p0=global_coefs)
SD02_ls_optimized = SD02_model(X_test_orig, w[0], w[1], w[2])
# Now transform to compare:
SD02 = np.arcsinh(SD02)
SD02_ls_optimized = np.arcsinh(SD02_ls_optimized)
# Calculate correlation coefficients, R2, and SDs
SD02_stds = np.std(SD02, axis=0)
SD02_corrcoefs = pearsonr(SD02.flatten(), y_test)[0]
R2_SD02 = r2_score(y_test, SD02.flatten())
SD02_ls_optimized_stds = np.std(SD02_ls_optimized, axis=0)
SD02_ls_optimized_corrcoefs = pearsonr(SD02_ls_optimized.flatten(), y_test)[0]
R2_SD02_ls_optimized = r2_score(y_test, SD02_ls_optimized.flatten())
#-----------------------------------------------------------------------------
#### VS07 - Vallina & Simo (2007)
# First run model with global coefs from paper:
global_coefs = np.array([0.492,0.019])
def VS07_model(X, a, b):
coefs = np.array([a,b])
PAR = X.loc[:,['PAR']].values
Kd = vars_interp['Kd'].reindex_like(X).values
MLD = X.loc[:,['MLD']].values
z = MLD # surface depth in m
SRD = (PAR/(Kd*MLD))*(1-np.exp(-Kd*z))
VS07 = coefs[0]+(coefs[1]*SRD)
VS07 = VS07[:,0]
return VS07
VS07 = VS07_model(X_test_orig, global_coefs[0], global_coefs[1])
# Now regionally optimize using least squares:
w, _ = scipy.optimize.curve_fit(VS07_model, X_test_orig, np.sinh(y_test), p0=global_coefs)
VS07_ls_optimized = VS07_model(X_test_orig, w[0], w[1])
# Now transform to compare:
VS07 = np.arcsinh(VS07)
VS07_ls_optimized = np.arcsinh(VS07_ls_optimized)
# Calculate correlation coefficients, R2, and SDs
VS07_stds = np.std(VS07, axis=0)
VS07_corrcoefs = pearsonr(VS07.flatten(), y_test)[0]
R2_VS07 = r2_score(y_test, VS07.flatten())
VS07_ls_optimized_stds = np.std(VS07_ls_optimized, axis=0)
VS07_ls_optimized_corrcoefs = pearsonr(VS07_ls_optimized.flatten(), y_test)[0]
R2_VS07_ls_optimized = r2_score(y_test, VS07_ls_optimized.flatten())
#-----------------------------------------------------------------------------
#### G18 - Gali et al. (2018)
# First run model with global coefs from paper:
global_coefs = np.array([1.237,0.578,0.0180])
def G18_model(X,a,b,c):
coefs = np.array([a,b,c])
Kd = vars_interp['Kd'].reindex_like(X).values.reshape(-1,1)
MLD = X.loc[:,['MLD']].values
Chl = X.loc[:,['chl']].values
SST = X.loc[:,['SST']].values
PAR = X.loc[:,['PAR']].values
Z_eu = 4.6/Kd # euphotic layer depth
Z_eu_MLD = Z_eu/MLD
DMSPt = np.empty([MLD.shape[0], MLD.shape[1]])
for i,val in enumerate(Z_eu_MLD):
if val >= 1:
DMSPt[i,0] = (1.70+(1.14*np.log10(Chl[i]))\
+(0.44*np.log10(Chl[i]**2))\
+(0.063*SST[i])-(0.0024*(SST[i]**2)))
elif val < 1:
DMSPt[i,0] = (1.74+(0.81*np.log10(Chl[i]))+(0.60*np.log10(Z_eu_MLD[i])))
G18 = -coefs[0]+(coefs[1]*DMSPt)+(coefs[2]*PAR)
G18 = 10**(G18[:,0])
return G18
G18 = G18_model(X_test_orig, global_coefs[0],global_coefs[1],global_coefs[2])
#### Now regionally optimize using least squares:
w, _ = scipy.optimize.curve_fit(G18_model, X_test_orig, np.sinh(y_test), p0=global_coefs)
G18_ls_optimized = G18_model(X_test_orig, w[0], w[1], w[2])
#### Now transform to compare:
G18 = np.arcsinh(G18)
G18_ls_optimized = np.arcsinh(G18_ls_optimized)
#### Calculate correlation coefficients, R2, and SDs
G18_stds = np.std(G18, axis=0)
G18_corrcoefs = pearsonr(G18.flatten(), y_test)[0]
R2_G18 = r2_score(y_test, G18.flatten())
G18_ls_optimized_stds = np.std(G18_ls_optimized, axis=0)
G18_ls_optimized_corrcoefs = pearsonr(G18_ls_optimized.flatten(), y_test)[0]
R2_G18_ls_optimized = r2_score(y_test, G18_ls_optimized.flatten())
#### Print results
table=[['SD02',R2_SD02],['SD02 LS',R2_SD02_ls_optimized],['VS07',R2_VS07],['VS07 LS',R2_VS07_ls_optimized],['G18',R2_G18],['G18 LS',R2_G18_ls_optimized]]
print(tabulate(table, headers=['Model','R2']))
#%% ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
#%% Plot Taylor Diagram
#-----------------------------------------------------------------------------
# Calculate the std of DMS data
stdrefs = y_test.std()
#-----------------------------------------------------------------------------
# Plot Taylor Diagram:
fig = plt.figure(figsize=(12,12))
font={'family':'sans-serif',
'weight':'normal',
'size':'24'}
plt.rc('font', **font) # sets the specified font formatting globally
dia = TaylorDiagram(stdrefs, fig=fig, rect=111, srange=(0,1.31),
label='Reference', extend=False, normalize=True)
contours = dia.add_contours(levels=[0,0.25,0.5,0.75,1,1.25,1.5], colors='r') # RMSE contours in red
plt.clabel(contours, inline=1, fontsize=20, fmt='%.2f')
dia.add_text(0.87,0.14,s='RMSE', fontsize=16, color='r', rotation=50)
# Add RFR values
dia.add_sample(RFR_stds,
RFR_corrcoefs,
marker='o',
ms=10,
ls='',
mfc='b',
mec='k',
label='RFR',
normalize=True,
zorder=2)
# Add ANN values
dia.add_sample(ANN_stds,
ANN_corrcoefs,
marker='o', ms=10, ls='',
mfc='r', mec='k',
label='ANN',
normalize=True,
zorder=1)
# Add MLR values
dia.add_sample(MLR_stds,
MLR_corrcoefs,
marker='+',
ms=20,
ls='',
mew='4',
mfc='k',
mec='deepskyblue',
label="MLR",
normalize=True,
zorder=10)
# Add Optimized SD02 values
dia.add_sample(SD02_ls_optimized_stds,
SD02_ls_optimized_corrcoefs,
marker='s',
ms=15,
ls='',
mew='1',
mfc='yellow',
mec='k',
label="SD02 (LS optimized)",
normalize=True,
zorder=11)
# Add Optimized VS07 values
dia.add_sample(VS07_ls_optimized_stds,
VS07_ls_optimized_corrcoefs,
marker='D',
ms=15,
ls='',
mew='1',
mfc='cyan',
mec='k',
label="VS07 (LS optimized)",
normalize=True,
zorder=10)
# Add Optimized G18 values
dia.add_sample(G18_ls_optimized_stds,
G18_ls_optimized_corrcoefs,
marker='^',
ms=15,
ls='',
mew='1',
mfc='darkorange',
mec='k',
label="G18 (LS optimized)",
normalize=True,
zorder=9)
# Add SD02 values
dia.add_sample(SD02_stds,
               SD02_corrcoefs,
marker='s',
ms=15,
ls='',
mew='1',
mfc='r',
mec='k',
label="SD02",
normalize=True,
zorder=11)
# Add VS07 values
dia.add_sample(VS07_stds,
               VS07_corrcoefs,
marker='D',
ms=15,
ls='',
mew='1',
mfc='r',
mec='k',
label="VS07",
normalize=True,
zorder=10)
# Add G18 values
dia.add_sample(G18_stds,
G18_corrcoefs,
marker='^',
ms=15,
ls='',
mew='1',
mfc='r',
mec='k',
label="G18",
normalize=True,
zorder=9)
dia._ax.axis[:].major_ticks.set_tick_out(True)
dia.add_grid(lw=1, ls='--')
fig.legend(dia.samplePoints,
[ p.get_label() for p in dia.samplePoints ],
numpoints=1, bbox_to_anchor=(1.2, 0.9), prop=dict(size='small'), loc='upper right', facecolor='none')
# plt.tight_layout()
fig.savefig(save_to_path+str('Taylor.png'), dpi=500, bbox_inches='tight')
#%% Plot MLR vs. RFR vs. ANN
fig = plt.figure(figsize=(24, 6))
# fig = plt.figure(figsize=(9,18)) # if stacking vertically
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'24'}
plt.rc('font', **font) # sets the specified font formatting globally
ax3=fig.add_subplot(131)
ax4=fig.add_subplot(133)
ax5=fig.add_subplot(132)
#-----------------------------------------------------------------------------
# Plot MLR Fit
ax3.scatter(y_train,np.arcsinh(ypred_MLR_train),s=10,c='k',marker='+',label='Training')
ax3.scatter(y_test,np.arcsinh(ypred_MLR),s=10,c='deepskyblue',marker='+',label='Testing (R${^2}$ = ' + str(round(R2_MLR,2))+')')
l1 = np.min(ax3.get_xlim())
l2 = np.max(ax3.get_xlim())
ax3.plot([l1,l2], [l1,l2], ls="--", c=".3", zorder=0)
ax3.set_xlim([0, RFR_model.predict(scaler.transform(X)).max()])
ax3.set_xlabel(r'arcsinh(DMS$_{\rmmeasured}$)')
ax3.set_ylabel(r'arcsinh(DMS$_{\rmmodel}$)')
ax3.legend(loc='upper center', markerscale=3, fontsize=20, facecolor='none')
ax3.text(0.79,0.06,'MLR', transform=ax3.transAxes, fontweight='bold',bbox=dict(facecolor='none',edgecolor='k'))
ax3.text(0.03,0.9,'$\mathbf{a}$',fontsize='24',transform=ax3.transAxes)
#-----------------------------------------------------------------------------
# Plot ANN Fit
ax4.scatter(y_train,ANN_y_train_pred,s=10,c='k', label="Training")
ax4.scatter(y_test,ANN_y_test_pred,s=10,c='r', label="Testing (R${^2}$ = "+ str(round(ANN_ensemble_R2,2))+")")
l1 = np.min(ax4.get_xlim())
l2 = np.max(ax4.get_xlim())
ax4.plot([l1,l2], [l1,l2], ls="--", c=".3", zorder=0)
ax4.set_xlim([0, RFR_model.predict(scaler.transform(X)).max()])
ax4.set_xlabel(r'arcsinh(DMS$_{\rmmeasured}$)')
# ax4.set_ylabel(r'arcsinh(DMS$_{\rmmodel}$)')
ax4.legend(loc='upper center', markerscale=3, fontsize=20, facecolor='none')
ax4.text(0.79,0.06,'ANN', transform=ax4.transAxes,fontweight='bold',bbox=dict(facecolor='none',edgecolor='k'))
ax4.text(0.03,0.9,'$\mathbf{c}$',fontsize='24',transform=ax4.transAxes)
#-----------------------------------------------------------------------------
# Plot RFR Fit
ax5.scatter(y_train,RFR_model.predict(X_train),s=10,c='k', label="Training")
ax5.scatter(y_test,RFR_model.predict(X_test),s=10,c='b', label="Testing (R${^2}$ = "+ str(round(RFR_model_R2,2))+")")
l1 = np.min(ax5.get_xlim())
l2 = np.max(ax5.get_xlim())
ax5.plot([l1,l2], [l1,l2], ls="--", c=".3", zorder=0)
ax5.set_xlim([0, RFR_model.predict(scaler.transform(X)).max()])
ax5.set_xlabel(r'arcsinh(DMS$_{\rmmeasured}$)')
# ax5.set_ylabel(r'arcsinh(DMS$_{\rmmodel}$)')
ax5.legend(loc='upper center', markerscale=3, fontsize=20, facecolor='none')
ax5.text(0.79,0.06,'RFR', transform=ax5.transAxes,fontweight='bold',bbox=dict(facecolor='none',edgecolor='k'))
ax5.text(0.03,0.9,'$\mathbf{b}$',fontsize='24',transform=ax5.transAxes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig.subplots_adjust(hspace=2)
# fig.subplots_adjust(wspace=0.1) # if stacking vertically
# fig.savefig(save_to_path+str('Ensemble_performance.png'), dpi=500, transparent=True, bbox_inches='tight')
#%% Print summary statistics for fluxes
from tabulate import tabulate
param_ = 'GM12'
headers = ['model', 'mean', 'SD', 'min', 'max']
data = [('RFR',np.nanmean(RFR_flux[param_]), np.nanstd(RFR_flux[param_]),np.nanmin(RFR_flux[param_]),np.nanmax(RFR_flux[param_])),
('ANN',np.nanmean(ANN_flux[param_]), np.nanstd(ANN_flux[param_]),np.nanmin(ANN_flux[param_]),np.nanmax(ANN_flux[param_])),
('L11',np.nanmean(lana_flux[param_]), np.nanstd(lana_flux[param_]),np.nanmin(lana_flux[param_]),np.nanmax(lana_flux[param_])),
('JT16',np.nanmean(jarnikova_flux[param_]), np.nanstd(jarnikova_flux[param_]),np.nanmin(jarnikova_flux[param_]),np.nanmax(jarnikova_flux[param_]))]
print(f'\nParameterization: {param_}')
print(tabulate(data, headers))
param_ = 'SD02'
headers = ['model', 'mean', 'SD', 'min', 'max']
data = [('RFR',np.nanmean(RFR_flux[param_]), np.nanstd(RFR_flux[param_]),np.nanmin(RFR_flux[param_]),np.nanmax(RFR_flux[param_])),
('ANN',np.nanmean(ANN_flux[param_]), np.nanstd(ANN_flux[param_]),np.nanmin(ANN_flux[param_]),np.nanmax(ANN_flux[param_])),
('L11',np.nanmean(lana_flux[param_]), np.nanstd(lana_flux[param_]),np.nanmin(lana_flux[param_]),np.nanmax(lana_flux[param_])),
('JT16',np.nanmean(jarnikova_flux[param_]), np.nanstd(jarnikova_flux[param_]),np.nanmin(jarnikova_flux[param_]),np.nanmax(jarnikova_flux[param_]))]
print(f'\nParameterization: {param_}')
print(tabulate(data, headers))
#%% Plot sea-air fluxes - compare climatologies
label_size = 32
fig = plt.figure(figsize=(24,38))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'24'}
plt.rc('font', **font) # sets the specified font formatting globally
gs = fig.add_gridspec(10, 2)
# main plots
# Increase resolution of projection - needed to draw polygons accurately
map_proj = ccrs.Orthographic(central_latitude=-90.0, central_longitude=0)
map_proj._threshold /= 100
ax = fig.add_subplot(gs[0:3, 0], projection=map_proj)
ax2 = fig.add_subplot(gs[3:6, 0], projection=map_proj)
ax3 = fig.add_subplot(gs[0:3, 1], projection=map_proj)
ax4 = fig.add_subplot(gs[3:6, 1], projection=map_proj)
# cax = fig.add_subplot(gs[6,0:]) # for colorbar
cax = fig.add_axes([ax2.get_position().x0,
ax2.get_position().y0-0.05,
(ax2.get_position().x1-ax2.get_position().x0)*2+(ax3.get_position().x0-ax2.get_position().x1),
0.02])
ax5 = fig.add_subplot(gs[7:9, 0])
ax6 = fig.add_subplot(gs[9, 0])
ax7 = fig.add_subplot(gs[7:9, 1])
ax8 = fig.add_subplot(gs[9, 1])
vmin=0
vmax=30
#------------------------------------------------------------------------------
# Plot ML fluxes (full climatology)
h, ax = South_1ax_map(ax=ax,
data=fluxes_combined.groupby(['latbins','lonbins']).mean().unstack('lonbins'),
vmin=vmin,
vmax=vmax,
cmap=cmocean.cm.haline)
ax.set_title('Mean ML Predictions \nOct-Apr, 20.0 km')
ax.text(-0.05,1,'$\mathbf{a}$',fontsize=label_size,transform=ax.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot Lana Climatology
h2, ax2 = South_1ax_map(ax=ax2,
data=lana_flux['GM12'].groupby(['latbins','lonbins']).mean().unstack('lonbins'),
vmin=vmin,
vmax=vmax,
cmap=cmocean.cm.haline)
ax2.set_title(f'L11 Climatology \nOct-Apr, {degrees2kilometers(1):.1f} km')
ax2.text(-0.05,1,'$\mathbf{c}$',fontsize=label_size,transform=ax2.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot ML fluxes (full climatology)
h, ax3 = South_1ax_map(ax=ax3,
data=fluxes_combined.loc[[12,1,2],:,:].groupby(['latbins','lonbins']).mean().unstack('lonbins'),
vmin=vmin,
vmax=vmax,
cmap=cmocean.cm.haline)
ax3.set_title('Mean ML Predictions \nDec-Feb, 20.0 km')
ax3.text(-0.05,1,'$\mathbf{b}$',fontsize=label_size,transform=ax3.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot Jarnikova Climatology
h5, ax4 = South_1ax_map(ax=ax4,
data=jarnikova_flux['GM12'].unstack('lonbins'),
vmin=vmin,
vmax=vmax,
cmap=cmocean.cm.haline)
cb = plt.colorbar(h, cax=cax, fraction=0.001, extend='max', orientation='horizontal')
cb.set_ticks(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_ticklabels(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_label(r'DMS flux ($\mathrm{\mu}$mol $\mathrm{m^{-2}}$ $\mathrm{d^{-1}}$)', size=label_size)
ax4.set_title(f'JT16 Climatology \nDec-Feb, {degrees2kilometers(1):.1f} km')
ax4.text(-0.05,1,'$\mathbf{d}$',fontsize=label_size,transform=ax4.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot KDEs of flux distributions (ML vs. Lana)
max_kde = []
for i in [lana_kde, RFR_kde, ANN_kde]:
max_kde.append(i['kde'].max())
max_kde = np.array(max_kde)
ax5.plot(RFR_kde['ind'],RFR_kde['kde']/max_kde.max(),'b-', lw=2, label='RFR')
ax5.plot(ANN_kde['ind'],ANN_kde['kde']/max_kde.max(),'r-', lw=2, label='ANN')
ax5.plot(lana_kde['ind'],lana_kde['kde']/max_kde.max(),c='gray',ls='--', lw=2, label='L11')
ax5.set_xlim(0,30)
ax5.set_ylim(0,1)
ax5.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax5.tick_params('both', length=10, width=1, which='major')
ax5.tick_params('both', length=5, width=1, which='minor')
ax5.set_xticks([])
ax5.legend()
ax5.set_ylabel('Probability Density (norm.)')
ax5.text(-0.05,1.05,'$\mathbf{e}$',fontsize=label_size,transform=ax5.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot boxplot of fluxes (ML vs. Lana)
labels = ['RFR', 'ANN', 'L11']
bplot1 = ax6.boxplot([RFR_flux['GM12'].dropna(),ANN_flux['GM12'].dropna(),lana_flux['GM12'].dropna()],
widths=0.5,
vert=False,
showfliers=False,
patch_artist=True, # fill with color
labels=labels)
# fill with colors
colors = ['blue', 'red', 'gray']
for patch, color in zip(bplot1['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.6)
for patch in bplot1['medians']:
patch.set_color('black')
ax6.set_xlim([0,30])
ax6.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax6.tick_params('both', length=10, width=1, which='major')
ax6.tick_params('both', length=5, width=1, which='minor')
ax6.set_xlabel(r"DMS flux ($\mathrm{\mu}$mol $\mathrm{m^{-2}}$ $\mathrm{d^{-1}}$)")
ax6.text(-0.05,1.05,'$\mathbf{g}$',fontsize=label_size-2,transform=ax6.transAxes, zorder=1000)
#------------------------------------------------------------------------------
# Plot KDEs of flux distributions (ML vs. Jarnikova) - normalized
max_kde = []
for i in [jarnikova_kde, RFR_kde_3mon, ANN_kde_3mon]:
max_kde.append(i['kde'].max())
max_kde = np.array(max_kde)
ax7.plot(RFR_kde_3mon['ind'],RFR_kde_3mon['kde']/max_kde.max(),'b-', lw=2, label='RFR')
ax7.plot(ANN_kde_3mon['ind'],ANN_kde_3mon['kde']/max_kde.max(),'r-', lw=2, label='ANN')
ax7.plot(jarnikova_kde['ind'],jarnikova_kde['kde']/max_kde.max(),c='darkorange',ls='--', lw=2, label='JT16')
ax7.set_xlim(0,30)
ax7.set_ylim(0,1)
ax7.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax7.tick_params('both', length=10, width=1, which='major')
ax7.tick_params('both', length=5, width=1, which='minor')
ax7.set_xticks([])
ax7.legend()
ax7.text(-0.05,1.05,'$\mathbf{f}$',fontsize=label_size,transform=ax7.transAxes, zorder=500)
#------------------------------------------------------------------------------
# Plot boxplot of fluxes (ML vs. Jarnikova)
labels = ['RFR', 'ANN', 'JT16']
bplot1 = ax8.boxplot([RFR_flux['GM12'].loc[[12,1,2],:,:].dropna(),ANN_flux['GM12'].loc[[12,1,2],:,:].dropna(),jarnikova_flux['GM12'].dropna()],
widths=0.5,
vert=False,
showfliers=False,
patch_artist=True, # fill with color
labels=labels)
# fill with colors
colors = ['blue', 'red', 'darkorange']
for patch, color in zip(bplot1['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.6)
for patch in bplot1['medians']:
patch.set_color('black')
ax8.set_xlim([0,30])
ax8.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax8.tick_params('both', length=10, width=1, which='major')
ax8.tick_params('both', length=5, width=1, which='minor')
ax8.set_xlabel(r"DMS flux ($\mathrm{\mu}$mol $\mathrm{m^{-2}}$ $\mathrm{d^{-1}}$)")
ax8.text(-0.05,1.05,'$\mathbf{h}$',fontsize=label_size-2,transform=ax8.transAxes, zorder=1000)
fig.subplots_adjust(hspace=0.5)
#%% Map predictions, observations, and deviance
label_size = 32
sizing = 24
#------------------------------------------------------------------------------
# set color scale range
vmin = 0
vmax = 10
# set deviance scale range
dev_vmin = -5
dev_vmax = 5
# colorbar step size
dev_step = (dev_vmax-dev_vmin)/10
step = 0.25
#------------------------------------------------------------------------------
# Map the averages
# fig = plt.figure(figsize=(36,18))
fig = plt.figure(figsize=(24,24))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
ax = fig.add_subplot(2,2,1, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax2 = fig.add_subplot(2,2,2, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax3 = fig.add_subplot(2,2,3, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax4 = fig.add_subplot(2,2,4, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
#-----------------------------------------------------------------------------
#### Map RFR
h, ax = South_1ax_map(ax=ax,
data=RFR_y_pred_mean.unstack('lonbins'),
plottype='mesh',
cmap=cmocean.cm.haline,
vmin=vmin,
vmax=vmax,
extend="max",
)
newcmap = cmocean.tools.crop_by_percent(cmocean.cm.turbid, 50, which='max', N=None)
cs = newcmap(np.linspace(0,1,4))
hs = []
for i,key in enumerate(list(fronts.keys())[:-1]):
h = ax.scatter(x=fronts[key].index.get_level_values('lonbins').values,
y=fronts[key].index.get_level_values('latbins').values,
s=fronts[key].values,
color=cs[i],
transform=ccrs.PlateCarree())
hs.append(h)
ax.legend(handles=hs, labels=fronts.keys(), loc='center', bbox_to_anchor=(0.9,-0.225,0.5,0.5), markerscale=15, prop={'size': 30})
for ha in ax.legend_.legendHandles:
ha.set_edgecolor("k")
ax.set_title('RFR')
ax.text(-0.05,1,'$\mathbf{a}$',fontsize=label_size,transform=ax.transAxes, zorder=500)
#-----------------------------------------------------------------------------
#### Map ANN
h2, ax2 = South_1ax_map(ax=ax2,
data=ANN_y_pred_mean.unstack('lonbins'),
plottype='mesh',
cmap=cmocean.cm.haline,
vmin=vmin,
vmax=vmax,
extend="max",
)
newcmap = cmocean.tools.crop_by_percent(cmocean.cm.turbid, 50, which='max', N=None)
cs = newcmap(np.linspace(0,1,4))
hs = []
for i,key in enumerate(list(fronts.keys())[:-1]):
h = ax2.scatter(x=fronts[key].index.get_level_values('lonbins').values,
y=fronts[key].index.get_level_values('latbins').values,
s=fronts[key].values,
color=cs[i],
transform=ccrs.PlateCarree())
hs.append(h)
ax2.set_title('ANN')
ax2.text(-0.05,1,'$\mathbf{b}$',fontsize=label_size,transform=ax2.transAxes, zorder=500)
#-----------------------------------------------------------------------------
#### Map deviance
norm = mpl.colors.TwoSlopeNorm(vmin=dev_vmin, vcenter=0, vmax=dev_vmax) # scales to accentuate depth colors, and diverge at 0
h3, ax3 = South_1ax_map(ax=ax3,
data=(RFR_y_pred_mean-ANN_y_pred_mean).unstack('lonbins'),
plottype='mesh',
cmap='RdBu',
norm=norm,
vmin=dev_vmin,
vmax=dev_vmax,
extend="both",
)
ax3.set_title('Deviation')
# add colorbar
divider = make_axes_locatable(ax3)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h3, cax=ax_cb, orientation="horizontal")
cb.ax.tick_params(labelsize=sizing)
cb.set_label('Model Deviation (RFR-ANN, nM)', fontsize=sizing)
cb.set_ticks(np.arange(dev_vmin, dev_vmax+dev_step, dev_step))
cb.set_ticklabels(np.arange(dev_vmin, dev_vmax+dev_step, dev_step).astype(int))
ax3.text(-0.05,1,'$\mathbf{c}$',fontsize=label_size,transform=ax3.transAxes, zorder=500)
#-----------------------------------------------------------------------------
#### Map Observations
h4,ax4 = South_1ax_map(ax=ax4,
data=np.sinh(y).groupby(['latbins','lonbins']).mean().unstack('lonbins'),
s=5,
plottype='scatter',
cmap=cmocean.cm.haline,
vmin=vmin,
vmax=vmax,
)
ax4.set_title('Obs.')
# add colorbar
divider = make_axes_locatable(ax4)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h4, cax=ax_cb, orientation="horizontal", extend='max')
cb.ax.tick_params(labelsize=sizing)
cb.set_label(r'DMS (nM)', fontsize=sizing)
cb.set_ticks(np.arange(vmin, vmax+2, 2))
cb.set_ticklabels(np.arange(vmin, vmax+2, 2))
ax4.text(-0.05,1,'$\mathbf{d}$',fontsize=label_size,transform=ax4.transAxes, zorder=500)
#%% Wind speed vs DMS
label_size = 32
fig = plt.figure(figsize=(34,24))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
gs = fig.add_gridspec(2, 4)
# main plots
ax = fig.add_subplot(gs[0:1,0:2], projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax2 = fig.add_subplot(gs[1:2,0:2], projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax3 = fig.add_subplot(gs[0:2,2:])
#------------------------------------------------------------------------------
#### Map DMS
vmin=0
vmax=10
h, ax = South_1ax_map(ax=ax,
data=models_combined.groupby(['latbins','lonbins']).mean().unstack('lonbins'),
plottype='mesh',
# levels=np.linspace(vmin, vmax, 100),
vmin=vmin,
vmax=vmax,
extend='max',
cmap=cmocean.cm.haline,
)
h2 = ax.contour(X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').columns.values,
X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').index.values,
X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').values,
levels=[60],
colors='w',
transform=ccrs.PlateCarree())
ax.clabel(h2)
divider = make_axes_locatable(ax)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h, cax=ax_cb, orientation="horizontal", extend='max')
cb.set_ticks(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_ticklabels(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_label(r'DMS$_{\rmmodel}$ (nM)', size=22)
ax.text(-0.05,1,'$\mathbf{a}$',fontsize=label_size,transform=ax.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Map Winds
wind_vmin = X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean().min()
wind_vmax = X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean().max()
h3, ax2 = South_1ax_map(ax=ax2,
data=X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean().unstack('lonbins'),
plottype='mesh',
# levels=np.linspace(wind_vmin, wind_vmax, 100),
vmin=wind_vmin,
vmax=wind_vmax,
cmap='Spectral_r',
)
h4 = ax2.contour(X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').columns.values,
X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').index.values,
X_full.loc[:,'MLD'].groupby(['latbins','lonbins']).mean().unstack('lonbins').values,
levels=[60],
colors='k',
transform=ccrs.PlateCarree())
ax2.clabel(h4)
divider = make_axes_locatable(ax2)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h3, cax=ax_cb, orientation="horizontal")
cb.set_ticks(np.round(np.arange(2,wind_vmax+1,2),0))
cb.set_ticklabels(np.round(np.arange(2,wind_vmax+1,2),0))
cb.set_label(r'Wind Speed (m $s^{-1}$)', size=22)
ax2.text(-0.05,1,'$\mathbf{b}$',fontsize=label_size,transform=ax2.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Bivariate wind speed vs DMS
# fit a linear model to the time-averaged data
lm = linear_model.LinearRegression().fit(X_full.loc[:,['wind']].groupby(['latbins','lonbins']).mean(),
np.log10(models_combined).groupby(['latbins','lonbins']).mean().to_frame())
wind_r2 = lm.score(X_full.loc[:,['wind']].groupby(['latbins','lonbins']).mean(),
np.log10(models_combined).groupby(['latbins','lonbins']).mean().to_frame())
wind_preds = lm.predict(X_full.loc[:,['wind']].groupby(['latbins','lonbins']).mean())
# Plot the data
cbaxes = inset_axes(ax3, width="30%", height="3%", loc='lower left',
bbox_to_anchor=(0.03, 0.06, 0.99, 0.9), bbox_transform=ax3.transAxes)
ax3.scatter(X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean(),
np.log10(models_combined.groupby(['latbins','lonbins']).mean()),
# models_combined.groupby(['latbins','lonbins']).mean(),
s=2,
color='k',
marker='.')
h = sns.histplot(x=X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean(),
y=np.log10(models_combined.groupby(['latbins','lonbins']).mean()),
stat='density',
bins=100, pthresh=.1, cmap="mako", ax=ax3, cbar=True, cbar_ax=cbaxes, cbar_kws={'orientation':'horizontal'}, zorder=2)
# set name and fontsizes of colorbar
h.figure.axes[-1].set_xlabel('Counts', size=22)
h.figure.axes[-1].tick_params(labelsize=22)
ax3.plot(X_full.loc[:,['wind']].groupby(['latbins','lonbins']).mean().values,
wind_preds,
'r-',
zorder=3)
ax3.set_ylabel(r'log$_{10}$(DMS$_{\rmmodel}$)')
ax3.set_xlabel('Wind Speed (m s$^{-1}$)')
ax3.set_xlim(X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean().min(),
X_full.loc[:,'wind'].groupby(['latbins','lonbins']).mean().max()+2)
ax3.set_ylim(np.log10(1),np.log10(models_combined.groupby(['latbins','lonbins']).mean().max()))
# Format ticks
ax3.tick_params('both', length=10, width=1, which='major')
ax3.tick_params('both', length=8, width=1, which='minor')
ax3.set_xscale('linear')
ax3.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
ax3.text(-0.05,1,'$\mathbf{c}$',fontsize=label_size,transform=ax3.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Miniplot of winds vs. DMS observations
# Define mini axes to plot in
left, bottom, width, height = [0.74, 0.66, 0.15, 0.2]
ax4 = fig.add_axes([left, bottom, width, height])
# Bin data
binned = pd.concat([X.loc[:,'wind'],np.log10(np.sinh(y))], axis=1)
wind_binned = binned.groupby(pd.cut(binned['wind'], np.arange(4,12,1))).mean()
wind_binned_std = binned.groupby(pd.cut(binned['wind'], np.arange(4,12,1))).std()
# Plot scatter of observations
ax4.errorbar(wind_binned['wind'],
wind_binned['DMS'],
yerr=wind_binned_std['DMS'],
capsize=3,
elinewidth=2,
ecolor='k',
ls='None',
marker='.',
mfc='k',
mec='k',
ms=15)
# Plot the binned data
ax4.scatter(X.loc[:,'wind'].groupby(['latbins','lonbins']).mean(),
np.log10(np.sinh(y).groupby(['latbins','lonbins']).mean()),
s=2,
color='gray',
marker='.')
# Fit linear model, plot line of best fit
lm = linear_model.LinearRegression().fit(wind_binned[['wind']],wind_binned[['DMS']])
wind_r22 = lm.score(wind_binned[['wind']],wind_binned[['DMS']])
ax4.plot(binned['wind'],
lm.predict(binned[['wind']]),
'r-',
zorder=3)
# Format axis labels and ticks
ax4.set_ylabel(r'log$_{10}$(DMS$_{\rmobs}$)', fontsize=18)
ax4.set_xlabel('Wind Speed (m s$^{-1}$)', fontsize=18)
ax4.set_xlim(X.loc[:,'wind'].groupby(['latbins','lonbins']).mean().min(),
X.loc[:,'wind'].groupby(['latbins','lonbins']).mean().max())
ax4.set_ylim(np.log10(0.1),np.log10(np.sinh(y).groupby(['latbins','lonbins']).mean().max()))
ax4.tick_params('both', length=10, width=1, which='major')
ax4.tick_params('both', length=8, width=1, which='minor')
ax4.set_xlim(2.5,12.5)
ax4.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
print(wind_r2)
print(wind_r22)
# fig.subplots_adjust(wspace=0.1)
#%% Map DMS, ice, Si*
label_size = 32
fig = plt.figure(figsize=(24,12))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
ax = fig.add_subplot(1,2,1, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
ax2 = fig.add_subplot(1,2,2, projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
vmin=0
vmax=10
front_color = 'magenta'
#------------------------------------------------------------------------------
#### Mean DMS
h, ax = South_1ax_map(ax=ax,
data=models_combined.groupby(['latbins','lonbins']).mean().unstack('lonbins'),
plottype='mesh',
# levels=np.linspace(vmin, vmax, 100),
vmin=vmin,
vmax=vmax,
extend='max',
cmap=cmocean.cm.haline,
)
h2 = ax.contour(X_full_plus.loc[:,'Si_star'].groupby(['latbins','lonbins']).mean().unstack('lonbins').columns.values,
X_full_plus.loc[:,'Si_star'].groupby(['latbins','lonbins']).mean().unstack('lonbins').index.values,
X_full_plus.loc[:,'Si_star'].groupby(['latbins','lonbins']).mean().unstack('lonbins').values,
levels=[-10,0,20,40,50],
colors='w',
# linewidths=1,
transform=ccrs.PlateCarree())
ax.clabel(h2, fontsize=20)
divider = make_axes_locatable(ax)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h, cax=ax_cb, orientation="horizontal", extend='max')
cb.set_ticks(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_ticklabels(np.arange(vmin,vmax+(vmax-vmin)/10,(vmax-vmin)/10))
cb.set_label(r'DMS$_{\rmmodel}$ (nM)', size=22)
ax.text(-0.05,1,'$\mathbf{a}$',fontsize=label_size,transform=ax.transAxes, zorder=500)
# #------------------------------------------------------------------------------
#### Ice
newcmap = cmocean.tools.crop_by_percent(cmocean.cm.ice, 20, which='min', N=None)
h3, ax2 = South_1ax_map(ax=ax2,
data=(X_full.loc[:,'ice']/X_full.loc[:,'ice'].max()).groupby(['latbins','lonbins']).mean().unstack('lonbins'),
plottype='mesh',
# levels=np.linspace(0, X_full.loc[:,'ice'].groupby(['latbins','lonbins']).mean().max(), 100),
vmin=0,
vmax=(X_full.loc[:,'ice']/X_full.loc[:,'ice'].max()).groupby(['latbins','lonbins']).mean().max()-0.5,
cmap=newcmap,
)
divider = make_axes_locatable(ax2)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h3, cax=ax_cb, orientation="horizontal", extend='max')
cb.set_label(r'Fraction of Sea Ice Coverage', size=22)
ax2.text(-0.05,1,'$\mathbf{b}$',fontsize=label_size,transform=ax2.transAxes, zorder=500)
#------------------------------------------------------------------------------
#%% Temporal Correlations (Per Pixel)
if first_run == True:
# pull out dates into columns - do this before the loops to speed up computations
DMS_indexed = models_combined.unstack('datetime')
vars_indexed = X_full.unstack('datetime')
# create an empty list
corrs_by_date = []
# now iterate by predictor, computing correlations per coordinate over time (i.e. per row)
for i,var_ in enumerate(X_full.columns):
for j in tqdm(range(len(DMS_indexed))):
corrs_by_date.append(spearmanr(DMS_indexed.iloc[j,:], vars_indexed.loc[:,var_].iloc[j,:])[0])
if i == 0:
corr_matrix = pd.Series(np.array(corrs_by_date), index=DMS_indexed.index, name=var_)
else:
iterated_var = pd.Series(np.array(corrs_by_date), index=DMS_indexed.index, name=var_)
corr_matrix = pd.concat([corr_matrix, iterated_var], axis=1)
corrs_by_date = []
corr_matrix.to_csv(write_dir[:69]+'/'+'point_correlation_map_data.csv')
del DMS_indexed, vars_indexed
else:
corr_matrix = pd.read_csv(write_dir[:69]+'/'+'point_correlation_map_data.csv',index_col=[0,1], header=[0])
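#------------------------------------------------------------------------------
# Alternative sketch (assumption, not used above): if the row indices of the wide
# DMS and predictor tables align exactly, pandas' row-wise corrwith(method='spearman')
# could likely replace the inner loop. Left as an uncalled helper so the cached-CSV
# branch above is unaffected.
def pointwise_spearman(dms_wide, vars_wide, predictors):
    """Per-pixel Spearman correlation of DMS against each predictor over time."""
    return pd.concat(
        [dms_wide.corrwith(vars_wide[var_], axis=1, method='spearman').rename(var_)
         for var_ in predictors],
        axis=1)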
#%% Map correlations
sizing = 24
vmin = -1
vmax = 1
# Map predictors
for var in corr_matrix.columns:
if var == 'PAR':
fig = plt.figure(figsize=(18,18))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
gs = fig.add_gridspec(1, 1)
# main plots
ax = fig.add_subplot(gs[0,0], projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
norm = mpl.colors.TwoSlopeNorm(vmin=vmin,
vcenter=0,
vmax=vmax) # scales to accentuate depth colors, and diverge at 0
h, ax = South_1ax_map(ax=ax,
data=corr_matrix.loc[:,var].unstack('lonbins'),
# data=corrs_to_map.loc[:,var].unstack('lonbins'),
plottype='mesh',
vmin=vmin,
vmax=vmax,
cmap='RdBu_r',
norm=norm,
)
divider = make_axes_locatable(ax)
ax_cb = divider.new_vertical(size="5%", pad=0.2, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb = plt.colorbar(h, cax=ax_cb, orientation="horizontal")
cb.ax.tick_params(labelsize=sizing)
cb.set_label(var, fontsize=sizing)
cb.set_ticks(np.arange(vmin, vmax, np.round(((vmax-vmin)/10),2)))
# cb.set_ticklabels(np.arange(vmin, vmax+np.round(((vmax-vmin)/10),2), np.round(((vmax-vmin)/10),2)))
#%% PCA
#### Setup PCA
PCA_input = models_combined.unstack('datetime').T.reindex(index=reordered_months).dropna(axis=1) # transpose so eigenvectors are in space
PCA_scaler = StandardScaler()
PCA_scaler.fit(PCA_input.values)
data_norm = pd.DataFrame(PCA_scaler.transform(PCA_input.values), index=PCA_input.index, columns=PCA_input.columns)
#### Apply IPCA - runs PCA incrementally to reduce memory consumption
n_modes = np.min(np.shape(data_norm))
pca = IncrementalPCA(n_components = n_modes, batch_size=1000)
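# Note: the sign of PCA components is arbitrary; the factor of -1 below presumably just
# flips the leading modes into a consistent orientation for plotting.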
PCs = -1*pca.fit_transform(data_norm)
eigvecs = -1*pca.components_
fracVar = pca.explained_variance_ratio_
#### Plot Fraction of Variance per Mode
plt.figure(figsize=(18,12))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
plt.subplot(1,1,1)
plt.plot(range(1,len(fracVar)+1),fracVar,'k--o',ms=10)
plt.xlabel('Mode Number')
plt.ylabel('Fraction Variance Explained')
plt.title('Variance Explained by All Modes')
plt.tight_layout()
plt.show()
#### Plot PCA spatial and temporal patterns
# choose number of modes to plot
n = 2
fig = plt.figure(figsize=(13*n,30))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'22'}
plt.rc('font', **font) # sets the specified font formatting globally
for k in range(n):
kPCs = PCs[:,k]
if k==0:
kPCs = -1*kPCs
ax = fig.add_subplot(3,n,k+1)
ax.plot(range(len(reordered_months)),kPCs,'k--')
ax.scatter(range(len(reordered_months)),
kPCs,
s=300,
c=PCs[:,k],
edgecolors='k',
cmap='RdBu_r',
vmin=-600,
vmax=700,
zorder=10)
ax.set_xticks(range(len(reordered_months)))
ax.set_xticklabels(['Oct','Nov','Dec','Jan','Feb','Mar','Apr'])
ax.set_ylim(-600,700)
ax.set_title('PCs of Mode #' + str(k+1))
ax.set_xlabel('Month')
ax.text(0.05, 0.9, f'Variance = {fracVar[k]*100:.2f}%', transform=ax.transAxes, fontsize=22,
va='center', ha='left', ma='left', zorder=500)
norm = mpl.colors.TwoSlopeNorm(vmin=eigvecs[:n,:].min(),
vcenter=0,
vmax=eigvecs[:n,:].max()) # scales to accentuate depth colors, and diverge at 0
ax2 = fig.add_subplot(3,n,(n)+k+1,projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
keigvecs = pd.DataFrame(eigvecs[k,:].T, index=PCA_input.columns).squeeze()
if k==0:
keigvecs = -1*keigvecs
h, ax2 = South_1ax_map(ax=ax2,
data=keigvecs.unstack('lonbins'),
plottype='mesh',
vmin=eigvecs[:n,:].min(),
vmax=eigvecs[:n,:].max(),
norm=norm,
cmap='RdBu_r',
)
ax2.set_title('Eigenvectors of Mode #' + str(k+1))
if k == 1:
divider = make_axes_locatable(ax2)
ax_cb = divider.new_horizontal(size="5%", pad=0.6, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h, cax=ax_cb)
cb1.ax.tick_params(labelsize=20)
cb1.set_ticks(np.linspace(np.round(eigvecs[:n,:].min(),3),np.round(eigvecs[:n,:].max(),3),5))
cb1.set_label('$\it{v}$', fontsize=20)
ax2.scatter(x=fronts['PF'].index.get_level_values('lonbins').values,
y=fronts['PF'].index.get_level_values('latbins').values,
s=fronts['PF'].values,
c='k',
transform=ccrs.PlateCarree())
ax3 = fig.add_subplot(3,n,(2*n)+k+1,projection=ccrs.Orthographic(central_longitude=0, central_latitude=-90))
names = ['MLD','SST','SAL']
h2, ax3 = South_1ax_map(ax=ax3,
data=corr_matrix.loc[:,names[k]].unstack('lonbins'),
plottype='mesh',
vmin=-1,
vmax=1,
cmap='RdBu_r',
)
ax3.set_title(names[k])
if k == 1:
divider = make_axes_locatable(ax3)
ax_cb = divider.new_horizontal(size="5%", pad=0.6, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h2, cax=ax_cb)
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'$\rho$', fontsize=20)
ax3.scatter(x=fronts['PF'].index.get_level_values('lonbins').values,
y=fronts['PF'].index.get_level_values('latbins').values,
s=fronts['PF'].values,
c='k',
transform=ccrs.PlateCarree())
fig.subplots_adjust(hspace=0.2)
#%% Plot Mesoscale variability at Kerguelen Region
label_size = 32
fig = plt.figure(figsize=(40,22))
font={'family':'DejaVu Sans',
'weight':'normal',
'size':'24'}
plt.rc('font', **font) # sets the specified font formatting globally
gs = fig.add_gridspec(4, 5)
# main plots
# Increase resolution of projection - needed to draw polygons accurately
map_proj = ccrs.Orthographic(central_latitude=-90.0, central_longitude=0)
map_proj._threshold /= 100
ax = fig.add_subplot(gs[0:3, 0:2], projection=map_proj)
ax3 = fig.add_subplot(gs[0:2, 2:5], projection=ccrs.PlateCarree())
ax4 = fig.add_subplot(gs[2:5, 2:5], projection=ccrs.PlateCarree())
ax5 = fig.add_subplot(gs[3, 0], projection=ccrs.PlateCarree())
ax6 = fig.add_subplot(gs[3, 1], projection=ccrs.PlateCarree())
#------------------------------------------------------------------------------
extent = [60, 90, -54, -40]
newextent = [67, 77, -54, -46]
month = 1
#------------------------------------------------------------------------------
#### Plot SSHA correlations
h, ax = South_1ax_map(ax=ax,
data=corr_matrix.loc[:,'SSHA'].unstack('lonbins'),
# data=corr_matrix.loc[:,'SSHA'].where((corr_matrix.loc[:,'SSHA']>0.5) | (corr_matrix.loc[:,'SSHA']<-0.5)).unstack('lonbins'),
plottype='mesh',
vmin=corr_matrix.loc[:,'SSHA'].unstack('lonbins').loc[extent[2]:extent[3],extent[0]:extent[1]].min().min(),
vmax=corr_matrix.loc[:,'SSHA'].unstack('lonbins').loc[extent[2]:extent[3],extent[0]:extent[1]].max().max(),
cmap='RdBu_r')
ax.gridlines(draw_labels=True,
lw=3,
color="k",
y_inline=True,
xlocs=range(-180,180,30),
ylocs=range(-80,91,10),
zorder=50,
)
divider = make_axes_locatable(ax)
ax_cb = divider.new_vertical(size="5%", pad=0.12, axes_class=plt.Axes, pack_start=True)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h, cax=ax_cb, orientation='horizontal')
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'$\rho$(DMS, SSHA)', fontsize=20)
ax.text(-0.05,1,'$\mathbf{a}$',fontsize=label_size,transform=ax.transAxes, zorder=500)
def custom_mark_zoom(axA, axB, direction='right', extent=None, fc=None, ec='k', alpha=1, transform=None):
# starting point:
# https://stackoverflow.com/questions/51268493/drawing-filled-shapes-between-different-axes-in-matplotlib
import matplotlib.patches as patches
import numpy as np
import matplotlib as mpl
xx = [extent[0], extent[1]]
yy = [extent[2], extent[3]]
xy = (xx[0], yy[0])
width = xx[1] - xx[0]
height = yy[1] - yy[0]
xyB1 = (0,1)
xyB2 = (0,0)
xyA1 = transform.transform_point(60,-40,ccrs.PlateCarree())
xyA2 = transform.transform_point(90,-40,ccrs.PlateCarree())
coordsA='data'
coordsB='axes fraction'
# First mark the patch in the main axes
pp = axA.add_patch(patches.Rectangle(xy, width, height, fc=fc, ec=ec, zorder=5, alpha=alpha, transform=ccrs.PlateCarree()))
# Add a second identical patch w/o alpha & face color (i.e. make the edge color dark)
pp = axA.add_patch(patches.Rectangle(xy, width, height, fc='None', ec=ec, lw=2, zorder=5, transform=ccrs.PlateCarree()))
# now draw an anchor line to the zoomed in axis
p1 = axA.add_patch(patches.ConnectionPatch(
xyA=xyA1, xyB=xyB1,
coordsA=coordsA, coordsB=coordsB,
axesA=axA, axesB=axB))
# draw a 2nd anchor line to the zoomed in axes
p2 = axA.add_patch(patches.ConnectionPatch(
xyA=xyA2, xyB=xyB2,
coordsA=coordsA, coordsB=coordsB,
axesA=axA, axesB=axB))
return pp, p1, p2
# add the connection lines and shading
pp, p1, p2 = custom_mark_zoom(ax, ax3, direction='right', extent=[60, 90, -54, -40], fc='gray', alpha=0.5, transform=map_proj)
#------------------------------------------------------------------------------
#### Plot DMS subregion
h2, ax3, gl = South_1ax_flat_map(ax=ax3,
data=models_combined.loc[month].unstack('lonbins'),
plottype='mesh',
# levels=100,
vmin=0,
vmax=10,
# cmap='viridis',
cmap=cmocean.cm.haline,
extent=extent)
gl.bottom_labels = False
var = 'SSHA'
h0 = ax3.contour(X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-0.25,-0.2,-0.15,-0.1,-0.05,0.05,0.1,0.15,0.2,0.25],
# levels=10,
colors='k',
linewidths=0.5,
transform=ccrs.PlateCarree())
ax3.clabel(h0, fontsize=10)
# Plot PF and SAF fronts
ax3.plot(front_data.LonPF.values,
front_data.LatPF.values,
'r-',
linewidth=3,
transform=ccrs.PlateCarree())
ax3.plot(front_data.LonSAF.values,
front_data.LatSAF.values,
'w-',
linewidth=3,
transform=ccrs.PlateCarree())
# Contour topography
h01 = ax3.contour(etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-1500],
colors='w',
linestyles='dashed',
linewidths=2,
transform=ccrs.PlateCarree())
for c in h01.collections:
c.set_dashes([(0, (2.0, 5.0))])
ax3.set_title(f'Kerguelen Plateau ({var_months_[month]})')
divider = make_axes_locatable(ax3)
ax_cb = divider.new_horizontal(size="5%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h2, cax=ax_cb, extend='max')
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'DMS$_{\rmmodel}$ (nM)')
# Add legend
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
legend_elements = [Line2D([0], [0], color='r', label='Polar Front (PF)'),
Line2D([0], [0], color='w', label='Southern Antarctic Front (SAF)'),
]
ax3.legend(handles=legend_elements, loc='lower left',framealpha=0.7)
ax3.text(-0.05,1,'$\mathbf{b}$',fontsize=label_size,transform=ax3.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Plot 2nd variable for subregion
mapvar = 'chl'
h3, ax4, _ = South_1ax_flat_map(ax=ax4,
data=X_full.loc[:,mapvar].loc[month].unstack('lonbins'),
plottype='mesh',
# levels=100,
vmin=X_full.loc[:,mapvar].loc[1,extent[2]-3:extent[3],extent[0]:extent[1]].min(),
vmax=X_full.loc[:,mapvar].loc[1,extent[2]-3:extent[3],extent[0]:extent[1]].max(),
cmap=cmocean.cm.thermal,
extent=extent)
var = 'SSHA'
h0 = ax4.contour(X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
X_full.loc[:,var].loc[month].unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-0.25,-0.2,-0.15,-0.1,-0.05,0.05,0.1,0.15,0.2,0.25],
# levels=15,
colors='w',
linewidths=0.5,
transform=ccrs.PlateCarree())
ax4.clabel(h0, fontsize=10)
# mark the inset box
ax4.add_patch(mpl.patches.Rectangle(xy=(newextent[0], newextent[2]), width=newextent[1]-newextent[0], height=newextent[3]-newextent[2],
ec='r',
linestyle='-',
lw=4,
fill=False,
alpha=1,
zorder=1000,
transform=ccrs.PlateCarree()))
divider = make_axes_locatable(ax4)
ax_cb = divider.new_horizontal(size="5%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h3, cax=ax_cb, extend='both')
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'chl-a (mg m$^{-3}$)')
# Plot PF and SAF fronts
h0 = ax4.plot(front_data.LonPF.values,
front_data.LatPF.values,
'r-',
linewidth=3,
transform=ccrs.PlateCarree())
h0 = ax4.plot(front_data.LonSAF.values,
front_data.LatSAF.values,
'w-',
linewidth=3,
transform=ccrs.PlateCarree())
# Contour topography
h01 = ax4.contour(etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-1500],
colors='w',
linestyles='dashed',
linewidths=2,
transform=ccrs.PlateCarree())
for c in h01.collections:
c.set_dashes([(0, (2.0, 5.0))])
ax4.text(-0.05,1,'$\mathbf{c}$',fontsize=label_size, transform=ax4.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Plot CDOM over plateau
h2, ax5, _ = South_1ax_flat_map(ax=ax5,
data=X_full.loc[month,'CDOM'].unstack('lonbins'),
plottype='mesh',
cmap=cmocean.cm.thermal,
vmin=np.nanmin(X_full.loc[month,'CDOM'].loc[newextent[2]:newextent[3],newextent[0]:newextent[1]]),
vmax=np.nanmax(X_full.loc[month,'CDOM'].loc[newextent[2]:newextent[3],newextent[0]:newextent[1]]),
extent=newextent
)
divider = make_axes_locatable(ax5)
ax_cb = divider.new_horizontal(size="5%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h2, cax=ax_cb, extend='both')
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'a$_{443}$ (m$^{-1}$)')
# Plot PF and SAF fronts
h0 = ax5.plot(front_data.LonPF.values,
front_data.LatPF.values,
'r-',
linewidth=3,
transform=ccrs.PlateCarree())
h0 = ax5.plot(front_data.LonSAF.values,
front_data.LatSAF.values,
'w-',
linewidth=3,
transform=ccrs.PlateCarree())
# Contour topography
h01 = ax5.contour(etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-1500],
colors='w',
linestyles='dashed',
linewidths=2,
transform=ccrs.PlateCarree())
for c in h01.collections:
c.set_dashes([(0, (2.0, 5.0))])
ax5.text(-0.1,1,'$\mathbf{d}$',fontsize=label_size,transform=ax5.transAxes, zorder=500)
#------------------------------------------------------------------------------
#### Plot SSN over plateau
h2, ax6, gl = South_1ax_flat_map(ax=ax6,
data=X_full.loc[month,'SSN'].unstack('lonbins'),
plottype='mesh',
cmap=cmocean.cm.thermal,
vmin=np.nanmin(X_full.loc[month,'SSN'].loc[newextent[2]:newextent[3],newextent[0]:newextent[1]]),
vmax=np.nanmax(X_full.loc[month,'SSN'].loc[newextent[2]:newextent[3],newextent[0]:newextent[1]]),
extent=newextent
)
gl.left_labels = False
divider = make_axes_locatable(ax6)
ax_cb = divider.new_horizontal(size="5%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb1 = plt.colorbar(h2, cax=ax_cb, extend='both')
cb1.ax.tick_params(labelsize=20)
cb1.set_label(r'SSN ($\mathrm{\mu}$mol $\mathrm{kg^{-1}}$)')
# Plot PF and SAF fronts
h0 = ax6.plot(front_data.LonPF.values,
front_data.LatPF.values,
'r-',
linewidth=3,
transform=ccrs.PlateCarree())
h0 = ax6.plot(front_data.LonSAF.values,
front_data.LatSAF.values,
'w-',
linewidth=3,
transform=ccrs.PlateCarree())
# Contour topography
h01 = ax6.contour(etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].columns.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].index.values,
etopo.unstack('lonbins').loc[extent[2]-3:extent[3],extent[0]:extent[1]].values,
levels=[-1500],
colors='w',
linestyles='dashed',
linewidths=2,
transform=ccrs.PlateCarree())
for c in h01.collections:
c.set_dashes([(0, (2.0, 5.0))])
ax6.text(-0.1,1,'$\mathbf{e}$',fontsize=label_size,transform=ax6.transAxes, zorder=500)
fig.subplots_adjust(hspace=0.3)
#%% Spatial Correlations
# Use spearman rank to assess non-linear relationships
X_full_plus = X_full.copy()
X_full_plus['Si_star'] = Si_star
X_plus = X.copy()
X_plus['Si_star'] = (X.loc[:,'Si']-X.loc[:,'SSN']).squeeze()
#-----------------------------------------------------------------------------
#### RFR correlations
rhos = []
pvals = []
names = X_full_plus.columns.values
for i in range(len(X_full_plus.columns.values)):
rho, pval = spearmanr(X_full_plus.iloc[:,i].values,np.sinh(RFR_y_pred.values))
rhos.append(rho)
pvals.append(pval)
RFR_correlations = pd.concat([pd.Series(names),pd.Series(rhos),pd.Series(np.array(rhos)**2),pd.Series(pvals)], axis=1)
RFR_correlations.columns = ['Variable','rho','R2','p-value']
RFR_correlations.set_index('Variable', inplace=True)
# RFR_correlations = RFR_correlations.reindex(RFR_correlations.rho.abs().sort_values(ascending=False).index)
#-----------------------------------------------------------------------------
#### ANN correlations
rhos = []
pvals = []
names = X_full_plus.columns.values
for i in range(len(X_full_plus.columns.values)):
rho, pval = spearmanr(X_full_plus.iloc[:,i].values,np.sinh(ANN_y_pred.values))
rhos.append(rho)
pvals.append(pval)
ANN_correlations = pd.concat([pd.Series(names),pd.Series(rhos),pd.Series(np.array(rhos)**2),pd.Series(pvals)], axis=1)
ANN_correlations.columns = ['Variable','rho','R2','p-value']
ANN_correlations.set_index('Variable', inplace=True)
# ANN_correlations = ANN_correlations.reindex(ANN_correlations.rho.abs().sort_values(ascending=False).index)
#------------------------------------------------------------------------------
#### Raw Data
rhos = []
pvals = []
names = X_plus.columns.values
for i in range(len(X_plus.columns.values)):
rho, pval = spearmanr(X_plus.iloc[:,i].values,np.sinh(y.values))
rhos.append(rho)
pvals.append(pval)
correlations = pd.concat([pd.Series(names),pd.Series(rhos),pd.Series(np.array(rhos)**2),pd.Series(pvals)], axis=1)
correlations.columns = ['Variable','rho','R2','p-value']
correlations.set_index('Variable', inplace=True)
# correlations = correlations.reindex(correlations.rho.abs().sort_values(ascending=False).index)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
print(RFR_correlations)
print(ANN_correlations)
print(correlations)
#------------------------------------------------------------------------------
corr_mat = pd.concat([correlations['rho'],RFR_correlations['rho'],ANN_correlations['rho']],axis=1)
corr_mat.columns=['Obs.','RFR','ANN']
corr_mat=corr_mat.T
#### Plot heatmap
fig = plt.figure(figsize=(24,24))
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 22}
mpl.rc('font', **font)
ax = fig.add_subplot(111)
# Set up diverging colormaps...
# colors1 = plt.cm.gist_heat(np.linspace(0, 1, 128))
# colors2 = plt.cm.gist_heat_r(np.linspace(0, 1, 128))
colors1 = plt.cm.Blues_r(np.linspace(0, 1, 128))
colors2 = plt.cm.Reds(np.linspace(0, 1, 128))
# ...combine them and build a new colormap
colors = np.vstack((colors1, colors2))
cmap = mpl.colors.LinearSegmentedColormap.from_list('my_colormap', colors)
# normalize cmap to diverge at 0
norm = mpl.colors.TwoSlopeNorm(vmin=-0.4, vcenter=0, vmax=0.4)
# plot data
im = ax.imshow(corr_mat, cmap=cmap, norm=norm)
# We want to show all ticks...
ax.set_xticks(np.arange(len(corr_mat.columns.values)))
ax.set_yticks(np.arange(len(corr_mat.index.values)))
# ... and label them with the respective list entries
ax.set_xticklabels(corr_mat.columns.values)
ax.set_yticklabels(corr_mat.index.values)
# Rotate the tick labels and set their alignment
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations
for i in range(len(corr_mat.index.values)):
for j in range(len(corr_mat.columns.values)):
if corr_mat.iloc[i, j]<0.1 and corr_mat.iloc[i, j]>-0.1:
text = ax.text(j, i, round(corr_mat.iloc[i, j], ndigits=2),
ha="center", va="center", color="k")
else:
text = ax.text(j, i, round(corr_mat.iloc[i, j], ndigits=2),
ha="center", va="center", color="w")
ax.set_title(r'Spearman Rank Correlations ($\rho$)')
# plt.colorbar(im, ax=ax, shrink=0.2)
divider = make_axes_locatable(ax)
ax_cb2 = divider.new_horizontal(size="3%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb2)
cb2 = plt.colorbar(im, cax=ax_cb2)
cb2.ax.tick_params(labelsize=20)
cb2.set_label(r'$\rho$')
# fig.savefig(save_to_path+str('heatmap.png'), dpi=500, transparent=True, bbox_inches='tight')
|
"""Invoice views"""
# Django REST Framework
from rest_framework import mixins, viewsets, status
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
# Serializers
from bugal.invoices.serializers import InvoiceModelSerializer
# Models
from bugal.base.models import Contact, Invoice
# Permissions
from ..permissions import IsAccountOwner
class InvoiceViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet
):
"""Client view set."""
queryset = Invoice.objects.all()
serializer_class = InvoiceModelSerializer
permission_classes = (IsAuthenticated, IsAccountOwner)
def get_object(self):
"""Retrieve and return authenticated user"""
return self.request.user
def get_queryset(self):
"""Return objects for the current authenticated user only"""
queryset = Invoice.objects.filter(user=self.request.user)
return queryset
def retrieve(self, request, *args, **kwargs):
"""Add extra data to the response."""
instance = Contact.objects.filter(user=self.request.user, id=int(kwargs['pk'])).first()
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
return Response(serializer.data)
def perform_create(self, serializer):
"""Create a new Client."""
serializer.save(user=self.request.user)
def partial_update(self, request, *args, **kwargs):
"""Update Invoice"""
instance = Invoice.objects.filter(user=self.request.user, id=int(kwargs['pk'])).first()
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
print("\nInstance: {0}\n".format(instance))
print("\nValid: {0}\n".format(serializer.is_valid))
serializer.save()
return Response({'success': True}, status=status.HTTP_200_OK)
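# --- Usage sketch (assumption, not part of the original module) ---
# In a typical DRF project this viewset is exposed through a router in a urls.py;
# the 'invoices' prefix and basename below are hypothetical choices for illustration.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'invoices', InvoiceViewSet, basename='invoice')
urlpatterns = router.urls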
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
This file contains indexing suite v2 code
"""
file_name = "indexing_suite/element_proxy_traits.hpp"
code = """// Copyright (c) 2003 Raoul M. Gough
//
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy
// at http://www.boost.org/LICENSE_1_0.txt)
//
// Header file element_proxy_traits.hpp
//
// Note: element_proxy.hpp must be included before this header
//
// This is a separate header so that element_proxy.hpp is not
// dependant on register_ptr_to_python.hpp. This avoids a problem with
// two-phase name lookup, where register_ptr_to_python must be
// included *after* the element_proxy overload of boost::get_pointer
// is declared.
//
// History
// =======
// 2003/10/23 rmg File creation
// 2008/12/08 Roman Change indexing suite layout
//
// $Id: element_proxy_traits.hpp,v 1.1.2.5 2003/12/05 17:36:14 raoulgough Exp $
//
#ifndef BOOST_PYTHON_INDEXING_ELEMENT_PROXY_TRAITS_HPP
#define BOOST_PYTHON_INDEXING_ELEMENT_PROXY_TRAITS_HPP
#include <indexing_suite/element_proxy.hpp>
#include <indexing_suite/value_traits.hpp>
#include <boost/python/register_ptr_to_python.hpp>
#include <boost/python/implicit.hpp>
namespace boost { namespace python { namespace indexing {
template<typename ContainerProxy>
struct element_proxy_traits
: public value_traits<
BOOST_DEDUCED_TYPENAME ContainerProxy::raw_value_type>
{
typedef element_proxy<ContainerProxy> element_proxy_type;
typedef typename ContainerProxy::raw_value_type raw_value_type;
typedef value_traits<raw_value_type> base_type;
// Wrap the base class versions of the comparisons using
// indirection
struct less
: std::binary_function<element_proxy_type, element_proxy_type, bool>
{
typename base_type::less m_base_compare;
bool operator()(
element_proxy_type const &p1, element_proxy_type const &p2) const
{
return m_base_compare (*p1, *p2);
}
};
struct equal_to
: std::binary_function<raw_value_type, element_proxy_type, bool>
{
// First param is raw_value_type to interface smoothly with the
// bind1st used in default_algorithms::find
typename base_type::equal_to m_base_compare;
bool operator()(
raw_value_type const &v, element_proxy_type const &p) const
{
return m_base_compare (v, *p);
}
};
template<typename PythonClass, typename Policy>
static void visit_container_class (PythonClass &, Policy const &)
{
register_ptr_to_python<element_proxy_type>();
implicitly_convertible<raw_value_type, element_proxy_type>();
}
};
#if !defined (BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
// value_traits partial specialization for element_proxy instances
template<typename ContainerProxy>
struct value_traits<element_proxy<ContainerProxy> >
: element_proxy_traits<ContainerProxy>
{
};
#endif
} } }
#endif // BOOST_PYTHON_INDEXING_ELEMENT_PROXY_TRAITS_HPP
"""
|
"""
__name__ = model.py
__author__ = Yash Patel
__description__ = Defines model to be trained on the Cartpole data,
predicting the directional action to take given the 4D observation state
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout
def create_model():
model = Sequential()
model.add(Dense(128, input_shape=(4,), activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(2, activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
return model
if __name__ == "__main__":
model = create_model()
|
import sqlite3
class Database:
"""
DB connection and methods
"""
base_recipe_query = 'SELECT * FROM RECIPE'
def __init__(self, connection_str):
self.connection_str = connection_str
def connect(self):
self.connection = sqlite3.connect(self.connection_str)
return self.connection
def disconnect(self):
return self.connection.close()
def get_recipes(self):
return self.connection.execute(self.base_recipe_query).fetchall()
    def get_recipe(self, recipe_name):
        # Parameterized query avoids SQL injection through the recipe name
        return self.connection.execute(self.base_recipe_query + ' WHERE name = ?', (recipe_name,)).fetchone()
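# A minimal usage sketch (assumes a local SQLite file named "recipes.db" with a
# RECIPE table that has a "name" column; both are illustrative, not part of the
# class above).
if __name__ == "__main__":
    db = Database("recipes.db")
    db.connect()
    print(db.get_recipes())            # every row in RECIPE
    print(db.get_recipe("pancakes"))   # single row looked up by name
    db.disconnect()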
|
# TERMINAL COLORS
print("\033[1;30m---------ANSI COLOR SYSTEM---------\033[m")
print(" ")
print("Opening code:                Closing code:")
print("\033[1;30m\ 0 3 3 [0;30;40m + STRING + \ 0 3 3 [m\033[m")
print(" ")
print("Filling in the opening code:")
print("\033[1;30m\ 0 3 3 [ STYLE ; TEXT COLOR ; BACKGROUND COLOR m\033[m")
print("\033[0;30mNO FORMAT - 0 ; BLACK - 30 ; \033[1;37;40m BLACK - 40\033[m")
print("\033[1;31m BOLD - 1 ; RED - 31 ; \033[1;37;41m RED - 41\033[m")
print("\033[4;32m UNDERLINE - 4 ; GREEN - 32 ; \033[1;37;42m GREEN - 42\033[m")
print("\033[7;33m NEGATIVE - 7 ;\033[m" + "\033[0;33m YELLOW - 33\033[m")
print("\033[1;34m ; BLUE - 34 ; \033[1;30;44m BLUE - 44\033[m")
print("\033[0;30mS/FORMATAÇÃO - 0 ; AZUL - 30 ; \033[1;37;40m BRANCO - 40\033[m")
print("\033[0;30mS/FORMATAÇÃO - 0 ; BRANCO - 30 ; \033[1;37;40m BRANCO - 40\033[m")
print("\033[0;30mS/FORMATAÇÃO - 0 ; BRANCO - 30 ; \033[1;37;40m BRANCO - 40\033[m")
|
#!/usr/bin/python3
import ctypes
import os,sys,copy,math
this_path = os.path.dirname(os.path.realpath(__file__))
assert(0==os.system('cd "%s" && make default' % this_path))
fds_lib = None
libnames = ['fds_x86_64.so','fds_x86.so']
while libnames:
so = libnames.pop()
try:
fds_lib = ctypes.CDLL(os.path.join(this_path,so))
break
except OSError: # load failed
if not libnames:
raise
# Errors from fds.h
errtab = {}
for errno, (name, desc) in enumerate(
[("FDS_SUCCESS", "The operation completed successfully."),
("FDS_ERR_OPERATION_TIMEOUT", "The operation timed out."),
("FDS_ERR_NOT_INITIALIZED", "The module has not been initialized."),
("FDS_ERR_UNALIGNED_ADDR", "The input data is not aligned to a word boundary."),
("FDS_ERR_INVALID_ARG", "The parameter contains invalid data."),
("FDS_ERR_NULL_ARG", "The parameter is NULL."),
("FDS_ERR_NO_OPEN_RECORDS", "The record is not open, so it cannot be closed."),
("FDS_ERR_NO_SPACE_IN_FLASH", "There is no space in flash memory."),
("FDS_ERR_NO_SPACE_IN_QUEUES", "There is no space in the internal queues."),
("FDS_ERR_RECORD_TOO_LARGE", "The record exceeds the maximum allowed size."),
("FDS_ERR_NOT_FOUND", "The record was not found."),
("FDS_ERR_NO_PAGES", "No flash pages are available."),
("FDS_ERR_USER_LIMIT_REACHED", "The maximum number of users has been reached."),
("FDS_ERR_CRC_CHECK_FAILED", "The CRC check failed."),
("FDS_ERR_BUSY", "The underlying flash subsystem was busy."),
("FDS_ERR_INTERNAL", "An internal error occurred."),
]):
locals()[name]=errno
errtab[errno] = name, desc
def crc16_compute(data):
dbuf = ctypes.create_string_buffer(data, len(data))
ret = fds_lib.crc16_compute(dbuf, len(data), 0)
return int(ret)
class FDSException(Exception):
def __init__(self, errno):
try:
name, desc = errtab[errno]
except KeyError:
name = "FDS_UNKNOWN_%d"%errno
desc = "Unknown error #%d"%errno
self.args = errno,name,desc
class Fds(object):
def __init__(self, image=None):
size = fds_lib.api_fs_size()
if image:
if len(image) != size:
raise Exception("Image must be exactly %d bytes."%size)
else:
image = b'\xff'*size
self.mount(image)
def mount(self, image):
"""Supply an image (as bytes) to mount as fds filesystem.
All bytes = 255 for a new filesystem"""
size = fds_lib.api_fs_size()
assert len(image) == size
self.im = ctypes.create_string_buffer(image, size)
fds_lib.api_fds_mount.restype = ctypes.c_int
result = fds_lib.api_fds_mount(self.im)
if result:
raise FDSException(result)
def unmount(self):
pass
def dir(self):
"""Gets a list of record_ids, not too meaningful by themselves.
See read_all() for a more useful function."""
entries=[]
def collect_entry(record_id):
entries.append(int(record_id))
EntryCallback = ctypes.CFUNCTYPE(None,
ctypes.c_uint32, # record_id
)
entry_cb = EntryCallback(collect_entry)
result = fds_lib.api_fds_dir(entry_cb)
if result:
raise FDSException(result)
return entries
def write_record(self, record_key, file_id, data):
pad_len = (4-len(data)%4)%4
data += b'\0'*pad_len
assert 0 == len(data)%4
assert 0 <= record_key < 0x10000
assert 0 <= file_id < 0x10000
result = fds_lib.api_write_record(record_key,
file_id,
data,
len(data)//4)
if result:
raise FDSException(result)
def update_record(self, record_id, data):
"Replaces a record by creating a new one and deleting the old one"
pad_len = (4-len(data)%4)%4
data += b'\0'*pad_len
assert 0 == len(data)%4
assert 0 <= record_id < 0x100000000
result = fds_lib.api_update_record(record_id,
data,
len(data)//4)
if result:
raise FDSException(result)
def read_record(self, record_id):
"Reads an individual record given the record_id"
key = ctypes.c_uint16()
file_id = ctypes.c_uint16()
data = ctypes.POINTER(ctypes.c_uint8)()
data_len_words = ctypes.c_int()
result = fds_lib.api_get_record(record_id,
ctypes.byref(file_id),
ctypes.byref(key),
ctypes.byref(data_len_words),
ctypes.byref(data))
if result:
raise FDSException(result)
file_id = int(file_id.value)
key = int(key.value)
data_len_words = int(data_len_words.value)
data = bytes(data[i] for i in range(4*data_len_words))
return file_id,key,data
def read_all(self):
"Returns a list-of-dicts of all records"
ret = []
for record_id in self.dir():
file_id,key,data = self.read_record(record_id)
ret.append({'file_id':file_id,
'key':key,
'record_id':record_id,
'data':data})
return ret
def delete_record(self, record_id):
"Removes a single record."
result = fds_lib.api_del_record(record_id)
if result:
raise FDSException(result)
def delete_file(self, file_id):
"Marks all records belonging to file as deleted."
result = fds_lib.api_del_file(file_id)
if result:
raise FDSException(result)
def gc(self):
"Garbage collect."
result = fds_lib.api_gc()
if result:
raise FDSException(result)
@property
def contents(self):
return bytes(self.im)
def hd(self):
"Prints a hexdump of the image to stdout using 'hd'"
with open('/tmp/image.bin','wb') as fd:
fd.write(self.contents)
os.system('hd < /tmp/image.bin | tee /tmp/hexdump')
return open('/tmp/hexdump').read()
def _tests(fds_mount):
s = fds_mount
ids = s.dir()
assert len(ids)==0
s.write_record(file_id=6,
record_key=100,
data=b"Hello World.")
ids = s.dir()
assert len(ids)==1
fid,key,data = s.read_record(ids[0])
assert fid==6
assert key==100
assert data==b"Hello World."
s.write_record(file_id=6,
record_key=100,
data=b"Hello World2.")
ids = s.dir()
print(ids)
assert len(ids)==2
s.delete_record(ids[-1])
ids = s.dir()
assert len(ids)==1
k = 0
while 1:
try:
s.write_record(record_key=100,
file_id=6,
data=("Hello World %d."%k).encode())
except FDSException as e:
if e.args[0] == FDS_ERR_NO_SPACE_IN_FLASH:
break
else:
raise
k += 1
assert len(s.dir())==1+k
s.gc()
assert len(s.dir())==1+k
s.delete_file(6)
assert len(s.dir())==0
try:
s.write_record(file_id=8,
record_key=1234,
data=b"This data won't fit."*8)
except FDSException as e:
assert e.args[0] == FDS_ERR_NO_SPACE_IN_FLASH
else:
assert False # didn't get the exception we wanted
s.gc() # now records will fit
s.write_record(file_id=66,
record_key=234,
data=b"This is the first data.")
ids = s.dir()
assert len(ids)==1
s.update_record(ids[0],
data=b"This is the second data.")
ids = s.dir()
assert len(ids)==1
fid,key,data = s.read_record(ids[0])
assert fid==66
assert key==234
assert data==b"This is the second data."
s.write_record(file_id=66,
record_key=234,
data=b"This is the third data.")
all_data = s.read_all()
assert len(all_data)==2
assert all_data[0]['file_id']==66
assert all_data[0]['key']==234
assert all_data[0]['data']==b"This is the second data."
s.hd()
if __name__=="__main__":
fs = Fds()
_tests(fs)
|
def align_tokens(tokens, text):
point, spans = 0, []
for token in tokens:
start = text.find(token, point)
if start < 0:
raise ValueError(f'substring "{token}" not found in "{text}"')
end = start + len(token)
spans.append((start, end))
point = end
return spans
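# Quick self-check of align_tokens: a hypothetical sentence and its whitespace
# tokens map back to (start, end) character spans in the original text.
if __name__ == "__main__":
    text = "The quick brown fox"
    tokens = text.split()
    spans = align_tokens(tokens, text)
    assert spans == [(0, 3), (4, 9), (10, 15), (16, 19)]
    print(spans)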
|
import os
import sqlite3
from flask import Flask, render_template, g, request
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import Navbar, View
from forms import DangersForm, DiceRollForm, TravelForm
from functions import fetch_animals_and_monsters, dice_roll
import odatasfunctions as of
app = Flask(__name__)
bootstrap = Bootstrap(app)
nav = Nav()
nav.init_app(app)
secret_key = os.environ.get('FLASK_KEY')
app.secret_key = secret_key if secret_key else 'development_key'
db_path = '%s/odatastools.db' % os.path.dirname(os.path.realpath(__file__))
@nav.navigation()
def get_navbar():
return Navbar('DSA5 MeisterTools',
View('Home', 'index'),
View('Reisehelfer','travel'),
View('Wildtiere und Ungeheuer', 'dangers'),
View('Würfeln', 'dice'))
def get_database():
db = getattr(g, '_database', None)
if not db:
g._database = sqlite3.connect(db_path)
db = g._database
return db
@app.route('/')
def index():
return render_template('index.html')
class TravelComputationResult(object):
def __init__(self, travel_days, trans_cost, trans_method_cost, food, water):
self.travel_days = int(travel_days)
self.transport_cost = of.Geldrechner(trans_cost, 'Heller')
self.food_cost = of.Geldrechner(food, 'Heller')
self.total_cost = of.Geldrechner(trans_cost + food, 'Heller')
qs_multipliers = [1.3, 1, 0.95, 0.92, 0.9, 0.89, 0.87, 0.8]
row_labels = ['Fehlschlag', 'QS 1', 'QS 2', 'QS 3', 'QS 4', 'QS 5', 'QS 6']
self.haggling_table = []
for label, qs_multiplier in zip(row_labels, qs_multipliers):
col = [label]
for cost in [food] + list(trans_method_cost):
c = of.GeldrechnerKurz(qs_multiplier * cost, 'Heller')
col.append(c)
self.haggling_table.append(col)
@app.route('/travel', methods=['GET', 'POST'])
def travel():
form = TravelForm()
if request.method == 'GET':
form.group_size.data = form.group_size_default
form.dist_foot.data = form.dist_default
form.dist_boat.data = form.dist_default
form.dist_carriage.data = form.dist_default
form.dist_sea_hammock.data = form.dist_default
form.dist_sea_cabin.data = form.dist_default
form.dist_horse.data = form.dist_default
form.travel_conditions.data = form.travel_conditions_default
form.computation_methods.data = form.computation_methods_default
travel_cost_result = None
if form.validate_on_submit():
group_size = form.group_size.data
dist_foot = form.dist_foot.data
dist_boat = form.dist_boat.data
dist_carriage = form.dist_carriage.data
dist_sea_hammock = form.dist_sea_hammock.data
dist_sea_cabin = form.dist_sea_cabin.data
dist_horse = form.dist_horse.data
travel_conditions = form.travel_conditions.data
computation_methods = form.computation_methods.data
simulation = computation_methods == 'simulation'
distances = (dist_horse, dist_foot, dist_boat, dist_carriage,
dist_sea_hammock, dist_sea_cabin)
travel_days, trans_cost, trans_method_cost = of.reisedauerrechnung(distances,
group_size,
travel_conditions,
simulation)
food, water = of.berechne_nahrungsbedarf(travel_days, group_size,
travel_conditions, simulation)
travel_cost_result = TravelComputationResult(travel_days, trans_cost, trans_method_cost, food, water)
return render_template('travel.html', form=form, travel_cost_result=travel_cost_result)
@app.route('/dangers', methods=['GET', 'POST'])
def dangers():
form = DangersForm()
region = form.select_region_choices[0][0]
if form.validate_on_submit():
region = form.region.data
db_conn = get_database()
animal_rows, monster_rows = fetch_animals_and_monsters(db_conn,
form.query_names[region])
return render_template('dangers.html', form=form, animal_rows=animal_rows,
monster_rows=monster_rows)
@app.route('/dice', methods=['GET', 'POST'])
def dice():
default_value = 13
form = DiceRollForm()
if request.method == 'GET':
form.attribute1.data = form.attribute_default
form.attribute2.data = form.attribute_default
form.attribute3.data = form.attribute_default
form.skill.data = form.skill_default
form.mod.data = form.mod_defaut
if form.validate_on_submit():
att1 = form.attribute1.data
att2 = form.attribute2.data
att3 = form.attribute3.data
skill = form.skill.data
mod = form.mod.data
rolls, result_str = dice_roll(att1, att2, att3, skill, mod)
return render_template('dice.html', form=form, rolls=rolls,
result_str=result_str)
return render_template('dice.html', form=form)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
import sys
sys.path.append('..')
import torch
from model.encoder import swin_transformer,simplenet,trans_plus_conv,resnet
def build_encoder(arch='resnet18', weights=None, **kwargs):
arch = arch.lower()
if arch.startswith('resnet'):
backbone = resnet.__dict__[arch](**kwargs)
elif arch.startswith('swin_transformer'):
backbone = swin_transformer.__dict__[arch](**kwargs)
elif arch.startswith('simplenet'):
backbone = simplenet.__dict__[arch](**kwargs)
elif arch.startswith('swinplus'):
backbone = trans_plus_conv.__dict__[arch](**kwargs)
else:
raise Exception('Architecture undefined!')
if weights is not None and isinstance(weights, str):
print('Loading weights for backbone')
backbone.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return backbone
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# net = build_encoder('swin_transformer',n_channels=1)
# net = build_encoder('resnet18',n_channels=1)
net = build_encoder('swinplusr18',n_channels=1)
# net = build_encoder('simplenet',n_channels=1)
net = net.cuda()
net.train()
input = torch.randn((1,1,512,512)).cuda()
output = net(input)
for item in output:
print(item.size())
import sys
sys.path.append('..')
from utils import count_params_and_macs
count_params_and_macs(net.cuda(),(1,1,512,512))
|
#!/usr/bin/env python
import json
import yaml
import urllib
import os
import sys
from jsonref import JsonRef # type: ignore
import click
from openapi2jsonschema.log import info, debug, error
from openapi2jsonschema.util import (
additional_properties,
replace_int_or_string,
allow_null_optional_fields,
change_dict_values,
append_no_duplicates,
)
from openapi2jsonschema.errors import UnsupportedError
@click.command()
@click.option(
"-o",
"--output",
default="schemas",
metavar="PATH",
help="Directory to store schema files",
)
@click.option(
"-p",
"--prefix",
default="_definitions.json",
help="Prefix for JSON references (only for OpenAPI versions before 3.0)",
)
@click.option(
"--stand-alone", is_flag=True, help="Whether or not to de-reference JSON schemas"
)
@click.option(
"--expanded", is_flag=True, help="Expand Kubernetes schemas by API version"
)
@click.option(
"--kubernetes", is_flag=True, help="Enable Kubernetes specific processors"
)
@click.option(
"--no-all", is_flag=True, help="Do not generate all.json file"
)
@click.option(
"--strict",
is_flag=True,
help="Prohibits properties not in the schema (additionalProperties: false)",
)
@click.argument("schema", metavar="SCHEMA_URL")
def default(output, schema, prefix, stand_alone, expanded, kubernetes, no_all, strict):
"""
Converts a valid OpenAPI specification into a set of JSON Schema files
"""
info("Downloading schema")
if sys.version_info < (3, 0):
response = urllib.urlopen(schema)
else:
if os.path.isfile(schema):
schema = "file://" + os.path.realpath(schema)
req = urllib.request.Request(schema)
response = urllib.request.urlopen(req)
info("Parsing schema")
# Note that JSON is valid YAML, so we can use the YAML parser whether
# the schema is stored in JSON or YAML
data = yaml.load(response.read(), Loader=yaml.SafeLoader)
if "swagger" in data:
version = data["swagger"]
elif "openapi" in data:
version = data["openapi"]
if not os.path.exists(output):
os.makedirs(output)
if version < "3":
with open("%s/_definitions.json" % output, "w") as definitions_file:
info("Generating shared definitions")
definitions = data["definitions"]
if kubernetes:
definitions["io.k8s.apimachinery.pkg.util.intstr.IntOrString"] = {
"oneOf": [{"type": "string"}, {"type": "integer"}]
}
                # Although the Kubernetes API does not allow `number` as a valid
                # Quantity type, almost all Kubernetes tooling accepts it as
                # valid. For this reason, we extend the API definition to
                # allow `number` values.
definitions["io.k8s.apimachinery.pkg.api.resource.Quantity"] = {
"oneOf": [{"type": "string"}, {"type": "number"}]
}
# For Kubernetes, populate `apiVersion` and `kind` properties from `x-kubernetes-group-version-kind`
for type_name in definitions:
type_def = definitions[type_name]
if "x-kubernetes-group-version-kind" in type_def:
for kube_ext in type_def["x-kubernetes-group-version-kind"]:
if expanded and "apiVersion" in type_def["properties"]:
api_version = (
kube_ext["group"] + "/" +
kube_ext["version"]
if kube_ext["group"]
else kube_ext["version"]
)
append_no_duplicates(
type_def["properties"]["apiVersion"],
"enum",
api_version,
)
if "kind" in type_def["properties"]:
kind = kube_ext["kind"]
append_no_duplicates(
type_def["properties"]["kind"], "enum", kind
)
if strict:
definitions = additional_properties(definitions)
definitions_file.write(json.dumps(
{"definitions": definitions}, indent=2))
types = []
info("Generating individual schemas")
if version < "3":
components = data["definitions"]
else:
components = data["components"]["schemas"]
generated_files = []
for title in components:
kind = title.split(".")[-1].lower()
if kubernetes:
group = title.split(".")[-3].lower()
api_version = title.split(".")[-2].lower()
specification = components[title]
specification["$schema"] = "http://json-schema.org/schema#"
specification.setdefault("type", "object")
if strict:
specification["additionalProperties"] = False
if kubernetes and expanded:
if group in ["core", "api"]:
full_name = "%s-%s" % (kind, api_version)
else:
full_name = "%s-%s-%s" % (kind, group, api_version)
else:
full_name = kind
types.append(title)
try:
debug("Processing %s" % full_name)
# These APIs are all deprecated
if kubernetes:
if title.split(".")[3] == "pkg" and title.split(".")[2] == "kubernetes":
raise UnsupportedError(
"%s not currently supported, due to use of pkg namespace"
% title
)
# This list of Kubernetes types carry around jsonschema for Kubernetes and don't
# currently work with openapi2jsonschema
if (
kubernetes
and stand_alone
and kind
in [
"jsonschemaprops",
"jsonschemapropsorarray",
"customresourcevalidation",
"customresourcedefinition",
"customresourcedefinitionspec",
"customresourcedefinitionlist",
"customresourcedefinitionspec",
"jsonschemapropsorstringarray",
"jsonschemapropsorbool",
]
):
raise UnsupportedError("%s not currently supported" % kind)
updated = change_dict_values(specification, prefix, version)
specification = updated
if stand_alone:
# Put generated file on list for dereferencig $ref elements
# after all files will be generated
generated_files.append(full_name)
if "additionalProperties" in specification:
if specification["additionalProperties"]:
updated = change_dict_values(
specification["additionalProperties"], prefix, version
)
specification["additionalProperties"] = updated
if strict and "properties" in specification:
updated = additional_properties(specification["properties"])
specification["properties"] = updated
if kubernetes and "properties" in specification:
updated = replace_int_or_string(specification["properties"])
updated = allow_null_optional_fields(updated)
specification["properties"] = updated
with open("%s/%s.json" % (output, full_name), "w") as schema_file:
debug("Generating %s.json" % full_name)
schema_file.write(json.dumps(specification, indent=2))
except Exception as e:
error("An error occured processing %s: %s" % (kind, e))
if stand_alone:
base = "file://%s/%s/" % (os.getcwd(), output)
for file_name in generated_files:
full_path = "%s/%s.json" % (output, file_name)
specification = json.load(open(full_path))
specification = JsonRef.replace_refs(
specification, base_uri=base)
with open(full_path, "w") as schema_file:
schema_file.write(json.dumps(specification, indent=2))
if not no_all:
with open("%s/all.json" % output, "w") as all_file:
info("Generating schema for all types")
contents = {"oneOf": []}
for title in types:
if version < "3":
contents["oneOf"].append(
{"$ref": "%s#/definitions/%s" % (prefix, title)}
)
else:
contents["oneOf"].append(
{"$ref": (title.replace("#/components/schemas/", "") + ".json")}
)
all_file.write(json.dumps(contents, indent=2))
if __name__ == "__main__":
default()
|
import os.path
import numpy
from scipy.spatial import Delaunay
import meshio
from meshplex import MeshTri
def simple0():
#
# 3___________2
# |\_ 2 _/|
# | \_ _/ |
# | 3 \4/ 1 |
# | _/ \_ |
# | _/ \_ |
# |/ 0 \|
# 0-----------1
#
X = numpy.array(
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.5, 0.5, 0.0],
]
)
cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]])
return X, cells
def simple1():
#
# 3___________2
# |\_ 2 _/|
# | \_ _/ |
# | 3 \4/ 1 |
# | _/ \_ |
# | _/ \_ |
# |/ 0 \|
# 0-----------1
#
X = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.4, 0.5]])
cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]])
return X, cells
def simple2():
#
# 3___________2
# |\_ 3 _/ \_
# | \_ _/ 2 \_
# | 4 \4/_________\5
# | _/ \_ _/
# | _/ \_ 1 _/
# |/ 0 \ /
# 0-----------1
#
X = numpy.array(
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.7, 0.5], [1.7, 0.5]]
)
cells = numpy.array([[0, 1, 4], [1, 5, 4], [2, 4, 5], [2, 3, 4], [3, 0, 4]])
return X, cells
def simple3():
#
# 5___________4___________3
# |\_ 6 _/ \_ 4 _/|
# | \_ _/ 5 \_ _/ |
# | 7 \6/_________\7/ 3 |
# | _/ \_ _/ \_ |
# | _/ \_ 1 _/ 2 \_ |
# |/ 0 \ / \|
# 0-----------1-----------2
#
X = numpy.array(
[
[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[2.0, 1.0],
[1.0, 1.0],
[0.0, 1.0],
[0.7, 0.5],
[1.7, 0.5],
]
)
cells = numpy.array(
[
[0, 1, 6],
[1, 7, 6],
[1, 2, 7],
[2, 3, 7],
[3, 4, 7],
[4, 6, 7],
[4, 5, 6],
[5, 0, 6],
]
)
return X, cells
def pacman():
this_dir = os.path.dirname(os.path.realpath(__file__))
mesh = meshio.read(os.path.join(this_dir, "meshes", "pacman.vtk"))
return mesh.points[:, :2], mesh.cells["triangle"]
def circle_gmsh():
this_dir = os.path.dirname(os.path.realpath(__file__))
mesh = meshio.read(os.path.join(this_dir, "meshes", "circle-gmsh.vtk"))
c = mesh.cells["triangle"].astype(numpy.int)
return mesh.points[:, :2], c
def circle_random():
n = 40
radius = 1.0
k = numpy.arange(n)
boundary_pts = radius * numpy.column_stack(
[numpy.cos(2 * numpy.pi * k / n), numpy.sin(2 * numpy.pi * k / n)]
)
# Compute the number of interior nodes such that all triangles can be somewhat
# equilateral.
edge_length = 2 * numpy.pi * radius / n
domain_area = numpy.pi - n * (
radius ** 2 / 2 * (edge_length - numpy.sin(edge_length))
)
cell_area = numpy.sqrt(3) / 4 * edge_length ** 2
target_num_cells = domain_area / cell_area
# Euler:
# 2 * num_points - num_boundary_edges - 2 = num_cells
# <=>
# num_interior_points ~= 0.5 * (num_cells + num_boundary_edges) + 1 - num_boundary_points
m = int(0.5 * (target_num_cells + n) + 1 - n)
# generate random points in circle; <http://mathworld.wolfram.com/DiskPointPicking.html>
numpy.random.seed(0)
r = numpy.random.rand(m)
alpha = 2 * numpy.pi * numpy.random.rand(m)
interior_pts = numpy.column_stack(
[numpy.sqrt(r) * numpy.cos(alpha), numpy.sqrt(r) * numpy.sin(alpha)]
)
pts = numpy.concatenate([boundary_pts, interior_pts])
tri = Delaunay(pts)
pts = numpy.column_stack([pts[:, 0], pts[:, 1], numpy.zeros(pts.shape[0])])
# Make sure there are exactly `n` boundary points
mesh = MeshTri(pts, tri.simplices)
assert numpy.sum(mesh.is_boundary_node) == n
return pts, tri.simplices
def circle_rotated():
pts, cells = circle_random()
# <https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula>
theta = numpy.pi / 4
k = numpy.array([1.0, 0.0, 0.0])
pts = (
pts * numpy.cos(theta)
+ numpy.cross(k, pts) * numpy.sin(theta)
+ numpy.outer(numpy.einsum("ij,j->i", pts, k), k) * (1.0 - numpy.cos(theta))
)
meshio.write_points_cells("out.vtk", pts, {"triangle": cells})
return pts, cells
|
#!/usr/bin/env python3
import sys, os, unittest, logging, tempfile
# Extend PYTHONPATH with local 'lib' folder
jasyroot = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir))
sys.path.insert(0, jasyroot)
import jasy.core.Project as Project
class Tests(unittest.TestCase):
def writeFile(self, path, fileName, content):
handle = open(os.path.join(path, fileName), mode="w", encoding="utf-8")
handle.write(content)
handle.close()
def readFile(self, path, fileName):
return open(os.path.join(path, fileName), mode="r", encoding="utf-8").read()
def createjpyaml(self, path):
self.writeFile(path, "jasyproject.yaml", """name: myproject
""")
def createjpyaml_withContent(self, path):
self.writeFile(path, "jasyproject.yaml", """name: myproject
content: {myproject.Main: [man/Main.js, man/Add.js], myproject/main.css: [man/main.css]}
""")
def createCaseOne(self):
#manual
path = os.path.join(tempfile.TemporaryDirectory().name, "myproject")
os.makedirs(path)
def createFolders():
os.makedirs(os.path.join(path, "man"))
def createSampleClasses():
self.writeFile(os.path.join(path, "man"), "index.html", """<html></html>""")
self.writeFile(os.path.join(path, "man"), "Main.js", ";")
self.writeFile(os.path.join(path, "man"), "Add.js", ";")
def createSampleAssets():
self.writeFile(os.path.join(path, "man"), "main.css", """html{}""")
createFolders()
self.createjpyaml_withContent(path)
createSampleClasses()
createSampleAssets()
return Project.getProjectFromPath(path)
def createCaseTwo(self):
#application
path = os.path.join(tempfile.TemporaryDirectory().name, "myproject")
os.makedirs(path)
def createFolders():
os.makedirs(os.path.join(path, "source"))
os.makedirs(os.path.join(os.path.join(path, "source"), "class"))
os.makedirs(os.path.join(os.path.join(path, "source"), "asset"))
os.makedirs(os.path.join(os.path.join(path, "source"), "translation"))
def createSampleClasses():
self.writeFile(os.path.join(path, "source"), "index.html", """<html></html>""")
self.writeFile(os.path.join(os.path.join(path, "source"), "class"), "Main.js", ";")
def createSampleAssets():
self.writeFile(os.path.join(os.path.join(path, "source"), "asset"), "main.css", """html{}""")
def createSampleTranslations():
self.writeFile(os.path.join(os.path.join(path, "source"), "translation"), "de.po", " ")
createFolders()
self.createjpyaml(path)
createSampleClasses()
createSampleAssets()
createSampleTranslations()
return Project.getProjectFromPath(path)
def createCaseThree(self):
#src
path = os.path.join(tempfile.TemporaryDirectory().name, "myproject")
os.makedirs(path)
def createFolders():
os.makedirs(os.path.join(path, "src"))
def createSampleClasses():
self.writeFile(os.path.join(path, "src"), "index.html", """<html></html>""")
self.writeFile(os.path.join(path, "src"), "Main.js", ";")
def createSampleAssets():
self.writeFile(os.path.join(path, "src"), "main.css", """html{}""")
createFolders()
self.createjpyaml(path)
createSampleClasses()
createSampleAssets()
return Project.getProjectFromPath(path)
def createCaseFour(self):
#resource
path = os.path.join(tempfile.TemporaryDirectory().name, "myproject")
os.makedirs(path)
def createFolders():
os.makedirs(os.path.join(path, "class"))
os.makedirs(os.path.join(path, "asset"))
os.makedirs(os.path.join(path, "translation"))
def createSampleClasses():
self.writeFile(os.path.join(path, "class"), "index.html", """<html></html>""")
self.writeFile(os.path.join(path, "class"), "Main.js", ";")
def createSampleAssets():
self.writeFile(os.path.join(path, "asset"), "main.css", """html{}""")
def createSampleTranslations():
self.writeFile(os.path.join(path, "translation"), "de.po", " ")
createFolders()
self.createjpyaml(path)
createSampleClasses()
createSampleAssets()
createSampleTranslations()
return Project.getProjectFromPath(path)
def getProjects(self):
return [self.createCaseOne(),self.createCaseTwo(),self.createCaseThree(),self.createCaseFour()]
def test_get_project(self):
for project in self.getProjects():
self.assertEqual(project.getName(), "myproject")
def test_get_name_from_path(self):
for project in self.getProjects():
self.assertEqual(Project.getProjectNameFromPath(project.getPath()), "myproject")
def test_scan(self):
for project in self.getProjects():
project.scan()
self.assertEqual(project.scanned, True)
def test_has_requires(self):
for project in self.getProjects():
self.assertEqual(project.hasRequires(), False)
def test_fields(self):
for project in self.getProjects():
self.assertEqual(project.getFields(), {})
def test_get_class_by_name(self):
for project in self.getProjects():
self.assertEqual(project.getClassByName("myproject.Main"), project.getClasses()["myproject.Main"])
self.assertEqual(type(project.getClassByName("myproject.Main")).__name__, "ClassItem")
def test_assets(self):
for project in self.getProjects():
self.assertEqual(type(project.getAssets()["myproject/main.css"]).__name__, "AssetItem")
def test_translations(self):
for project in [self.createCaseTwo(), self.createCaseFour()]:
self.assertEqual(type(project.getTranslations()["myproject.de"]).__name__, "TranslationItem")
def test_manual_class_fusion(self):
self.assertEqual(self.createCaseOne().getClassByName("myproject.Main").getText(), ";;")
if __name__ == '__main__':
logging.getLogger().setLevel(logging.ERROR)
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
# -*- coding: utf-8 -*-
'''
myword
'''
#from snsapi.plugin.renren import RenrenAPI
from snsapi.snspocket import SNSPocket
import json
import sys
import urllib2
import hashlib
import time
REPLY_GAP = 10 # seconds, 10 seems the minimum
NEWS_QUERY_COUNT = 5
MY_NAME = "hsama2012"
def can_reply(status):
"""
A filter function of the status you want to reply
"""
if not status.parsed.text.find("@" + MY_NAME) == -1:
return True
else:
return False
def get_word(text):
"""
    Get the word from a message
"""
text = text.replace("@" + MY_NAME +" ","")
return text
def translate(word):
"""
Translate a word with dic.zhan-dui.com
"""
url = "http://dic.zhan-dui.com/api.php?s=" + word + "&type=json"
req = urllib2.Request(url, data='')
    req.add_header('User-Agent', 'toolbar')
results = json.load(urllib2.urlopen(req))
if "error_code" in results:
return word +" " + " not found"
else:
mean = ""
for c in results["simple_dic"]:
mean = mean + c
return word + " " + mean
def main():
"""docstring for main"""
#set system default encoding to utf-8 to avoid encoding problems
reload(sys)
sys.setdefaultencoding( "utf-8" )
#load channel configurations
channels = json.load(open('conf/channel.json'))
#find one account
rr = SNSPocket()
for c in channels:
rr.add_channel(c)
if rr is None:
print "cannot find one renren platform in channel.json"
return
else:
rr.load_config()
rr.auth()
#load record to avoid repeated reply
try:
sIDs = json.load(open('statusID.json'))
except IOError, e:
if e.errno == 2: #no such file
sIDs = {}
else:
raise e
status_list = rr.home_timeline(NEWS_QUERY_COUNT)
for s in status_list:
s.show()
msg_string = "".join( unicode(x) for x in \
[s.parsed.time, s.ID, s.parsed.username, \
s.parsed.userid, s.parsed.text])
sig = hashlib.sha1(msg_string.encode('utf-8')).hexdigest()
if not sig in sIDs and can_reply(s):
print '[reply it]'
REPLY_STRING = translate(get_word(s.parsed.text))
ret = rr.reply(s.ID, REPLY_STRING.decode('utf-8'))
print "[ret: %s]" % ret
print "[wait for %d seconds]" % REPLY_GAP
time.sleep(REPLY_GAP)
if ret:
sIDs[sig] = msg_string
else:
print '[no reply]'
#save reply record
json.dump(sIDs, open('statusID.json', 'w'))
if __name__ == '__main__':
main()
|
from .base import Filth
from .. import exceptions
class CredentialFilth(Filth):
type = 'credential'
# specify how the username/password are replaced
username_placeholder = 'USERNAME'
password_placeholder = 'PASSWORD'
@property
def placeholder(self):
ubeg, uend = self.match.span('username')
pbeg, pend = self.match.span('password')
return (
self.match.string[self.match.start():ubeg] +
self.prefix + self.username_placeholder + self.suffix +
self.match.string[uend:pbeg] +
self.prefix + self.password_placeholder + self.suffix
)
# override the replace_with method for credentials because the
# prefix/suffix components are mixed into the placeholder
def replace_with(self, replace_with='placeholder', **kwargs):
if replace_with == 'placeholder':
return self.placeholder
else:
raise exceptions.InvalidReplaceWith(replace_with)
|
import tkinter
widget = tkinter.Label(None, text='Hello GUI world!')
widget.pack()
widget.mainloop()
|
"""Helpful fixtures for testing with pytest and PostgreSQL.
Use these fixtures to test code that requires access to a PostgreSQL database.
"""
import pathlib
from typing import Callable, Generator, Union
import psycopg2
import psycopg2.extensions
import psycopg2.sql
import pytest
import testing.postgresql
import lathorp
@pytest.fixture(scope='session')
def pg() -> Generator[testing.postgresql.Postgresql, None, None]:
"""A session-scoped temporary PostgreSQL instance.
Add this fixture to your test function to get access to a temporary PostgreSQL database.
    Because setting up a temporary database is a costly operation that slows your tests down, this
fixture is scoped at the session level, so the database is set up only once per testing
session.
You can create a connection to the temporary database as follows:
>> conn = psycopg2.connect(**pg.dsn())
Yields:
A temporary database.
"""
Postgresql = testing.postgresql.PostgresqlFactory(cache_initialized_db=True)
pg = Postgresql()
yield pg
pg.stop()
Postgresql.clear_cache()
Connector = Callable[[Union[pathlib.Path, None], Union[psycopg2.extensions.cursor, None]],
psycopg2.extensions.connection]
"""A type alias for the function returned by the pg_conn fixture."""
@pytest.fixture(scope='function')
def pg_connect(request, pg) -> Connector:
"""Returns a function that opens a connection to a temporary PostgreSQL instance.
To get a connection in your test function, use it like so:
>> conn = pg_connect(data_path=pathlib.Path('my_path'), cursor_factory=psycopg2.extras.NamedTupleCursor)
You may also omit any or both arguments:
>> conn = pg_connect()
Use the data_path argument to copy test data into the database prior to running your test
function.
The data is automatically deleted after the test function is done, so test functions do not leak
side effects.
The data_path argument may point to a single data file, or a directory of files.
Note that a schema definition must be loaded before the data can be copied.
Use `load_schema_definitions` for that, perhaps together with another session-scoped fixture.
See `tests/conftest.py::init_schema` for an example.
Use the cursor_factory argument to specify a cursor factory for the new connection.
The arguments to the fixture itself are auto-loaded by pytest.
"""
def connector(data_path=None, cursor_factory=None):
dsn = psycopg2.extensions.make_dsn(**pg.dsn())
conn = psycopg2.connect(dsn, cursor_factory=cursor_factory)
tables = []
if data_path:
tables = lathorp.copy_data(conn, data_path)
def finalize():
lathorp.delete_data(conn, tables)
conn.close()
request.addfinalizer(finalize)
return conn
return connector
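# A hedged example of how these fixtures might be used in a test module; the
# query below is trivial on purpose and only illustrates the call pattern
# documented in the fixture docstrings.
def test_database_is_reachable(pg_connect):
    conn = pg_connect()
    with conn.cursor() as cur:
        cur.execute('SELECT 1')
        assert cur.fetchone()[0] == 1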
|
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actor network for Register Allocation."""
from typing import Optional, Sequence, Callable, Text, Any
import gin
import tensorflow as tf
from tf_agents.networks import categorical_projection_network
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.typing import types
from tf_agents.utils import nest_utils
class RegAllocEncodingNetwork(encoding_network.EncodingNetwork):
def __init__(self, **kwargs):
super(RegAllocEncodingNetwork, self).__init__(**kwargs)
    # remove the first layer (Flatten) in postprocessing_layers because it would
    # flatten the B x T x 33 x dim input to B x T x (33 x dim).
self._postprocessing_layers = self._postprocessing_layers[1:]
class RegAllocProbProjectionNetwork(
categorical_projection_network.CategoricalProjectionNetwork):
def __init__(self, **kwargs):
super(RegAllocProbProjectionNetwork, self).__init__(**kwargs)
# shape after projection_layer: B x T x 33 x 1; then gets re-shaped to
# B x T x 33.
self._projection_layer = tf.keras.layers.Dense(
1,
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
scale=kwargs['logits_init_output_factor']),
bias_initializer=tf.keras.initializers.Zeros(),
name='logits')
@gin.configurable
class RegAllocRNDEncodingNetwork(RegAllocEncodingNetwork):
def __init__(self, **kwargs):
pooling_layer = tf.keras.layers.GlobalMaxPool1D(data_format='channels_last')
super(RegAllocRNDEncodingNetwork, self).__init__(**kwargs)
    # add a pooling layer at the end to convert B x T x 33 x dim to
    # B x T x dim.
self._postprocessing_layers.append(pooling_layer)
@gin.configurable
class RegAllocNetwork(network.DistributionNetwork):
"""Creates the actor network for register allocation policy training."""
def __init__(
self,
input_tensor_spec: types.NestedTensorSpec,
output_tensor_spec: types.NestedTensorSpec,
preprocessing_layers: Optional[types.NestedLayer] = None,
preprocessing_combiner: Optional[tf.keras.layers.Layer] = None,
conv_layer_params: Optional[Sequence[Any]] = None,
fc_layer_params: Optional[Sequence[int]] = (200, 100),
dropout_layer_params: Optional[Sequence[float]] = None,
activation_fn: Callable[[types.Tensor],
types.Tensor] = tf.keras.activations.relu,
kernel_initializer: Optional[tf.keras.initializers.Initializer] = None,
batch_squash: bool = True,
dtype: tf.DType = tf.float32,
name: Text = 'RegAllocNetwork'):
"""Creates an instance of `RegAllocNetwork`.
Args:
input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
input.
output_tensor_spec: A nest of `tensor_spec.BoundedTensorSpec` representing
the output.
preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
representing preprocessing for the different observations.
All of these layers must not be already built. For more details see
the documentation of `networks.EncodingNetwork`.
preprocessing_combiner: (Optional.) A keras layer that takes a flat list
of tensors and combines them. Good options include
`tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
This layer must not be already built. For more details see
the documentation of `networks.EncodingNetwork`.
conv_layer_params: Optional list of convolution layers parameters, where
each item is a length-three tuple indicating (filters, kernel_size,
stride).
fc_layer_params: Optional list of fully_connected parameters, where each
item is the number of units in the layer.
dropout_layer_params: Optional list of dropout layer parameters, each item
is the fraction of input units to drop or a dictionary of parameters
according to the keras.Dropout documentation. The additional parameter
`permanent`, if set to True, allows to apply dropout at inference for
approximated Bayesian inference. The dropout layers are interleaved with
the fully connected layers; there is a dropout layer after each fully
connected layer, except if the entry in the list is None. This list must
have the same length of fc_layer_params, or be None.
activation_fn: Activation function, e.g. tf.nn.relu, slim.leaky_relu, ...
kernel_initializer: Initializer to use for the kernels of the conv and
dense layers. If none is provided a default glorot_uniform.
batch_squash: If True the outer_ranks of the observation are squashed into
the batch dimension. This allow encoding networks to be used with
observations with shape [BxTx...].
dtype: The dtype to use by the convolution and fully connected layers.
name: A string representing name of the network.
Raises:
ValueError: If `input_tensor_spec` contains more than one observation.
"""
if not kernel_initializer:
kernel_initializer = tf.compat.v1.keras.initializers.glorot_uniform()
# input: B x T x obs_spec
# output: B x T x 33 x dim
encoder = RegAllocEncodingNetwork(
input_tensor_spec=input_tensor_spec,
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=preprocessing_combiner,
conv_layer_params=conv_layer_params,
fc_layer_params=fc_layer_params,
dropout_layer_params=dropout_layer_params,
activation_fn=activation_fn,
kernel_initializer=kernel_initializer,
batch_squash=batch_squash,
dtype=dtype)
projection_network = RegAllocProbProjectionNetwork(
sample_spec=output_tensor_spec, logits_init_output_factor=0.1)
output_spec = projection_network.output_spec
super(RegAllocNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=(),
output_spec=output_spec,
name=name)
self._encoder = encoder
self._projection_network = projection_network
self._output_tensor_spec = output_tensor_spec
@property
def output_tensor_spec(self):
return self._output_tensor_spec
def call(self,
observations: types.NestedTensor,
step_type: types.NestedTensor,
network_state=(),
training: bool = False,
mask=None):
state, network_state = self._encoder(
observations,
step_type=step_type,
network_state=network_state,
training=training)
outer_rank = nest_utils.get_outer_rank(observations, self.input_tensor_spec)
# mask un-evictable registers.
distribution, _ = self._projection_network(
state, outer_rank, training=training, mask=observations['mask'])
return distribution, network_state
|
import random
import itertools
import csv
from typing import List, Union, Optional, NamedTuple
# Configuration
SIMULATE_ROUNDS = 1000
MATCH_SIZE = 4
MAX_RESHUFFLES = 10
# Data types
class Card(NamedTuple):
suit: str
rank: Union[str, int]
Hand = List[Card]
class Player:
@property
def number(self):
return self._number
def __init__(self, number: int):
self._number: int = number
self.hand: Hand = []
        self.pending_card: Optional[Card] = None
# Constants
SUITS = ('hearts', 'clubs', 'diamonds', 'spades')
RANKS = list(range(1, 10)) + ['jack', 'queen', 'king', 'ace']
CARDS = [Card(suit=suit, rank=rank) for suit in SUITS for rank in RANKS]
class TakeTurnResult(NamedTuple):
next_card: Card
is_winning: bool
def take_turn(player: Player, card: Card) -> TakeTurnResult:
"Returns the card passed to the next player"
# group cards for easier processing
groups = dict()
for c in player.hand:
group = None
if c.rank in groups:
group = groups[c.rank]
else:
group = []
groups[c.rank] = group
group.append(c)
sorted_groups = [{'count': len(v), 'rank': k}
for (k, v) in groups.items()]
sorted_groups.sort(key=lambda group: group['count'])
next_card = card
if card.rank in groups.keys():
# we'll sacrifice a card from the smallest group, but only
# if that would make a bigger group than that card already belongs to
size_of_match = len(groups[card.rank]) + 1
# find the smallest group, we'll sacrifice a card from it
smallest_group = sorted_groups[0]
if smallest_group['count'] < size_of_match:
old_card = next(
c for c in player.hand if c.rank == smallest_group['rank'])
player.hand.remove(old_card)
player.hand.append(card)
next_card = old_card
return TakeTurnResult(next_card=next_card, is_winning=hand_is_winning(player.hand))
def hand_is_winning(hand: Hand) -> bool:
rank = hand[0].rank
for card in hand:
if card.rank != rank:
return False
return True
def format_card(card: Card) -> str:
return f'{card.rank} of {card.suit}'
def format_hand(hand: Hand) -> List[str]:
return [format_card(card) for card in hand]
def format_players(players: List[Player]) -> List[str]:
return [f'{player.number}:{format_card(player.pending_card) if player.pending_card else "..."}' for player in players]
def play_round(num_players: int):
if ((num_players * MATCH_SIZE) + num_players > len(CARDS)):
raise Exception(
"num_players is too high; there are not enough cards to deal out starting hands and complete a round")
# set up
players: List[Player] = [Player(number=i) for i in range(num_players)]
deck = list(CARDS)
random.shuffle(deck)
# deal out starting hands
for _ in range(MATCH_SIZE):
for p in players:
p.hand.append(deck.pop())
dealer = players[0]
player_after_dealer = players[1]
draws = 0
turns = 0
reshuffles = 0
winner = None
discard: List[Card] = []
while True:
# each iteration of this loop is a "draw" (as in the dealer drawing a card)
draws += 1
# make sure the deck has cards left. If not, recycle the discard pile
if (len(deck) == 0):
if reshuffles > MAX_RESHUFFLES:
# don't get caught in an infinite loop
break
deck, discard = discard, []
random.shuffle(deck)
reshuffles += 1
# first, the dealer will draw a card and take their turn
new_card = deck.pop()
turns += 1
dealer_turn = take_turn(dealer, new_card)
if dealer_turn.is_winning:
winner = dealer
break
player_after_dealer.pending_card = dealer_turn.next_card
# to simulate the parallel nature of the game, loop in reverse order through each player
# that has a pending card
players_in_order = [
player for player in players[::-1] if player.pending_card]
for player in players_in_order:
turns += 1
turn = take_turn(player, player.pending_card)
player.pending_card = None
if turn.is_winning:
winner = player
break
# hand the card off to either the next player, or if none, the discard pile
next_player_index = player.number + 1
if (next_player_index < len(players)):
players[next_player_index].pending_card = turn.next_card
else:
discard.append(turn.next_card)
if winner:
break
return {
'reshuffles': reshuffles,
'draws': draws,
'turns': turns,
'winner': winner.number if winner != None else None
}
with open('output.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["# Players", "Winner", "Reshuffles", "Draws", "Turns"])
for num_players in range(2, 11):
for round_num in range(SIMULATE_ROUNDS):
result = play_round(num_players)
writer.writerow([num_players, result['winner'] if result['winner'] != None else "N/A", result['reshuffles'], result['draws'], result['turns']])
|
import datetime
from arend.settings import base
from arend.queue.task import QueueTask
class ProgressUpdater:
def __init__(
self,
queue_task: QueueTask,
suppress_exception: bool = True,
        verbose: bool = True
):
self.queue_task = queue_task
self.verbose = verbose
self.suppress_exception = suppress_exception
def notify(self, message: str):
self.queue_task.detail += f"- {message}\n"
def __call__(self, **kwargs):
self.__dict__.update(kwargs)
return self
def __enter__(self):
self.queue_task.start_time = datetime.datetime.utcnow()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.queue_task.end_time = datetime.datetime.utcnow()
        if exc_type:
            self.queue_task.status = base.FAIL
            self.queue_task.detail = str(exc_val)
            self.queue_task.save()
            return self.suppress_exception
        else:
            self.queue_task.status = base.FINISHED
            self.queue_task.save()
            return True
|
from django.contrib import admin
# Register your models here.
from .models import OrderModel, WoodFormModel, SkinFormModel, PaperFormModel
admin.site.register(OrderModel)
admin.site.register(WoodFormModel)
admin.site.register(SkinFormModel)
admin.site.register(PaperFormModel)
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
regions = np.zeros((100,200,5))
print(regions.shape)
regions[10:30,10:30,0] = 1
regions[10:30,40:70,0] = 2
regions[50:80,80:140,0] = 3
regions[85:100,100:150,0] = 4
regions[15:40,150:180,0] = 5
regions[10:30,10:70,1] = 1
regions[50:80,80:140,1] = 2
regions[85:100,100:150,1] = 3
regions[15:40,150:180,1] = 4
regions[10:30,10:70,2] = 1
regions[50:80,80:140,2] = 2
regions[85:100,100:150,2] = 3
regions[15:40,150:180,2] = 4
regions[10:30,10:60,3] = 1
regions[50:80,90:120,3] = 2
regions[85:100,110:140,3] = 3
regions[15:40,160:180,3] = 4
regions[10:30,10:65,4] = 1
regions[50:80,90:125,4] = 2
regions[85:100,110:157,4] = 3
regions[15:43,160:190,4] = 4
mask = np.ones((100,200))
for i in range(5):
mask = mask*regions[:,:,i]
mask[mask>0] = 1
labels = measure.label(mask, background=0)
plt.subplot(2,3,1)
plt.imshow(regions[:,:,0])
plt.subplot(2,3,2)
plt.imshow(regions[:,:,1])
plt.subplot(2,3,3)
plt.imshow(regions[:,:,2])
plt.subplot(2,3,4)
plt.imshow(regions[:,:,3])
plt.subplot(2,3,5)
plt.imshow(regions[:,:,4])
plt.subplot(2,3,6)
plt.imshow(labels)
plt.title("common mask")
plt.show()
|
from .Cilindro import Cilindro
from . import BinWriter as bin
import os
class Indice:
def __init__(self, pkey, ruta):
self.indx = [None]*30
self.intervalo = 30
self.pkey = pkey
self.ruta = ruta
self.readI()
def readI(self):
if os.path.exists(self.ruta+"/indx.b"):
data = bin.read(self.ruta+"/indx.b")
i = 0
if type(data[-1]) is list: self.pkey = data[-1]
for d in data:
if type(d) is str:
self.indx[i] = Cilindro(d, self.pkey, i, self.ruta)
elif d is None:
self.indx[i] = None
i+=1
def writeI(self):
data=[]
for x in self.indx:
if x != None:
data.append(x.nombre)
else:
data.append(None)
data.append(self.pkey)
bin.write(data, self.ruta+"/indx.b")
def hash(self, val):
e = self._checkKey(val)
if type(e) is int:
i = self._hashn(e)
else:
i = self._hashl(e)
return i
def insert(self, registro):
val = []
try:
for key in self.pkey:
val.append(registro[key])
i = self.hash(val[0])
if self.indx[i] == None:
self.indx[i] = Cilindro("CS" + str(i), self.pkey, i, self.ruta)
bin.write([None] * 30, self.ruta + "/" + "CS" + str(i) + ".b")
self.writeI()
return self.indx[i].insert(registro)
except:
return 1
def _checkKey(self, key):
try:
r = int(key)
return r
except:
return key
def _hashl(self, key):
fst = ord(key[0].upper())
return (fst - 65) % self.intervalo
def _hashn(self, key):
return (key // 30) % self.intervalo
def update(self, register, val):
try:
if type(val[0]) is int:
i = self._hashn(val[0])
else:
i = self._hashl(val[0])
if self.indx[i] is None:
return 4
return self.indx[i].update(register, val)
except:
return 1
def delete(self, val):
try:
if type(val[0]) is int:
i = self._hashn(val[0])
else:
i = self._hashl(val[0])
if self.indx[i] is None:
return 4
return self.indx[i].delete(val)
except:
return 1
def extractRow(self, val):
try:
if type(val[0]) is int:
i = self._hashn(val[0])
else:
i = self._hashl(val[0])
if self.indx[i] is None:
return []
return self.indx[i].extractRow(val)
except:
return 1
def readAll(self):
data=[]
try:
for cil in self.indx:
if cil == None:
continue
data.extend(cil.readAll())
return data
except:
return None
def readRange(self, columnNumber ,lower, upper):
data = []
try:
for cil in self.indx:
if cil == None:
continue
data.extend(cil.readRange(columnNumber, lower, upper))
return data
except:
return None
    # Refresh every record to its most recent version and rewrite the index to its latest version
def refreshMem(self):
for x in self.indx:
if x != None:
x.indx = bin.read(self.ruta +"/"+ x.nombre + ".b")
x.pkeys = self.pkey
self.writeI()
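# Standalone sketch of the bucketing scheme used by _hashl/_hashn above, kept
# outside the class so it can run without a Cilindro or an index file on disk.
# The sample keys are illustrative only.
if __name__ == "__main__":
    intervalo = 30
    def hash_demo(key):
        try:
            return (int(key) // 30) % intervalo                     # numeric keys: blocks of 30
        except (TypeError, ValueError):
            return (ord(str(key)[0].upper()) - 65) % intervalo      # letters: A=0, B=1, ...
    for sample in (7, 61, "Ana", "zeta"):
        print(sample, "->", hash_demo(sample))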
|
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Element meta-class.
$Id: test_element.py 39768 2005-10-31 13:57:35Z tlotze $
"""
import unittest
from zope.interface.interface import Element
class TestElement(unittest.TestCase):
def test_taggedValues(self):
"""Test that we can update tagged values of more than one element
"""
e1 = Element("foo")
e2 = Element("bar")
e1.setTaggedValue("x", 1)
e2.setTaggedValue("x", 2)
self.assertEqual(e1.getTaggedValue("x"), 1)
self.assertEqual(e2.getTaggedValue("x"), 2)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestElement))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
import PikaStdLib
import machine
time = machine.Time()
pwm = machine.PWM()
pwm.setPin('PA8')
pwm.setFrequency(2000)
pwm.setDuty(0.5)
pwm.enable()
mem = PikaStdLib.MemChecker()
while True:
mem.now()
time.sleep_ms(500)
pwm.setDuty(0.5)
time.sleep_ms(500)
pwm.setDuty(0.001)
|
from django.conf import settings
from django.conf.urls import url
from django.views.static import serve
urlpatterns = [
url(r'^Object_Detection/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
]
|
#-------------------------------------------------------------------------------
# Name: Logistic Regression
# Purpose:
#
# Author: Nonlining
#
# Created: 17/04/2017
# Copyright: (c) Nonlining 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
import string
import sys
import numpy as np
def extract(data_frame, features, label):
data_frame['constant'] = 1.0
features = ['constant'] + features
features_matrix = data_frame[features].to_numpy()
if label != None:
label_array = data_frame[label].to_numpy()
else:
label_array = []
return(features_matrix, label_array)
def remove_punctuation(text):
    # str.translate(None, ...) is Python 2 only; use a deletion table for Python 3
    table = str.maketrans('', '', string.punctuation)
    return str(text).translate(table)
def countWords(string):
wordCount = {}
for i in string.split(' '):
if i in wordCount:
wordCount[i] += 1
else:
wordCount[i] = 1
return wordCount
def get_classification_accuracy(model, data, true_labels):
pred_result = model.predict(data)
correct = sum(pred_result == true_labels)
accuracy = correct/float(len(true_labels))
return accuracy
def compute_probability(score):
return 1.0/(1 + np.exp(-score))
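# Small numeric sanity check for compute_probability: scores of 0, a large
# positive score and a large negative score should map to 0.5, ~1 and ~0.
if __name__ == "__main__":
    scores = np.array([0.0, 4.0, -4.0])
    print(compute_probability(scores))   # -> [0.5, ~0.982, ~0.018]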
|
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
def ret_contenttypes(*args, **kwargs):
"""
Takes a blacklist or a whitelist of model names
and returns queryset of ContentTypes
"""
if not 'app_label' in kwargs.keys():
raise ImproperlyConfigured('Missing app_label')
app_label = kwargs['app_label']
whitelist = kwargs['whitelist'] if 'whitelist' in kwargs.keys() else None
blacklist = kwargs['blacklist'] if 'blacklist' in kwargs.keys() else None
if whitelist and blacklist:
raise ImproperlyConfigured("Dont configure kwargs['blacklist'] and kwargs['whitelist']")
if blacklist:
return ContentType.objects.filter(app_label=app_label).exclude(model__in=blacklist)
if whitelist:
tmp_whitelist = ContentType.objects.none()
for name in whitelist:
tmp_whitelist = tmp_whitelist | ContentType.objects.filter(app_label=app_label, model=name)
return tmp_whitelist
return ContentType.objects.filter(app_label=app_label)
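# Hedged usage sketch: the 'blog', 'post' and 'comment' names below are
# hypothetical app/model labels used only to show the whitelist/blacklist
# call patterns of ret_contenttypes.
def _example_usage():
    everything = ret_contenttypes(app_label='blog')
    only_posts = ret_contenttypes(app_label='blog', whitelist=['post'])
    no_comments = ret_contenttypes(app_label='blog', blacklist=['comment'])
    return everything, only_posts, no_comments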
|
def funcTwo():
print("can you add stuff?")
|