content stringlengths 5 1.05M |
|---|
from __future__ import absolute_import
from .utils import (gen_404, load_configuration, db_connect, load_schemas,
_unpack_params, _return2client, _stringify_data)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 11:13:47 2018
https://jeremykun.com/2013/11/08/adversarial-bandits-and-the-exp3-algorithm/
@author: shivap
"""
import random
# draw: [float] -> int
# pick an index from the given list of floats proportionally
# to the size of the entry (i.e. normalize to a probability
# distribution and draw according to the probabilities).
def draw(weights):
    """Sample an index from *weights* proportionally to each entry.

    Equivalent to normalizing *weights* into a probability distribution
    and drawing a single index from it (roulette-wheel selection).

    Args:
        weights: sequence of non-negative floats.

    Returns:
        The sampled index, or None when *weights* is empty.
    """
    choice = random.uniform(0, sum(weights))
    choiceIndex = 0
    for weight in weights:
        choice -= weight
        if choice <= 0:
            return choiceIndex
        choiceIndex += 1
    # BUG FIX: floating-point rounding can leave `choice` marginally above
    # zero after the final subtraction, in which case the original code fell
    # off the loop and returned None.  Fall back to the last index; an empty
    # input still yields None as before.
    return choiceIndex - 1 if choiceIndex else None
# distr: [float] -> (float)
# Normalize a list of floats to a probability distribution. Gamma is an
# egalitarianism factor, which tempers the distribtuion toward being uniform as
# it grows from zero to one.
def distr(weights, gamma=0.0):
    """Normalize *weights* into a probability distribution.

    *gamma* is an egalitarianism factor that blends the proportional
    distribution toward uniform: 0 keeps it purely proportional,
    1 makes it fully uniform.
    """
    total = float(sum(weights))
    count = len(weights)
    probabilities = []
    for w in weights:
        probabilities.append((1.0 - gamma) * (w / total) + (gamma / count))
    return tuple(probabilities)
def mean(aList):
    """Arithmetic mean of the values in *aList*; returns 0 for an empty input."""
    total = 0
    n = 0
    for value in aList:
        total += value
        n += 1
    if n == 0:
        return 0
    return total / n
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Command-line tool: look up book metadata by ISBN, EAN, or JPNO and print
# the result as JSON on stdout.
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
import json
import argparse
import sys
import os
# Make the package's parent directory importable when this file is run as a
# script.  NOTE: this must happen before the relative imports below.
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '..')
sys.path.insert(0, path)
from .__init__ import metadata_from_isbn, metadata_from_ean, metadata_from_jpno
from .metadata import Metadata
# Exactly one of --isbn/--ean/--jpno is expected as the lookup key; the
# service credentials are optional and passed through to the lookup helpers.
parser = argparse.ArgumentParser(
    description='find book metadata')
parser.add_argument('--isbn', action="store", dest="isbn")
parser.add_argument('--ean', action="store", dest="ean")
parser.add_argument('--jpno', action="store", dest="jpno")
parser.add_argument('--rakuten-application-id', action="store",
                    dest="rakuten_application_id")
parser.add_argument('--amazon-auth-info', action="store",
                    dest="amazon_auth_info")
parser.add_argument('--use-wikipedia', action="store_true",
                    default=False, dest="use_wikipedia")
parser.add_argument('--prettyprint', action="store_true",
                    default=False, dest="prettyprint")
parser.add_argument('--include-null-value-field', action='store_true',
                    default=False, dest='include_none_value_field')
args = parser.parse_args()
# Amazon auth is supplied as one comma-separated string on the command line.
amazon_auth_info = None
if args.amazon_auth_info:
    amazon_auth_info = args.amazon_auth_info.split(',')
rakuten_application_id = None
if args.rakuten_application_id:
    rakuten_application_id = args.rakuten_application_id
use_wikipedia = args.use_wikipedia
# Default to an empty Metadata object when no lookup key was given.
metadata = Metadata()
if args.isbn:
    metadata = metadata_from_isbn(
        args.isbn,
        amazon_auth_info=amazon_auth_info,
        rakuten_application_id=rakuten_application_id,
        use_wikipedia=use_wikipedia)
elif args.ean:
    metadata = metadata_from_ean(
        args.ean,
        amazon_auth_info=amazon_auth_info,
        rakuten_application_id=rakuten_application_id,
        use_wikipedia=use_wikipedia)
elif args.jpno:
    metadata = metadata_from_jpno(
        args.jpno,
        amazon_auth_info=amazon_auth_info,
        rakuten_application_id=rakuten_application_id,
        use_wikipedia=use_wikipedia)
# ensure_ascii=False keeps non-ASCII titles readable in the JSON output.
print(json.dumps(metadata.todict(args.include_none_value_field),
                 ensure_ascii=False, indent=2 if args.prettyprint else None))
|
#############
#JSON output#
#############
# Scrapy feed-export settings: write pretty-printed, UTF-8 JSON.
FEED_FORMAT = "json"
FEED_EXPORT_ENCODING = "utf-8"
# %(time)s is expanded by Scrapy with the crawl start timestamp.
FEED_URI = "scrapy_generator %(time)s.json"
FEED_EXPORT_INDENT = 4
# NOTE(review): `items_generator` is not defined in this file — presumably a
# list of exported field names defined/imported elsewhere; verify before use.
FEED_EXPORT_FIELDS = items_generator
|
# author: PyShine
# website: http://www.pyshine.com
# import the necessary packages
import numpy as np
import cv2
import socket,time
import queue
import sounddevice as sd
import threading
import matplotlib
import copy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
import _thread
from multiprocessing import Process
from collections import deque
import tempfile
import soundfile as sf
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
def putBText(img,text,text_offset_x=20,text_offset_y=20,vspace=10,hspace=10, font_scale=1.0,background_RGB=(228,225,222),text_RGB=(1,1,1),font = cv2.FONT_HERSHEY_DUPLEX,thickness = 2,alpha=0.6,gamma=0):
    """Draw *text* on *img* over a semi-transparent colored box.

    Inputs:
        img: cv2 (BGR) image to draw on, modified in place.
        text_offset_x, text_offset_y: X,Y location where the text starts.
        vspace, hspace: vertical/horizontal padding between text and box edge.
        font_scale: font size.
        background_RGB: box color as (R, G, B).
        text_RGB: text color as (R, G, B).
        font: cv2 font constant, e.g. cv2.FONT_HERSHEY_DUPLEX.
        thickness: stroke thickness of the text.
        alpha: opacity (0..1) of the original image under the box.
        gamma: scalar added by cv2.addWeighted; 0 by default.
    Output:
        img: the same cv2 image with text and background drawn.
    """
    bg_r, bg_g, bg_b = background_RGB
    fg_r, fg_g, fg_b = text_RGB
    (text_w, text_h), _ = cv2.getTextSize(text, font, fontScale=font_scale, thickness=thickness)
    x, y = text_offset_x, text_offset_y
    top, bottom = y - vspace, y + text_h + vspace
    left, right = x - hspace, x + text_w + hspace
    # Blend a solid-color rectangle with the underlying image region.
    patch = img[top:bottom, left:right]
    ones = np.ones(patch.shape, dtype=np.uint8)
    b_ch, g_ch, r_ch = cv2.split(ones)
    # img is BGR, so the RGB inputs are applied in reverse channel order.
    colored = cv2.merge((bg_b * b_ch, bg_g * g_ch, bg_r * r_ch))
    blended = cv2.addWeighted(patch, alpha, colored, 1 - alpha, gamma)
    img[top:bottom, left:right] = blended
    cv2.putText(img, text, (x, y + text_h), font, fontScale=font_scale,
                color=(fg_b, fg_g, fg_r), thickness=thickness)
    return img
def audioCapture(mode='send',record=False,filename='recorded',typename='.wav',dirname=''):
    """Start a duplex audio stream in a background thread.

    mode: 'send' captures microphone blocks into the returned queue;
        'get' plays blocks taken from the queue instead.
    record: when True, also write captured audio to a temp .wav file.
    filename/typename/dirname: pieces of the temp file name.

    Returns:
        (audio, q): a bounded queue.Queue of audio blocks and a deque
        holding the most recent 20 blocks.
    """
    q = deque(maxlen=20)
    filename = tempfile.mktemp(prefix=filename,suffix=typename, dir=dirname)
    frame = [0]  # unused; kept for compatibility
    audio = queue.Queue(maxsize=20)
    def getAudio():
        # Callback runs on the audio driver's thread once per block.
        def callback(indata, outdata, frames, time, status):
            if status:
                print(status)
            if mode=='get':
                # Playback: copy a queued block into the output buffer.
                try:
                    frame = audio.get()
                    outdata[:] = frame
                    q.append(frame)
                except :
                    pass
            else:
                # Capture: publish the input block to the queue and deque.
                audio.put(indata)
                q.append(indata)
        if record==True:
            with sf.SoundFile(filename, mode='x', samplerate=44100,channels=2) as file:
                with sd.Stream( channels=2,blocksize=1024, callback=callback):
                    print('press Ctrl+C to stop the recording')
                    # NOTE(review): only a single block is written before the
                    # stream context exits — presumably a loop was intended;
                    # confirm before relying on the record path.
                    file.write(audio.get())
                    # input()
                    # exit()
        else:
            # Block the worker thread until the user presses Enter.
            with sd.Stream( channels=2,blocksize=1024, callback=callback):
                input()
                exit()
    thread = threading.Thread(target=getAudio, args=())
    thread.start()
    return audio,q
def showPlot(audio,name='pyshine.com',length=8,xmin=0,ymin=-0.5,xmax=8*1024,ymax=0.5,color = (0,1,0.29)):
    """Continuously plot audio frames from *audio* in a background thread.

    Args:
        audio: deque of 2-channel numpy frames (newest popped from the right),
            as produced by audioCapture().
        name: plot title.
        length: rolling-window length in 1024-sample blocks.
        xmin/xmax, ymin/ymax: axis limits; the default xmax (8*1024) matches
            the default length.
        color: line color.
    """
    def getAudio():
        global plotdata
        fig, ax = plt.subplots(figsize=(8, 2))
        # BUG FIX: the title was the literal 'naem' and the `length`
        # parameter was shadowed by a hard-coded local `length = 8`;
        # both now honor the caller's arguments.
        ax.set_title(name)
        plotdata = np.zeros((length * 1024, 2))
        lines = ax.plot(plotdata, color=color)
        ax.set_facecolor((0, 0, 0))
        ax.set_ylim(ymin=ymin, ymax=ymax)
        ax.set_xlim(xmin=xmin, xmax=xmax)
        def animate(i):
            # Shift the rolling window left and append the newest frame.
            global plotdata
            try:
                data = audio.pop()
                shift = len(data)
                plotdata = np.roll(plotdata, -shift, axis=0)
                plotdata[-shift:, :] = data
                ax.set_title(name)
            except Exception:
                # Deque empty (or being torn down): keep the previous window.
                try:
                    ax.set_title(name)
                except Exception:
                    pass
            for column, line in enumerate(lines):
                line.set_ydata(plotdata[:, column])
            return lines
        ani = FuncAnimation(fig, animate, interval=30)
        # Keep the interactive figure alive; this loop never returns.
        while True:
            plt.ion()
            plt.show()
            plt.pause(0.001)
    thread = threading.Thread(target=getAudio, args=())
    thread.start()
def _showPlot(audio,xmin=0,ymin=-0.5,xmax=1024,ymax=0.5):
    """Plot frames taken from *audio* (a queue.Queue) via a matplotlib
    animation; redraws one queued frame roughly every 30 ms."""
    name='name'
    # Get the Figure
    fig = plt.figure(figsize=(8,3))
    ax = fig.add_subplot(1,1,1)
    ax.set_facecolor((0,0,0))
    fig.tight_layout()
    ax.yaxis.grid(True)
    def animate(i):
        # Animation callback: clear the axes and plot the next frame.
        try:
            ax.clear()
            ys = []
            ys = audio.get()
            audio.task_done()
            T= ys.shape[0]
            ys = ys[0:T//1]  # no-op slice; kept as-is
            X_m = ys
            ax.plot(X_m, '.', color = (0.25,1,0))
            # ax.clear() wiped the limits; restore them every redraw.
            ax.set_ylim( ymin=ymin, ymax=ymax)
            ax.set_xlim( xmin=xmin, xmax=xmax)
            ax.set_title(name)
        except Exception as e:
            # Queue empty or axes torn down: restore limits and report.
            try:
                ax.set_ylim( ymin=ymin, ymax=ymax)
                ax.set_xlim( xmin=xmin, xmax=xmax)
                ax.set_title(name)
            except:
                pass
            pass
            print(e)
    # Lets call the animation function
    ani = animation.FuncAnimation(fig, animate, interval=30)
    plt.ion()
    plt.show()
    plt.pause(0.001)
class RPSNET:
    """Small LeNet-style CNN used for rock-paper-scissors image classification."""
    # BUG FIX: `build` took no `self` parameter, so calling it on an instance
    # raised a TypeError.  Marking it @staticmethod keeps class-level calls
    # (RPSNET.build(...)) working and makes instance calls work too.
    @staticmethod
    def build(width, height, depth, classes):
        """Assemble and return the (uncompiled) Keras Sequential model.

        Args:
            width, height, depth: input image dimensions (channels-last).
            classes: number of output classes.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        # CONV -> RELU -> POOL
        model.add(Conv2D(20, (5, 5), padding="same",
                         input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV -> RELU -> POOL
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Fully connected head, then per-class scores.
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation("sigmoid"))
        model.add(Dense(classes))
        model.add(Activation("sigmoid"))
        return model
# TEXT examples
# Hershey Simplex
# Hershey Plain
# Hershey Duplex
# Hershey Complex
# Hershey Triplex
# Hershey Complex Small
# Hershey Script Simplex
# Hershey Script Complex
|
#!/usr/bin/python
# Python script to delete all C++ source and header files from a root directory
import os
import sys
import time
import datetime
import logging
from optparse import OptionParser
FILE_EXTENSIONS = ['.h', '.cpp']
# -----------------------------------------------------------------------------
def step(ext, dirname, names):
    '''Visitor for os.path.walk: delete files in *dirname* whose name ends
    with one of the extensions in *ext* (case-insensitive).  Honors the
    global command-line `options.dry_run` flag.'''
    for suffix in (e.lower() for e in ext):
        for name in (n for n in names if n.lower().endswith(suffix)):
            target = os.path.join(dirname, name)
            logging.info(target)
            if not options.dry_run:
                os.remove(target)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): Python 2 only — uses the `print` statement and
    # os.path.walk, both removed in Python 3.
    usage = "%prog - Delete all c++ header and source files in a given root directory."
    parser = OptionParser(usage=usage)
    parser.add_option("-d", "--root_directory",
                      action="store", type="string", dest="root_directory",
                      default="")
    parser.add_option("--dry_run",
                      action="store_true", dest="dry_run", default=False,
                      help='print output without deleting any files')
    parser.add_option("--log_dir",
                      action="store", type="string", dest="log_dir",
                      default="/tmp",
                      help="Logfiles are written into this directory.")
    parser.add_option("--log_info_to_file",
                      action="store_true", dest="log_info_to_file",
                      default=False)
    (options, args) = parser.parse_args()
    # Open logfile.
    if not options.log_dir or not os.path.exists(options.log_dir):
        logging.critical('log_dir must exist: %s', options.log_dir)
        sys.exit(-1)
    # Timestamped log name so repeated runs do not clobber each other.
    start_time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
    log_directory_name = os.path.join(
        options.log_dir, 'DataLoggingMonitor-%s.log' % start_time_str)
    FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
    if options.log_info_to_file:
        logging.basicConfig(filename=log_directory_name,
                            level=logging.DEBUG, format=FORMAT)
        print '===> Running with logfile: %s' % log_directory_name
    else:
        logging.basicConfig(level=logging.DEBUG, format=FORMAT)
    logging.info('Running with configuration:')
    logging.info(parser.parse_args())
    # check root directory
    if not options.root_directory or not os.path.exists(options.root_directory):
        logging.critical('root directory does not exist: %s',
                         options.root_directory)
        sys.exit(-1)
    # walks through the directory tree, performing the 'step' function at
    # every step
    os.path.walk(options.root_directory, step, FILE_EXTENSIONS)
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
from gpu_tests import common_browser_args as cba
from gpu_tests import color_profile_manager
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import pixel_test_pages
from gpu_tests import skia_gold_integration_test_base
from py_utils import cloud_storage
from telemetry.util import image_util
# Checked-in locations of the Maps WPR/perf data and of the pixel
# expectations JSON inside the Chromium source tree.
_MAPS_PERF_TEST_PATH = os.path.join(path_util.GetChromiumSrcDir(), 'tools',
                                    'perf', 'page_sets', 'maps_perf_test')
_DATA_PATH = os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test',
                          'gpu', 'gpu_tests')
# Test name reported to Skia Gold.
_TEST_NAME = 'Maps_maps'
class MapsIntegrationTest(
        skia_gold_integration_test_base.SkiaGoldIntegrationTestBase):
    """Google Maps pixel tests.
    Note: this test uses the same WPR as the smoothness.maps benchmark
    in tools/perf/benchmarks. See src/tools/perf/page_sets/maps.py for
    documentation on updating the WPR archive.
    """
    @classmethod
    def Name(cls):
        return 'maps'

    @classmethod
    def SetUpProcess(cls):
        """Force an sRGB color profile, fetch the dataset, start the browser."""
        options = cls.GetParsedCommandLineOptions()
        color_profile_manager.ForceUntilExitSRGB(
            options.dont_restore_color_profile_after_test)
        super(MapsIntegrationTest, cls).SetUpProcess()
        cls.CustomizeBrowserArgs([
            cba.FORCE_COLOR_PROFILE_SRGB,
            cba.ENSURE_FORCED_COLOR_PROFILE,
        ])
        cloud_storage.GetIfChanged(
            os.path.join(_MAPS_PERF_TEST_PATH, 'load_dataset'),
            cloud_storage.PUBLIC_BUCKET)
        cls.SetStaticServerDirs([_MAPS_PERF_TEST_PATH])
        cls.StartBrowser()

    @classmethod
    def TearDownProcess(cls):
        # BUG FIX: the super() arguments were reversed
        # (`super(cls, MapsIntegrationTest)`), which raises
        # "TypeError: super(type, obj): obj must be an instance or subtype
        # of type" at teardown.  The class comes first, then the binding.
        super(MapsIntegrationTest, cls).TearDownProcess()
        cls.StopWPRServer()

    @classmethod
    def GenerateGpuTests(cls, options):
        cls.SetParsedCommandLineOptions(options)
        # The maps_pixel_expectations.json contain the actual image
        # expectations. If the test fails, with errors greater than the
        # tolerance for the run, then the logs will report the actual failure.
        #
        # There will also be a Skia Gold Triage link, this will be used to
        # store the artifact of the failure to help with debugging. There are
        # no accepted positive baselines recorded in Skia Gold, so its diff
        # will not be sufficient to debugging the failure.
        yield ('Maps_maps', 'file://performance.html', ())

    def RunActualGpuTest(self, url, *_):
        """Load the Maps page, run its test, and validate a screenshot."""
        tab = self.tab
        action_runner = tab.action_runner
        action_runner.Navigate(url)
        action_runner.WaitForJavaScriptCondition('window.startTest != undefined')
        action_runner.EvaluateJavaScript('window.startTest()')
        action_runner.WaitForJavaScriptCondition('window.testDone', timeout=320)
        # Wait for the page to process immediate work and load tiles.
        action_runner.EvaluateJavaScript('''
            window.testCompleted = false;
            requestIdleCallback(
                () => window.testCompleted = true,
                { timeout : 10000 })''')
        action_runner.WaitForJavaScriptCondition('window.testCompleted',
                                                 timeout=30)
        screenshot = tab.Screenshot(5)
        if screenshot is None:
            self.fail('Could not capture screenshot')
        dpr = tab.EvaluateJavaScript('window.devicePixelRatio')
        # FIX: print as a function (single argument), valid on Python 2 and 3;
        # the original used the Python-2-only print statement.
        print('Maps\' devicePixelRatio is ' + str(dpr))
        expected = _ReadPixelExpectations('maps_pixel_expectations.json')
        page = _GetMapsPageForUrl(url, expected)
        # The bottom corners of Mac screenshots have black triangles due to
        # the rounded corners of Mac windows. So, crop the bottom few rows off
        # now to get rid of those. The triangles appear to be 5 pixels wide
        # and tall regardless of DPI, so 10 pixels should be sufficient.
        if self.browser.platform.GetOSName() == 'mac':
            img_height, img_width = screenshot.shape[:2]
            screenshot = image_util.Crop(screenshot, 0, 0, img_width,
                                         img_height - 10)
        x1, y1, x2, y2 = _GetCropBoundaries(screenshot)
        screenshot = image_util.Crop(screenshot, x1, y1, x2 - x1, y2 - y1)
        self._ValidateScreenshotSamplesWithSkiaGold(tab, page, screenshot, dpr)

    @classmethod
    def ExpectationsFiles(cls):
        return [
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                'test_expectations', 'maps_expectations.txt')
        ]
def _ReadPixelExpectations(expectations_file):
    """Load *expectations_file* from _DATA_PATH and return the parsed JSON."""
    expectations_path = os.path.join(_DATA_PATH, expectations_file)
    with open(expectations_path, 'r') as handle:
        return json.load(handle)
def _GetMapsPageForUrl(url, expected_colors):
    """Build the PixelTestPage describing the Maps test for *url*."""
    # Exact test_rect is arbitrary, just needs to encapsulate all pixels
    # that are tested.
    return pixel_test_pages.PixelTestPage(
        url=url,
        name=_TEST_NAME,
        test_rect=[0, 0, 1000, 800],
        tolerance=10,
        expected_colors=expected_colors)
def _GetCropBoundaries(screenshot):
    """Returns the boundaries to crop the screenshot to.
    Specifically, we look for the boundaries where the white background
    transitions into the (non-white) content we care about.
    Args:
        screenshot: A screenshot returned by Tab.Screenshot() (numpy ndarray?)
    Returns:
        A 4-tuple (x1, y1, x2, y2) denoting the top left and bottom right
        coordinates to crop to.
    """
    # NOTE(review): uses xrange, i.e. Python 2; use range on Python 3.
    img_height, img_width = screenshot.shape[:2]
    def RowIsWhite(row):
        # A row counts as white only if every pixel is pure (255,255,255).
        for col in xrange(img_width):
            pixel = image_util.GetPixelColor(screenshot, col, row)
            if pixel.r != 255 or pixel.g != 255 or pixel.b != 255:
                return False
        return True
    def ColumnIsWhite(column):
        for row in xrange(img_height):
            pixel = image_util.GetPixelColor(screenshot, column, row)
            if pixel.r != 255 or pixel.g != 255 or pixel.b != 255:
                return False
        return True
    # Defaults keep the whole image when no transition is found.
    x1 = y1 = 0
    x2 = img_width
    y2 = img_height
    # Scan inward from the left/top for the first non-white column/row ...
    for column in xrange(img_width):
        if not ColumnIsWhite(column):
            x1 = column
            break
    for row in xrange(img_height):
        if not RowIsWhite(row):
            y1 = row
            break
    # ... then continue to the first all-white column/row past the content.
    for column in xrange(x1 + 1, img_width):
        if ColumnIsWhite(column):
            x2 = column
            break
    for row in xrange(y1 + 1, img_height):
        if RowIsWhite(row):
            y2 = row
            break
    return x1, y1, x2, y2
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: collect every GPU test in this module."""
    # The three-argument signature is mandated by unittest; none are used.
    del loader, tests, pattern
    return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
# Build script for the itk-labelerodedilate Python package (scikit-build).
try:
    from skbuild import setup
except ImportError:
    print('scikit-build is required to build from source.', file=sys.stderr)
    print('Please run:', file=sys.stderr)
    print('', file=sys.stderr)
    # BUG FIX: this message went to stdout while its siblings went to stderr.
    print(' python -m pip install scikit-build', file=sys.stderr)
    sys.exit(1)
setup(
    name='itk-labelerodedilate',
    version='1.1.1',
    author='Richard Beare',
    author_email='Richard.Beare@med.monash.edu.au',
    packages=['itk'],
    package_dir={'itk': 'itk'},
    download_url=r'https://github.com/InsightSoftwareConsortium/LabelErodeDilate',
    description=r'An ITK module for erode and dilate operations on label images',
    # BUG FIX: adjacent string literals are concatenated with no separator,
    # so several word pairs ran together ("matherode", "StructuringFunctions",
    # "ComputingTechniques", ...).  Trailing spaces added where needed.
    long_description='LabelErodeDilate provides classes for morphological '
                     'erode and dilate operations on label images.\n'
                     'Please refer to: '
                     'Beare, R. and Jackway, P. '
                     '“Parallel Algorithms via Scaled Paraboloid Structuring '
                     'Functions for Spatially-Variant and Label-Set Dilations '
                     'and Erosions”, '
                     '2011 International Conference on Digital Image Computing '
                     'Techniques and Applications (DICTA). 180--185. 2011. IEEE.\n',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: C++",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Healthcare Industry",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Medical Science Apps.",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Software Development :: Libraries",
        "Operating System :: Android",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS"
    ],
    license='Apache',
    keywords='ITK InsightToolkit',
    url=r'https://itk.org/',
    install_requires=[
        r'itk>=5.0b01'
    ]
)
|
import sys
import re
# For each stdin line, report whether it starts with a word containing "ss".
for raw_line in sys.stdin:
    normalized = raw_line.strip().lower()
    # Anchored at the start: optional word chars, then an 's' repeated.
    hiss_match = re.match(r'(\w*([s])\2+)', normalized)
    print("hiss" if hiss_match else "no hiss")
# Competitive-programming solution: given R red and B blue flowers, maximize
# the number of bouquets where each bouquet needs x flowers of one color
# (x red OR y blue), binary-searching the answer.
R, B = map(int, input().split())
x, y = map(int, input().split())
def is_ok(n):
    # Feasibility of assembling n bouquets: reserve one red and one blue
    # per bouquet, then the leftovers must cover the remaining x-1 or y-1
    # flowers of each bouquet.
    # NOTE(review): assumes x, y >= 2 (otherwise division by zero) —
    # confirm against the problem constraints.
    r = R - n
    if r < 0:
        return False
    b = B - n
    if b < 0:
        return False
    return r // (x - 1) + b // (y - 1) >= n
# Binary search on the answer: `ok` stays feasible, `ng` stays infeasible.
ok = 0
ng = 10 ** 18 + 1
while ng - ok > 1:
    m = (ok + ng) // 2
    if is_ok(m):
        ok = m
    else:
        ng = m
print(ok)
|
from getpass import getpass
from mysql.connector import connect, Error
# Interactive demo: connect to the online_movie_rating database and print
# every movie as "title (year)" with its rating, highest rated first.
try:
    # Establishing a connection with MySQL
    with connect(
        host="localhost",
        user=input("Enter username: "),
        password=getpass("Enter password: "),
        database="online_movie_rating",
    ) as connection:
        # Filtering results
        select_movies_query = """
        SELECT CONCAT(title, " (", release_year, ")"), rate
        FROM movies
        ORDER BY rate DESC
        """
        with connection.cursor() as cursor:
            cursor.execute(select_movies_query)
            for movie in cursor.fetchall():
                print(movie)
except Error as e:
    # Connection/query failures are printed rather than propagated.
    print(e)
|
from .core import *
from .logger import * |
#!/usr/bin/env python
# $Id: test_language.py 8372 2019-08-27 12:11:15Z milde $
# Authors: Engelbert Gruber <grubert@users.sourceforge.net>;
# David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for language module completeness.
Specify a language code (e.g. "de") as a command-line parameter to test only
that language.
"""
import sys
import os
import re
import DocutilsTestSupport # must be imported before docutils
import docutils.languages
import docutils.parsers.rst.languages
from docutils.parsers.rst import states, directives, roles
import docutils.utils, docutils.frontend
# Shared docutils settings and reporter used by every test case below.
_settings = docutils.frontend.OptionParser().get_default_values()
_reporter = docutils.utils.new_reporter('', _settings)
# All language modules are compared for completeness against English.
reference_language = 'en'
if sys.version_info >= (3, 0):
    # Python 3 has no `unicode`; alias it for the isinstance checks below.
    unicode = str # noqa
class LanguageTestSuite(DocutilsTestSupport.CustomTestSuite):
    """Suite that generates one LanguageTestCase per installed language."""
    # Matches language module file names such as 'de.py' or 'pt_br.py'.
    language_module_pattern = re.compile(r'^([a-z]{2,3}(_[a-z]{2,8})*)\.py$')
    def __init__(self, languages=None):
        DocutilsTestSupport.CustomTestSuite.__init__(self)
        if languages:
            self.languages = languages
        else:
            self.get_languages()
    def get_languages(self):
        """
        Get installed language translations from docutils.languages and from
        docutils.parsers.rst.languages.
        """
        # Union of the language modules shipped by both packages.
        languages = {}
        for mod in (os.listdir(docutils.languages.__path__[0])
                    + os.listdir(docutils.parsers.rst.languages.__path__[0])):
            match = self.language_module_pattern.match(mod)
            if match:
                languages[match.group(1)] = 1
        self.languages = list(languages.keys())
        # test language tag normalization:
        self.languages += ['en_gb', 'en_US', 'en-CA', 'de-DE', 'de-AT-1901',
                           'pt-BR', 'pt-foo-BR']
        # test that locally created language files are also loaded.
        # requires local_dummy_lang.py in test directory (testroot)
        # The local_dummy_lang.py contains all the fields from both
        # the docutils language tags and the parser.rst language tags
        self.languages += ['local_dummy_lang']
    def generateTests(self):
        # One test case per (language, test method) pair.
        for language in self.languages:
            for method in LanguageTestCase.test_methods:
                self.addTestCase(LanguageTestCase, method, None, None,
                                 id=language+'.py', language=language)
class LanguageTestCase(DocutilsTestSupport.CustomTestCase):
    """Completeness checks for one language module against the reference
    language ('en'): labels, bibliographic fields, directives, and roles."""
    test_methods = ['test_labels', 'test_bibliographic_fields',
                    'test_directives', 'test_roles']
    """Names of methods used to test each language."""
    def __init__(self, *args, **kwargs):
        # Reference translations to compare this language against.
        self.ref = docutils.languages.get_language(reference_language,
                                                   _reporter)
        self.language = kwargs['language']
        del kwargs['language'] # only wanted here
        DocutilsTestSupport.CustomTestCase.__init__(self, *args, **kwargs)
    def _xor(self, ref_dict, l_dict):
        """
        Returns entries that are only in one dictionary.
        (missing_in_lang, more_than_in_ref).
        """
        missing = [] # in ref but not in l.
        too_much = [] # in l but not in ref.
        for label in ref_dict.keys():
            if label not in l_dict:
                missing.append(label)
        for label in l_dict.keys():
            if label not in ref_dict:
                too_much.append(label)
        return (missing, too_much)
    def _invert(self, adict):
        """Return an inverted (keys & values swapped) dictionary."""
        inverted = {}
        for key, value in adict.items():
            inverted[value] = key
        return inverted
    def test_labels(self):
        # Every reference label must have a translation, and vice versa.
        try:
            module = docutils.languages.get_language(self.language, _reporter)
            if not module:
                raise ImportError
        except ImportError:
            self.fail('No docutils.languages.%s module.' % self.language)
        missed, unknown = self._xor(self.ref.labels, module.labels)
        if missed or unknown:
            self.fail('Module docutils.languages.%s.labels:\n'
                      '    Missed: %s; Unknown: %s'
                      % (self.language, str(missed), str(unknown)))
    def test_bibliographic_fields(self):
        # Fields map translated-name -> canonical-name, so compare inverted.
        try:
            module = docutils.languages.get_language(self.language, _reporter)
            if not module:
                raise ImportError
        except ImportError:
            self.fail('No docutils.languages.%s module.' % self.language)
        missed, unknown = self._xor(
            self._invert(self.ref.bibliographic_fields),
            self._invert(module.bibliographic_fields))
        if missed or unknown:
            self.fail('Module docutils.languages.%s.bibliographic_fields:\n'
                      '    Missed: %s; Unknown: %s'
                      % (self.language, str(missed), str(unknown)))
    def test_directives(self):
        # Each translated directive name must resolve, and every canonical
        # directive must have a translation.
        try:
            module = docutils.parsers.rst.languages.get_language(
                self.language)
            if not module:
                raise ImportError
        except ImportError:
            self.fail('No docutils.parsers.rst.languages.%s module.'
                      % self.language)
        failures = []
        for d in module.directives.keys():
            try:
                func, msg = directives.directive(d, module, None)
                if not func:
                    failures.append('"%s": unknown directive' % d)
            except Exception as error:
                failures.append('"%s": %s' % (d, error))
        inverted = self._invert(module.directives)
        canonical = sorted(directives._directive_registry.keys())
        # Test-only directive needs no translation.
        canonical.remove('restructuredtext-test-directive')
        for name in canonical:
            if name not in inverted:
                failures.append('"%s": translation missing' % name)
        if failures:
            text = ('Module docutils.parsers.rst.languages.%s:\n    %s'
                    % (self.language, '\n    '.join(failures)))
            if isinstance(text, unicode):
                text = text.encode('raw_unicode_escape')
            self.fail(text)
    def test_roles(self):
        # Same completeness check as test_directives, but for roles.
        try:
            module = docutils.parsers.rst.languages.get_language(
                self.language)
            if not module:
                raise ImportError
            module.roles
        except ImportError:
            self.fail('No docutils.parsers.rst.languages.%s module.'
                      % self.language)
        except AttributeError:
            self.fail('No "roles" mapping in docutils.parsers.rst.languages.'
                      '%s module.' % self.language)
        failures = []
        for d in module.roles.values():
            try:
                method = roles._role_registry[d]
                #if not method:
                #    failures.append('"%s": unknown role' % d)
            except KeyError as error:
                failures.append('"%s": %s' % (d, error))
        inverted = self._invert(module.roles)
        canonical = sorted(roles._role_registry.keys())
        canonical.remove('restructuredtext-unimplemented-role')
        for name in canonical:
            if name not in inverted:
                failures.append('"%s": translation missing' % name)
        if failures:
            text = ('Module docutils.parsers.rst.languages.%s:\n    %s'
                    % (self.language, '\n    '.join(failures)))
            if isinstance(text, unicode):
                text = text.encode('raw_unicode_escape')
            self.fail(text)
# Populated from trailing command-line arguments by get_language_arguments();
# empty means "test every installed language".
languages_to_test = []
def suite():
    """Build and return the generated language test suite."""
    lang_suite = LanguageTestSuite(languages_to_test)
    lang_suite.generateTests()
    return lang_suite
def get_language_arguments():
    """Move trailing non-option arguments from sys.argv into
    languages_to_test, preserving their command-line order."""
    while len(sys.argv) > 1:
        candidate = sys.argv[-1]
        if candidate.startswith('-'):
            break
        languages_to_test.append(candidate)
        sys.argv.pop()
    # Arguments were popped back-to-front; restore original order.
    languages_to_test.reverse()
if __name__ == '__main__':
    # Optional trailing command-line arguments select which languages to test.
    get_language_arguments()
    import unittest
    unittest.main(defaultTest='suite')
# vim: set et ts=4 ai :
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Script to demonstrate the use of ciscosparkapi for the people API
The package natively retrieves your Spark access token from the
SPARK_ACCESS_TOKEN environment variable. You must have this environment
variable set to run this script.
"""
# Use future for Python v2 and v3 compatibility
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
__author__ = "Jose Bogarín Solano"
__author_email__ = "jose@bogarin.co.cr"
__contributors__ = ["Chris Lunsford <chrlunsf@cisco.com>"]
__copyright__ = "Copyright (c) 2016-2018 Cisco and/or its affiliates."
__license__ = "MIT"
from ciscosparkapi import CiscoSparkAPI
api = CiscoSparkAPI()  # Create a CiscoSparkAPI connection object; uses your SPARK_ACCESS_TOKEN environment variable
# Get my user information
print("Get my information ...")
me = api.people.me()
print(me)
# Get my user information using my id
print("Get my information but using id ...")
me_by_id = api.people.get(me.id)
print(me_by_id)
# List the people whose display name matches "Jose"
print("Get the list of people I know...")
people = api.people.list(displayName="Jose")  # Creates a generator container (iterable) that lists the people I know
for person in people:
    print(person.displayName)  # Print the displayName of every person found
|
''' Opening and Reading Files
Syntax to open a file:
f = open("myfile.txt")
'''
import sys
import eann
import gym
import time
from float_binary_conversion import float_to_binary_list, binary_list_to_float
from Preprocessing import preprocess_inputs
# Demo driver: run the eann brain on HalfCheetah-v2, encoding observations
# into binary lists and decoding think() outputs into continuous actions.
env = gym.make('HalfCheetah-v2')
action_len = env.action_space.shape[0]
print("brain input size: " + str(len(preprocess_inputs(env.reset()))))
assert(eann.init("./tensorboard_logs/cart_pole") == 0)
# BUG FIX: the original reused `i` for the episode loop AND both inner
# loops, shadowing the episode counter; each loop now has its own name.
for episode in range(10000):
    steps = 0
    is_done = False
    observation = env.reset()
    while not is_done:
        # Encode each observation component as a fixed-point binary list.
        inputs = []
        for obs_idx in range(env.observation_space.shape[0]):
            inputs += float_to_binary_list(observation[obs_idx], 0.05, 5)
        # BUG FIX: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement for intervals.
        start = time.perf_counter()
        output = eann.think(inputs)
        elapsed = time.perf_counter() - start
        # print("\n" + str(elapsed) + "s")
        #print(output[0])
        actions = []
        output_len = len(output)
        if output_len < action_len:
            print("eann think output is too short. Please adjust the num_outputs in the Hyperparameters file")
            print("The environment takes an action vector of length {}".format(action_len))
            sys.exit(-1)
        print("eann output: ")
        print(output)
        # Split the binary output evenly across action dimensions and decode
        # each slice back into a float in [-1, 1].
        for action_idx in range(action_len):
            print(action_len)
            num_outputs = output_len // action_len
            actions.append(binary_list_to_float(
                output[action_idx * num_outputs:(action_idx + 1) * num_outputs], -1, 1))
        print(actions)
        observation, reward, is_done, info = env.step(actions)
        #env.render()
        eann.reward(reward)
        time.sleep(0.01)
        steps += 1
    #eann.reset_memory()
    print("result: " + str(steps))
from app.schema.item import ItemInDatabase
from app.schema.seller import SellerInDatabase
from app.test.client import client
from app.test.sample_data import (
item_1_raw,
item_2_raw,
seller_1_raw,
seller_2_raw,
)
from requests import Response
def create_seller_1() -> tuple[Response, SellerInDatabase]:
    """POST seller_1_raw to /sellers; return the response and parsed model."""
    resp = client.post("/sellers", json=seller_1_raw)
    parsed = SellerInDatabase(**resp.json())
    return resp, parsed
def create_seller_2() -> tuple[Response, SellerInDatabase]:
    """POST seller_2_raw to /sellers; return the response and parsed model."""
    resp = client.post("/sellers", json=seller_2_raw)
    parsed = SellerInDatabase(**resp.json())
    return resp, parsed
def create_item_1(created_seller_id: str) -> tuple[Response, ItemInDatabase]:
    """POST item_1_raw under the given seller; return response and model."""
    url = f"/sellers/{created_seller_id}/items"
    resp = client.post(url, json=item_1_raw.dict())
    return resp, ItemInDatabase(**resp.json())
def create_item_2(created_seller_id: str) -> tuple[Response, ItemInDatabase]:
    """POST item_2_raw under the given seller; return response and model."""
    url = f"/sellers/{created_seller_id}/items"
    resp = client.post(url, json=item_2_raw.dict())
    return resp, ItemInDatabase(**resp.json())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a self-referencing review_parent FK to Review (review replies)
    and enforce uniqueness per (parent, author, listing)."""
    dependencies = [
        ('ozpcenter', '0023_auto_20170629_1323'),
    ]
    operations = [
        migrations.AddField(
            model_name='review',
            name='review_parent',
            # Nullable: top-level reviews have no parent.
            field=models.ForeignKey(blank=True, null=True, to='ozpcenter.Review'),
        ),
        migrations.AlterUniqueTogether(
            name='review',
            unique_together=set([('review_parent', 'author', 'listing')]),
        ),
    ]
|
import os
import re
import numpy as np
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, \
UnexpectedAlertPresentException, StaleElementReferenceException, \
NoSuchWindowException, WebDriverException
from enum import Enum, auto
from threading import Thread, Event
from typing import Callable, Dict, List, Tuple
# Registry of listener callables keyed by event type, and a cache of each
# listener's last returned value.
_listeners: Dict['OthelloListenerCallback', Callable] = {}
_listeners_cache: Dict['OthelloListenerCallback', Tuple] = {}
class ListenerCallback(Enum):
    """Events that the scraped online-Othello page can report."""
    USER_LOGGED = auto()
    IN_ROOM = auto()
    IN_GAME = auto()
    PLAYERS = auto()
    PLAYER_COLOR = auto()
    PLAYERS_POINTS = auto()
    BOARD = auto()
    PLAYERS_TIME = auto()
    CURRENT_PLAYER = auto()
    GAME_PROGRESS = auto()
    IS_FINISHED = auto()
    CLOSE = auto()
class ListenerCallbackRegister:
    """Registry of scraping functions, one per ListenerCallback type.

    Each listener receives the selenium webdriver and returns the scraped
    value, or None when the value cannot be determined (yet).
    """
    def register_listener(type_: ListenerCallback):
        """Decorator: register the wrapped function as the listener for *type_*.

        The wrapper turns transient selenium failures (unexpected alert,
        stale element) into a None result instead of letting them propagate
        to the polling loop, which would stop it.
        """
        global _listeners
        def decorator(function):
            def wrapper(*args, **kwargs):
                try:
                    return function(*args, **kwargs)
                except UnexpectedAlertPresentException:
                    return None
                except StaleElementReferenceException:
                    return None
            # BUG FIX: the original registered the raw `function`, so the
            # exception-swallowing `wrapper` was never used and transient
            # selenium errors escaped to OthelloListener._listener.
            _listeners[type_] = wrapper
            return wrapper
        return decorator
    @register_listener(ListenerCallback.USER_LOGGED)
    def _user_logged_listener(driver):
        # Logged in when <body> lacks the "not_logged_user" class.
        try:
            driver.find_element_by_xpath('//body[not(contains(@class, "not_logged_user"))]')
            return driver.execute_script('return document.getElementById("connected_username").innerText')
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.IN_ROOM)
    def _in_room_logged_listener(driver):
        # True when the current URL is a table (room) page.
        return bool(re.match(r'.+/table\?table=\d+', driver.current_url))
    @register_listener(ListenerCallback.IN_GAME)
    def _in_game_logged_listener(driver):
        # True when the current URL is a reversi game page.
        return bool(re.match(r'.+/reversi\?table=\d+', driver.current_url))
    @register_listener(ListenerCallback.CURRENT_PLAYER)
    def _current_player_logged_listener(driver):
        # The active player's emblem is visible; read the adjacent name.
        xpath = '//*[@class="emblemwrap" and contains(@style, "display: block;") ' \
                'and contains(@id, "active")]/following::div[@class="player-name"]'
        try:
            element = driver.find_element_by_xpath(xpath)
            return element and element.text
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.PLAYERS)
    def _players_listener(driver):
        # Tuple of player display names, in page order.
        xpath = '//*[contains(@class, "player-name")]//a'
        try:
            elements = driver.find_elements_by_xpath(xpath)
            return tuple([element.text for element in elements])
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.BOARD)
    def _board_listener(driver):
        # 8x8 numpy board: 0 = empty, -1 = white disc, 1 = black disc.
        try:
            board = np.zeros((8, 8), dtype=int)
            discs_root = driver.find_element_by_id('discs')
            discs = {}
            for disc_el in discs_root.find_elements_by_class_name('disc'):
                player = -1 if 'disccolor_ffffff' in disc_el.get_attribute('class') else 1
                # Element ids look like "disc_<col><row>"; convert to 0-based (row, col).
                position = disc_el.get_attribute('id').split('_')[1]
                position = int(position[1]) - 1, int(position[0]) - 1
                board[position[0], position[1]] = player
            return board
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.PLAYERS_POINTS)
    def _points_listener(driver):
        # Dict of player name -> current score (int).
        try:
            players = driver.find_elements_by_xpath('//*[contains(@class, "player-name")]//a')
            players = [p.text for p in players]
            points = driver.execute_script('return Array.prototype.map.call(document.querySelectorAll(".player_score_value"),(item) => item.innerText)')
            points = map(int, points)
            return dict(zip(players, points))
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.PLAYERS_TIME)
    def _players_time_listener(driver):
        # Dict of player name -> remaining thinking time (raw text).
        try:
            players = driver.find_elements_by_xpath('//*[contains(@class, "player-name")]//a')
            players = [p.text for p in players]
            times = driver.execute_script('return Array.prototype.map.call(document.querySelectorAll(".timeToThink"),(item) => item.innerText)')
            return dict(zip(players, times))
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.PLAYER_COLOR)
    def _player_color_listener(driver):
        # 1 when the logged player's name is styled black, else -1 (white).
        try:
            xpath = '//*[contains(@class, "player-name")]//a'
            logged_player_style = driver.find_element_by_xpath(xpath).get_attribute('style')
            return 1 if logged_player_style == 'color: rgb(0, 0, 0);' else -1
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.IS_FINISHED)
    def _is_finished_listener(driver):
        # The "create new game" button only exists once the game is over.
        try:
            driver.find_element_by_id('createNew_btn')
            return True
        except NoSuchElementException:
            return None
    @register_listener(ListenerCallback.GAME_PROGRESS)
    def _game_progress_listener(driver):
        # Raw progress text from the game-progression widget.
        try:
            element = driver.find_element_by_id('pr_gameprogression')
            return element and element.text
        except NoSuchElementException:
            return None
class OthelloListener(Thread):
    """Daemon thread that polls the board-game page and fires callbacks.

    For each registered ListenerCallback type the corresponding scraping
    function is run against the selenium driver; when its result changes,
    every callback registered for that type is invoked (each on its own
    daemon thread) with (type, result).
    """
    HOME_PAGE = 'https://en.boardgamearena.com/account'
    def __init__(self):
        self._driver = None
        self._stop_event = Event()
        # BUG FIX: the annotation referenced the undefined name
        # 'OthelloListenerCallback'; annotations on attribute targets are
        # evaluated at runtime, so instantiation raised NameError. The enum
        # is called ListenerCallback.
        self._callbacks: Dict['ListenerCallback', List[Callable]] = {}
        super().__init__(daemon=True)
    def run(self):
        """Start Chrome, open the home page and poll until the window closes."""
        options = webdriver.ChromeOptions()
        options.add_argument('--lang=en')
        # The bundled chromedriver binary differs per platform.
        if os.name == 'nt':
            executable_path = './chromedriver.exe'
        else:
            executable_path = './chromedriver'
        self._driver = webdriver.Chrome(executable_path=executable_path, options=options)
        self._driver.get(OthelloListener.HOME_PAGE)
        self._listener()
        self._driver.quit()
    def register_callback(self, type_: 'ListenerCallback', callback: Callable):
        """Register *callback* to be invoked when *type_* results change."""
        if type_ not in self._callbacks:
            self._callbacks[type_] = []
        self._callbacks[type_].append(callback)
    def unregister_callback(self, callback: Callable):
        """Remove *callback* from every event type it was registered for."""
        # BUG FIX: the original body referenced an undefined name `type_`
        # and always raised NameError.
        for callbacks in self._callbacks.values():
            if callback in callbacks:
                callbacks.remove(callback)
    def _listener(self):
        """Poll every registered listener until stopped; notify on changes."""
        global _listeners_cache
        while not self._stop_event.is_set():
            for type_ in ListenerCallback:
                if type_ in self._callbacks and type_ in _listeners:
                    listener = _listeners[type_]
                    self._driver.implicitly_wait(0)
                    try:
                        result = listener(self._driver)
                    except NoSuchWindowException:
                        # Browser window closed: stop polling.
                        self._stop_event.set()
                        break
                    except WebDriverException:
                        self._stop_event.set()
                        break
                    # Compare against the cached result; numpy arrays need
                    # element-wise comparison.
                    cache_result = _listeners_cache.get(type_)
                    cache_result = cache_result and cache_result[1]
                    if isinstance(result, np.ndarray):
                        results_are_equals = np.all(result == cache_result)
                    else:
                        results_are_equals = result == cache_result
                    callback_params = tuple([type_] + [result])
                    if result is not None and not results_are_equals:
                        self._run_callbacks(type_, callback_params)
                        _listeners_cache[type_] = callback_params
        # Notify CLOSE subscribers that polling has ended.
        if ListenerCallback.CLOSE in self._callbacks:
            self._run_callbacks(ListenerCallback.CLOSE, (ListenerCallback.CLOSE, None))
    def _run_callbacks(self, type_: 'ListenerCallback', callback_params):
        """Invoke every callback for *type_*, each on its own daemon thread."""
        if type_ in self._callbacks:
            for callback in self._callbacks[type_]:
                Thread(target=callback, args=callback_params, daemon=True).start()
def callback(event, result):
    """Demo callback: print each listener notification to stdout."""
    print('Event: {}. Result: {}'.format(event, repr(result)))
if __name__ == '__main__':
    # Demo: start the listener and print every event until the browser closes.
    listener = OthelloListener()
    listener.start()
    listener.register_callback(ListenerCallback.USER_LOGGED, callback)
    listener.register_callback(ListenerCallback.IN_ROOM, callback)
    listener.register_callback(ListenerCallback.CURRENT_PLAYER, callback)
    listener.register_callback(ListenerCallback.BOARD, callback)
    listener.register_callback(ListenerCallback.PLAYERS_POINTS, callback)
    listener.register_callback(ListenerCallback.PLAYERS, callback)
    listener.register_callback(ListenerCallback.PLAYER_COLOR, callback)
    listener.register_callback(ListenerCallback.PLAYERS_TIME, callback)
    listener.register_callback(ListenerCallback.IS_FINISHED, callback)
    # BUG FIX: `while True: pass` busy-waited at 100% CPU just to keep the
    # main thread alive; block on the daemon listener thread instead (it
    # terminates when the browser window closes).
    listener.join()
|
import logging
from telebot import types
from botstarter import bot
# Configure root logging before the bot starts so handler output is visible.
logging.basicConfig(level=logging.INFO)
# Initialise the shared bot instance provided by botstarter.
bot.init_bot()
@bot.user_handler(commands=["echo"])
def echo_handler(msg, user, **kwargs):
    """Reply to /echo by sending the remainder of the message back."""
    # Strip the leading "/echo" command token (first occurrence only).
    payload = msg.text.replace("/echo", "", 1).strip()
    if payload:
        reply = f"Hello {user.first_name}\nEcho: {payload}"
    else:
        reply = "Didn't get any message to echo.\nTry with this command: `/echo ping`"
    bot.send_message(msg.chat.id, text=reply)
if __name__ == '__main__':
    # Advertise the /echo command in the Telegram client UI, then start polling.
    bot.set_my_commands([
        types.BotCommand(command="echo", description="Echo a message back to the user")
    ])
    bot.start()
|
"""exercism armstrong numbers module."""
def is_armstrong_number(number):
    """
    Determine if the number provided is an Armstrong Number.

    An Armstrong Number is a number that is the sum of its own digits each
    raised to the power of the number of digits.

    :param number: int - Number to check.
    :return: bool - Whether the provided number passes the check or not.

    >>> is_armstrong_number(9)
    True
    >>> is_armstrong_number(10)
    False
    """
    # Fixes: doctests were malformed (no space after >>> and trailing
    # comment lines broke doctest); dropped the needless list() around the
    # string and replaced the manual accumulation loop with a comprehension.
    digits = str(number)
    power = len(digits)
    return sum(int(digit) ** power for digit in digits) == number
|
import subprocess
from termcolor import colored
import os
import cPickle as pickle
import argparse
from read_credentials import readCredentials, ssh, scp
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int, default=1800)
parser.add_argument("--end", type=int, default=2009)
args = parser.parse_args()
servers = readCredentials("good_hosts")
with open('file_on_server.pkl', 'r') as f:
server_files = pickle.load(f)
for server, files in server_files.iteritems():
server_files[server] = [x.split('/')[-1] for x in files
if args.start <= int(x.split('/')[-1].split('-')[0]) and args.end >= int(x.split('/')[-1].split('-')[0])]
server_bash_scripts = {}
bash_script = """#!/bin/bash
trap "exit" INT
# model training related
neg_samples=4
threads=8
dim=300
rm -rf train_log
mkdir train_log
# which models to train
declare -a ngram_files=({})
for data in "${{ngram_files[@]}}"; do
echo "$data"
python word2vec_optimized.py --min_count 100 --num_neg_samples $neg_samples --concurrent_steps $threads --embedding_size $dim --train_data=../data/"$data" --eval_data=word2vec/trunk/questions-words.txt --save_path=./data/ > ./train_log/"$data"-output.log
done"""
for server, files in server_files.iteritems():
server_bash_scripts[server] = bash_script.format(' '.join(['"../data/{}"'.format(x) for x in files]))
processes = []
for server, user, passwd in servers:
with open('tmpbash_{}'.format(server), 'w') as f:
f.write(server_bash_scripts[server])
proc = subprocess.popen(scp(server, user, passwd, "tmpbash_{}".format(server), "./ngram/word2vec/run.sh").split(), stdout=subprocess.pipe, stderr=subprocess.pipe)
processes.append((server, proc));
for server, proc in processes:
output, err = proc.communicate()
if proc.returncode != 0:
print colored("{} failed to scp script".format(server), 'red')
subprocess.check_output("rm tmpbash_*", shell=true)
# start training
processes = []
for server, user, passwd in servers:
proc = subprocess.Popen('{} "cd ./ngram/word2vec;bash run.sh"'.format(ssh(server, user, passwd)), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
processes.append((server, proc));
for server, proc in processes:
output, err = proc.communicate()
if proc.returncode != 0:
print colored("{} failed to run script".format(server), 'red')
print colored("Error: {}".format(output), 'red')
|
from setuptools import setup, find_packages
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
    long_description = "\n" + fh.read()
VERSION = "0.1.1"
DESCRIPTION = "Notification/Alert for django users"
# NOTE(review): LONG_DESCRIPTION is defined but never passed to setup() --
# the README text above is used instead; confirm which is intended.
LONG_DESCRIPTION = (
    "A package that allows to build simple async alert system using websocket."
)
# Setting up
setup(
    name="ibalert",
    version=VERSION,
    author="Ideabreed Technology (Milann Malla)",
    author_email="<hello@itsmilann.com>",
    description=DESCRIPTION,
    long_description_content_type="text/markdown",
    long_description=long_description,
    packages=find_packages(),
    install_requires=["channels", "channels-redis", "coloredlogs"],
    keywords=["python", "websocket", "channels", "python-alerts", "ideabreed"],
    project_urls={
        "channels-alert": "https://github.com/ItsMilann/channels-alert/tree/release",
    },
    classifiers=[
        "Development Status :: 1 - Planning",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.8",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ],
)
|
from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
import mplfinance as mpf
# NOTE(security): hard-coded API credentials checked into source -- load
# these from environment variables / a secrets store and rotate these keys.
api_key = "WA57b7Xw7jhd5P1t78Z3gj6AuB8D9iSgbaZJEs16TjyJCfw2ds8mIUJdQDqmAVXG"
api_secret = "9m2mU7iWLWS2v0q2rgFzkdyGx3gSUIMi1n6Sx9ItBInWL1y7Cxl6Tf5A2AwTYzA7"
client = Client(api_key, api_secret)
###### get ticker for current prices
# tickers = client.get_all_tickers()
# ticker_df = pd.DataFrame(tickers)
# ticker_df.set_index('symbol', inplace=True)
# print(ticker_df.head())
##### get market depth for particular pair
# depth = client.get_order_book(symbol='BTCUSDT')
# depth_df = pd.DataFrame(depth['bids'])
# depth_df.columns = ['Price', 'Volume']
# print(depth_df.head)
##### get historical data
# Daily BTCUSDT candles; binance returns data from the symbol's listing date.
historical = client.get_historical_klines('BTCUSDT', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume',
                   'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
# Kline timestamps are in milliseconds.
hist_df['Open Time'] = pd.to_datetime(hist_df['Open Time']/1000, unit='s')
hist_df['Close Time'] = pd.to_datetime(hist_df['Close Time']/1000, unit='s')
numeric_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Quote Asset Volume', 'TB Base Volume', 'TB Quote Volume']
hist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)
print(hist_df.shape)
# print(hist_df.describe())
# print(hist_df.info())
#### visualize the data
mpf.plot(hist_df.set_index('Close Time').tail(1020),
         type='candle', style='charles',
         volume=True,
         # BUG FIX: the title said 'ETHBTC' but the symbol plotted is BTCUSDT.
         title='BTCUSDT Last 1020 Days',
         mav=(10,20,30))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
import pip
from pip.req import parse_requirements
from setuptools import find_packages, setup, Command
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.install import install
from setuptools.extension import Extension
# Package meta-data.
NAME = 'pytranus'
DESCRIPTION = 'Python Tranus (Lcal module) '
URL = 'https://gitlab.inria.fr/tcapelle/Tranus_Python'
EMAIL = 'thomascapelle@gmail.com'
AUTHOR = 'Thomas Capelle'
# What packages are required for this module to be executed?
# NOTE(review): pip.req.parse_requirements was removed in pip >= 10; this
# only works with old pip releases -- confirm the pinned build environment.
install_reqs = parse_requirements('./requirements.txt', session=False)
reqs = [str(ir.req) for ir in install_reqs]
class OverrideInstall(install):
    """
    Emulate sequential install of pip install -r requirements.txt
    To fix numpy bug in scipy, scikit in py2
    https://github.com/scikit-learn/scikit-learn/issues/4164
    """
    def run(self):
        # Install each requirement one-by-one, in file order, so numpy is
        # present before packages that need it at build time.
        # NOTE(review): pip.main was removed in pip >= 10.
        for req in reqs:
            pip.main(["install", req])
# Extensions
class build_ext(_build_ext):
    """Defer the numpy include-path lookup until numpy is importable."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in
# file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
# Executing the file populates about['__version__'] used by setup() below.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
    exec(f.read(), about)
class PublishCommand(Command):
    """Support setup.py publish."""
    description = 'Build and publish the package.'
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m' + s + '\033[0m')
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # Best-effort cleanup: a missing dist/ directory is not an error.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except IOError:
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        build_cmd = '{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)
        os.system(build_cmd)
        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')
        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    packages=find_packages(exclude=('tests',)),
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    setup_requires=[
        'cython>=0.23.4',
        'numpy>=1.10.4',
        # setuptools 18.0 properly handles Cython extensions
        'setuptools>=18.0'
    ],
    install_requires=reqs,
    ext_modules=[
        Extension("pytranus.pylcal.utils.DX",
                  ["pytranus/pylcal/utils/DX.pyx"],
                  extra_compile_args=['-fopenmp'],
                  extra_link_args=['-fopenmp']
                  )
    ],
    include_package_data=True,
    license='ISC',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        # BUG FIX: the classifier said "MIT License" while license='ISC'
        # above; the two now agree.
        'License :: OSI Approved :: ISC License (ISCL)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    cmdclass={
        'build_ext': build_ext,
        'install': OverrideInstall,
        'publish': PublishCommand
    },
)
|
import gdb
# Work list of nested container/pointer values still to be rendered by
# print_struct_follow_pointers (module-level, shared across calls).
# Cleanup: a module-level `global` statement is a no-op and was removed,
# along with the stray trailing semicolon.
path = []
def is_container(v):
    """Return True when the gdb value is a struct or union."""
    return v.type.code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION)
def is_pointer(v):
    """Return True when the gdb value is a pointer."""
    code = v.type.code
    return code == gdb.TYPE_CODE_PTR
def print_struct_follow_pointers(s, file, level = 0, counter = 0):
    """Write value *s* to *file* as a Graphviz record node.

    Nested containers and dereferenceable pointers are pushed onto the
    module-level `path` work list and rendered by the recursive tail call.
    Nothing is written for the root value itself (counter == 0).
    """
    indent = '    '
    # Scalars are emitted inline as a single record field.
    if not is_container(s):
        if counter != 0:
            file.write('%s | ' % (s,))
        return
    if counter != 0:
        file.write('%s%s%s [ shape=record, label=\"' % (indent, s.type, counter))
    for k in s.type.keys():
        v = s[k]
        if is_pointer(v):
            if counter != 0:
                file.write('%s%s | ' % (indent, k))
            # Only follow pointers that can actually be dereferenced.
            try:
                v1 = v.dereference()
                v1.fetch_lazy()
            except gdb.error:
                continue
            path.append(v1)
        elif is_container(v):
            if counter != 0:
                # BUG FIX: the format string had one %s but two arguments
                # ('%s | ' % (indent, k)), which raised TypeError at runtime.
                file.write('%s%s | ' % (indent, k))
            path.append(v)
        else:
            if counter != 0:
                file.write('%s<%s>%s | ' % (indent, k, v))
    if counter != 0:
        file.write('%s\"];\n' % (indent,))
    # Render the next queued value, if any.
    if len(path) > 0:
        temp_nm = path.pop()
        print_struct_follow_pointers(temp_nm, file, level + 1, counter + 1)
class PrintStructFollowPointers(gdb.Command):
    '''
    visualize STRUCT-VALUE

    Registers the gdb command "visualize": it evaluates its argument in the
    current frame, walks the resulting value (following pointers) and writes
    a Graphviz digraph of record nodes to raw.txt in the working directory.
    '''
    def __init__(self):
        super(PrintStructFollowPointers, self).__init__(
            'visualize',
            gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, False)
    def invoke(self, arg, from_tty):
        # NOTE(review): "except gdb.error, e" is Python 2 syntax -- this
        # script requires a gdb built against Python 2.
        try:
            v = gdb.parse_and_eval(arg)
        except gdb.error, e:
            raise gdb.GdbError(e.message)
        # Emit a left-to-right digraph of record nodes.
        f = open('raw.txt', 'w+')
        f.write("digraph {\n")
        f.write("graph[rankdir=\"LR\"];\n")
        f.write("node[shape=record];\n")
        print_struct_follow_pointers(v, f)
        f.write('}')
        f.close()
        gdb.write("Saved as raw file\n")
PrintStructFollowPointers() |
"""
Kombu modules.
"""
import logging
import socket
import ssl
from mtb.modules import md5_hash
from mtb.string import get_uuid
import auth.credential as credential
from messaging.message import Message
from amqpclt import common
from amqpclt.errors import AmqpcltError
LOGGER = logging.getLogger("amqpclt")
class KombuAdapter(object):
    """ Kombu library adapter.

    Shared base for the incoming/outgoing broker modules: manages the AMQP
    connection/channel and caches which exchanges, queues and bindings have
    already been declared so each is declared at most once.
    """
    # Overridden by subclasses ("incoming"/"outgoing"); selects the config section.
    direction = "unknown"
    def __init__(self, config):
        """ Initialize Kombu adapter """
        # Imported lazily so this module can load without kombu installed.
        self._kombu = __import__("kombu")
        self._config = config
        self._connection = None
        self._channel = None
        # Declaration caches: name/id -> 1 once declared or bound.
        self._exchange = dict()
        self._queue = dict()
        self._bind = dict()
    def _maybe_declare_exchange(self, exch_props):
        """ May be declare an exchange.

        Skips the default exchange (""), reserved "amq.*" exchanges and
        anything already declared. Returns the exchange name. """
        exch_name = exch_props.get("name", "")
        if (exch_name and
            not exch_name.startswith("amq.") and
            exch_name not in self._exchange):
            LOGGER.debug(
                "declaring %s exchange: %s" % (self.direction, exch_name))
            self._exchange[exch_name] = 1
            self._channel.exchange_declare(
                exchange=exch_name,
                durable=exch_props.get("durable", False),
                type=exch_props.get("type", "direct"),
                auto_delete=exch_props.get("auto_delete", False),
                arguments=exch_props.get("arguments", dict())
            )
        return exch_name
    def _maybe_declare_queue(self, queue_props):
        """ May be declare a queue.

        Anonymous queues get a generated UUID name. Returns the queue name. """
        queue_name = queue_props.get("name", "")
        if queue_name and queue_name in self._queue:
            return queue_name
        if not queue_name:
            queue_name = get_uuid()
        self._channel.queue_declare(
            queue=queue_name,
            durable=queue_props.get("durable", False),
            exclusive=queue_props.get("exclusive", False),
            auto_delete=queue_props.get("auto_delete", False),
            arguments=queue_props.get("arguments", dict()))
        # if not queue_name:
        #     # amq generated queue
        #     queue_name = result.method.queue
        LOGGER.debug("incoming queue declared: %s" % (queue_name, ))
        self._queue[queue_name] = 1
        return queue_name
    def _maybe_bind(self, queue_name, exchange, routing_key=""):
        """ May be bind a queue to an exchange.

        A binding's identity is the md5 of (queue + exchange + routing key);
        already-seen bindings are skipped. """
        bind_id = md5_hash(queue_name + exchange + routing_key).hexdigest()
        if bind_id in self._bind:
            return
        LOGGER.debug(
            "binding incoming queue: queue=%s, exchange=%s, routing_key=%s" %
            (queue_name, exchange, routing_key))
        self._channel.queue_bind(queue=queue_name,
                                 exchange=exchange,
                                 routing_key=routing_key)
        self._bind[bind_id] = 1
    def connect(self):
        """ Create a kombu AMQP connection and channel. """
        direction = self.direction
        config = self._config[direction]["broker"]
        params = common.parse_amqp_uri(config["uri"])
        cred = config.get("auth")
        if cred is None:
            cred = credential.new(scheme="none")
        if cred.scheme == "x509":
            # Certificate auth: map credential fields onto the ssl module's
            # option names and use the EXTERNAL SASL mechanism.
            ssl_options = dict()
            cred_dict = cred.dict()
            for key, keyval in {"cert": "certfile",
                                "key": "keyfile", "ca": "ca_certs"}.items():
                if key in cred_dict:
                    ssl_options[keyval] = cred_dict[key]
            ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            # NOTE(review): SSLv3 is insecure and absent from modern ssl
            # builds -- confirm and migrate to a TLS protocol constant.
            ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3
            extra = {"ssl": ssl_options,
                     "transport_options": {"login_method": "EXTERNAL"}}
        elif cred.scheme == "plain":
            extra = {
                "userid": cred['name'],
                "password": cred['pass'], }
        else:
            # none
            extra = dict()
        # if self._config.get("heartbeat") is not None:
        #     extra["heartbeat"] = self._config["heartbeat"]
        parameters = {
            "hostname": params["host"],
            "port": int(params["port"]),
            "transport": "amqplib",
            "virtual_host": params.get("virtual_host", "rabbitmq")
        }
        timeout_connect = self._config.get("timeout-connect")
        if timeout_connect is not None:
            parameters["connect_timeout"] = timeout_connect
        parameters.update(extra)
        self._connection = connection = self._kombu.BrokerConnection(
            **parameters)
        self._channel = connection.channel()
        LOGGER.debug(
            "%s broker %s:%s: %s" %
            (direction, params['host'], params['port'], self.server_type(),))
        # Record the detected broker type in the config for later use.
        if self._config.get("%s-broker-type" % direction) is None:
            self._config["%s-broker-type" % direction] = self.server_type()
        return True
    def server_type(self):
        """ Return the broker type, or None when not connected. """
        if self._connection is None:
            return None
        # Hard-coded: the amqplib transport used here only targets RabbitMQ.
        return "RabbitMQ"
class KombuIncomingBroker(KombuAdapter):
    """ Kombu incoming broker object.

    Consumes messages from subscribed queues and buffers them until the
    caller drains them via get(). """
    direction = "incoming"
    def __init__(self, config):
        """ Initialize kombu incoming broker module. """
        super(KombuIncomingBroker, self).__init__(config)
        # Raw (delivery_info, properties, body) tuples awaiting get().
        self._msgbuf = list()
        # Delivery tags delivered but not yet acked (reliable mode only).
        self._pending = list()
        # queue name -> consumer tag for active consumers.
        self._consume = dict()
    def _maybe_subscribe(self, subscription):
        """ May be subscribe to queue.

        Declares the queue (mandatory), optionally declares an exchange and
        binds the queue to it, then starts a consumer if not already consuming. """
        if "queue" in subscription:
            queue_name = self._maybe_declare_queue(subscription["queue"])
        else:
            raise AmqpcltError("subscription must contain a queue")
        exchange_name = None
        if "exchange" in subscription:
            exchange_name = self._maybe_declare_exchange(
                subscription["exchange"])
        if exchange_name:
            self._maybe_bind(queue_name,
                             exchange_name,
                             subscription.get("routing_key", ""))
        if queue_name not in self._consume:
            LOGGER.debug("incoming consume from queue: %s" % (queue_name, ))
            tag = get_uuid()
            params = {"callback": self._handle_message,
                      "no_ack": False,
                      "queue": queue_name,
                      "consumer_tag": tag}
            # Non-reliable mode: let the broker auto-ack on delivery.
            if not self._config["reliable"]:
                params["no_ack"] = True
            self._channel.basic_consume(**params)
            self._consume[queue_name] = tag
    def _handle_message(self, msg):
        """ Handle delivery: buffer the raw message for get(). """
        self._msgbuf.append((msg.delivery_info,
                             msg.properties, msg.body))
    def _drain_events(self, timeout=0.1):
        """ Drain events.

        Timeouts and transient socket errors just mean "nothing to read". """
        try:
            self._connection.drain_events(timeout=timeout)
        except socket.timeout:
            pass
        except socket.error:
            pass
    def start(self):
        """ Start the incoming broker module. """
        self.connect()
        if self._config.get("prefetch") >= 0:
            self._channel.basic_qos(
                prefetch_count=int(self._config["prefetch"]),
                prefetch_size=0,
                a_global=False)
        subscribe = self._config.get("subscribe", [])
        for sub in subscribe:
            self._maybe_subscribe(sub)
    def get(self):
        """ Get a message.

        Returns (Message, delivery_tag) in reliable mode, (Message, None)
        otherwise, or ("no messages received", None) when the buffer is empty. """
        if len(self._msgbuf) == 0:
            self._drain_events()
        if len(self._msgbuf) == 0:
            return "no messages received", None
        (info, header, body) = self._msgbuf.pop(0)
        # Text content types are decoded to unicode.
        if header.get("content_type") is not None and \
           (header["content_type"].startswith("text/") or
                "charset=" in header["content_type"]):
            body = body.decode("utf-8")
        headers = header["application_headers"]
        # NOTE(review): encode() raising UnicodeDecodeError is Python 2
        # behaviour -- confirm the intended interpreter version.
        for header_name, header_value in headers.items():
            try:
                headers[header_name] = header_value.encode("utf-8")
            except UnicodeDecodeError:
                headers[header_name] = header_value.decode("utf-8")
        msg = Message(header=headers, body=body)
        if self._config["reliable"]:
            # Reliable mode: caller must ack with the returned delivery tag.
            self._pending.append(info.get("delivery_tag"))
            return msg, info.get("delivery_tag")
        else:
            return msg, None
    def ack(self, delivery_tag):
        """ Ack a message and drop it from the pending list. """
        LOGGER.debug("acking incoming message: %d" % (delivery_tag, ))
        self._channel.basic_ack(delivery_tag=delivery_tag)
        self._pending.remove(delivery_tag)
    def idle(self):
        """ Idle: give the connection a chance to receive deliveries. """
        self._drain_events()
    def stop(self):
        """ Stop: close channel and connection. """
        self._channel.close()
        self._connection.close()
        self._connection = None
class KombuOutgoingBroker(KombuAdapter):
    """ Kombu outgoing broker object.

    Publishes messages to the destination encoded in each message's
    "destination" header. """
    direction = "outgoing"
    def __init__(self, config):
        """ Initialize the kombu outgoing broker module. """
        super(KombuOutgoingBroker, self).__init__(config)
        self._producer = None
    def start(self):
        """ Start the kombu outgoing broker module. """
        self.connect()
        self._producer = self._kombu.Producer(self._channel)
    def put(self, msg, msg_id=None):
        """ Put a message.

        Declares the destination queue/exchange and binding as needed,
        validates content-type vs payload, publishes, and returns the list
        of now-sent message ids ([msg_id] or []). Raises AmqpcltError on a
        missing destination or a content-type/payload mismatch. """
        # AMQP delivery mode: 2 = persistent, 1 = transient.
        delivery_mode = 1
        if msg.header.get("persistent", "false") == "true":
            delivery_mode = 2
        # Send the message
        if "destination" not in msg.header:
            raise AmqpcltError("message doesn't have a destination: %s" % msg)
        destination = common.parse_sender_destination(
            msg.header["destination"])
        if "queue" in destination:
            queue_name = self._maybe_declare_queue(destination["queue"])
        exch_name = self._maybe_declare_exchange(
            destination.get("exchange", dict()))
        if exch_name and "queue" in destination:
            self._maybe_bind(queue_name,
                             exch_name,
                             destination.get("routing_key", ""))
        extra = dict()
        # Sanity-check that the declared content-type matches the payload
        # (text vs binary).
        if "content-type" in msg.header:
            content_type = msg.header["content-type"]
            if content_type.startswith("text/") or \
               "charset=" in content_type:
                if not msg.is_text():
                    raise AmqpcltError("unexpected text content-type "
                                       "for binary message: %s" % content_type)
            else:
                if msg.is_text():
                    raise AmqpcltError("unexpected binary content-type for "
                                       "text message: %s" % content_type)
            extra["content_type"] = content_type
        elif msg.is_text():
            extra["content_type"] = "text/unknown"
        self._producer.publish(
            exchange=exch_name,
            routing_key=destination.get("routing_key", ""),
            delivery_mode=delivery_mode,
            serializer=None,
            compression=None,
            headers=msg.header,
            body=msg.body, **extra)
        if msg_id is None:
            return list()
        else:
            return [msg_id, ]
    def idle(self):
        """ Idle: nothing buffered on the outgoing side. """
        return list()
    def stop(self):
        """ Stop: close the connection and drop handles. """
        self._connection.close()
        self._channel = None
        self._connection = None
|
## Send a Single Email to a Single Recipient
# import os
# import json
# from sendgrid import SendGridAPIClient
# from sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent, SendGridException
# message = Mail(from_email=From('dx@sendgrid.com', 'DX'),
# to_emails=To('elmer.thomas@sendgrid.com', 'Elmer Thomas'),
# subject=Subject('Sending with SendGrid is Fun'),
# plain_text_content=PlainTextContent('and easy to do anywhere, even with Python'),
# html_content=HtmlContent('<strong>and easy to do anywhere, even with Python</strong>'))
# try:
# sendgrid_client = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# print(json.dumps(message.get(), sort_keys=True, indent=4))
# response = sendgrid_client.send(message=message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
# except SendGridException as e:
# print(e.message)
# # Send a Single Email to Multiple Recipients
# import os
# import json
# from sendgrid import SendGridAPIClient
# from sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent, SendGridException
# to_emails = [
# To('elmer.thomas@sendgrid.com', 'Elmer SendGrid'),
# To('elmer.thomas@gmail.com', 'Elmer Thomas')
# ]
# message = Mail(from_email=From('dx@sendgrid.com', 'DX'),
# to_emails=to_emails,
# subject=Subject('Sending with SendGrid is Fun'),
# plain_text_content=PlainTextContent('and easy to do anywhere, even with Python'),
# html_content=HtmlContent('<strong>and easy to do anywhere, even with Python</strong>'))
# try:
# sendgrid_client = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# print(json.dumps(message.get(), sort_keys=True, indent=4))
# response = sendgrid_client.send(message=message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
# except SendGridException as e:
# print(e.message)
# # Send Multiple Emails to Multiple Recipients
# import os
# import json
# from sendgrid import SendGridAPIClient
# from sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent, SendGridException, Substitution
# import time
# import datetime
# to_emails = [
# To(email='elmer.thomas@sendgrid.com',
# name='Elmer SendGrid',
# substitutions={
# Substitution('-name-', 'Elmer SendGrid'),
# Substitution('-github-', 'http://github.com/ethomas'),
# },
# subject=Subject('Override Global Subject')),
# To(email='elmer.thomas@gmail.com',
# name='Elmer Thomas',
# substitutions={
# Substitution('-name-', 'Elmer Thomas'),
# Substitution('-github-', 'http://github.com/thinkingserious'),
# })
# ]
# ts = time.time()
# global_substitutions = Substitution('-time-', datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))
# message = Mail(from_email=From('dx@sendgrid.com', 'DX'),
# to_emails=to_emails,
# subject=Subject('Hi -name-'),
# plain_text_content=PlainTextContent('Hello -name-, your github is -github-, email sent at -time-'),
# html_content=HtmlContent('<strong>Hello -name-, your github is <a href=\"-github-\">here</a></strong> email sent at -time-'),
# global_substitutions=global_substitutions,
# is_multiple=True)
# try:
# sendgrid_client = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# print(json.dumps(message.get(), sort_keys=True, indent=4))
# response = sendgrid_client.send(message=message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
# except SendGridException as e:
# print(e.message)
# Kitchen Sink - an example with all settings used
import os
import json
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, From, To, Cc, Bcc, Subject, PlainTextContent, HtmlContent, SendGridException, Substitution, Header
import time
import datetime
# Build the message incrementally. The example assigns both a single
# recipient and a list per field; presumably Mail's setters append rather
# than replace -- confirm against the sendgrid helpers documentation.
message = Mail()
message.to = To('elmer+test1@sendgrid.com', 'Example User1')
message.to = [
    To('elmer+test2@sendgrid.com', 'Example User2'),
    To('elmer+test3@sendgrid.com', 'Example User3')
]
message.cc = Cc('test4@example.com', 'Example User4')
message.cc = [
    Cc('test5@example.com', 'Example User5'),
    Cc('test6@example.com', 'Example User6')
]
message.bcc = Bcc('test7@example.com', 'Example User7')
message.bcc = [
    Bcc('test8@example.com', 'Example User8'),
    Bcc('test9@example.com', 'Example User9')
]
# message.header = Header('X-Test1', 'Test1')
# message.header = Header('X-Test2', 'Test2')
# message.header = [
#     Header('X-Test3', 'Test3'),
#     Header('X-Test4', 'Test4')
# ]
try:
    # NOTE(review): recent sendgrid releases take api_key=, not apikey= --
    # confirm the pinned library version.
    sendgrid_client = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
    # Dump the request payload instead of actually sending (send is commented out).
    print(json.dumps(message.get(), sort_keys=True, indent=4))
    # response = sendgrid_client.send(message=message)
    # print(response.status_code)
    # print(response.body)
    # print(response.headers)
except SendGridException as e:
    print(e.message)
# ToDo
## The Mail constructor should also support passing in tuples and strings
|
#!/usr/bin/env python3
#
# Based on http://www.prooffreader.com/2014/05/graphing-distribution-of-english.html
#
import colorsys
import matplotlib.pyplot as plt
ALPHABET = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
BUCKETS_COUNT = 720
FILENAME = 'data/ru.txt'
def color(current, minimum, maximum):
    """Map *current* within [minimum, maximum] to an '#rrggbb' hex color.

    The closer *current* is to *maximum*, the more saturated (red) the
    color; at *minimum* the result is white ('#ffffff').
    """
    if maximum == minimum:
        # Robustness: degenerate range previously raised ZeroDivisionError;
        # treat it as fully unsaturated.
        saturation = 0.0
    else:
        saturation = (current - minimum) / (maximum - minimum)
    r, g, b = colorsys.hsv_to_rgb(1.0, saturation, 1.0)
    return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
if __name__ == '__main__':
    # Per-letter histogram: how often each letter occurs at each relative
    # position within a line, quantised into BUCKETS_COUNT buckets.
    stat = {c: [0] * BUCKETS_COUNT for c in ALPHABET}
    with open(FILENAME, 'r') as f:
        for line in f:
            lowered_stripped_line = line.strip().lower()
            for i, c in enumerate(lowered_stripped_line):
                if c not in ALPHABET:
                    continue
                # Each character position covers `part` consecutive buckets.
                # NOTE(review): lines longer than BUCKETS_COUNT make
                # part == 0, silently dropping them -- confirm inputs are
                # shorter than 720 characters.
                part = BUCKETS_COUNT // len(lowered_stripped_line)
                for j in range(i * part, (i + 1) * part):
                    stat[c][j] += 1
    # Total count per letter, used to scale the fill color saturation.
    l = [sum(i) for i in stat.values()]
    min_ = min(l)
    max_ = max(l)
    # One figure per letter, saved under result/<letter>.png.
    for c in ALPHABET:
        fig = plt.figure(figsize=(10, 10))
        plt.xticks([])
        plt.yticks([])
        plt.fill_between(
            range(BUCKETS_COUNT),
            stat[c],
            facecolor=color(sum(stat[c]), min_, max_)
        )
        fig.savefig('result/%s.png' % c)
        plt.close(fig)
|
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from scipy.spatial import distance
import scipy.sparse as ss
class DataInput(object):
    """
    Loads the province-level daily OD (origin-destination) flow tensor and
    builds the model inputs: the log-transformed (optionally normalized) OD
    series, the static adjacency matrix, and weekday-periodic dynamic graphs.
    """

    def __init__(self, params: dict):
        # params keys used here: 'input_dir', 'norm', 'split_ratio'.
        self.params = params

    def load_data(self):
        """Load the OD tensor from disk and assemble the dataset dict.

        Returns a dict with keys 'OD' (normalized log-flows), 'adj' (static
        adjacency matrix) and 'O_dyn_G'/'D_dyn_G' (dynamic graphs).
        """
        prov_day_data = ss.load_npz(self.params['input_dir'] + '/od_day20180101_20210228.npz')
        prov_day_data_dense = np.array(prov_day_data.todense()).reshape((-1, 47, 47))
        OD_DAYS = [date.strftime('%Y-%m-%d') for date in pd.date_range(start='2020-01-01', end='2021-02-28', freq='1D')]  # 425 days
        data = prov_day_data_dense[-len(OD_DAYS):, :, :, np.newaxis]
        OD_data = np.log(data + 1.0)  # log transformation compresses heavy-tailed flows
        print(OD_data.shape)
        if self.params['norm'] == 'none':
            pass
        elif self.params['norm'] == 'minmax':
            OD_data = self.minmax_normalize(OD_data)
        elif self.params['norm'] == 'std':
            OD_data = self.std_normalize(OD_data)
        else:
            raise ValueError
        # return a dict
        dataset = dict()
        dataset['OD'] = OD_data
        dataset['adj'] = np.load(self.params['input_dir'] + '/adjacency_matrix.npy')
        dataset['O_dyn_G'], dataset['D_dyn_G'] = self.construct_dyn_G(data)  # use unnormalized OD
        return dataset

    def construct_dyn_G(self, OD_data: np.ndarray, perceived_period: int = 7):
        """Construct dynamic O/D graphs from the train-split OD history.

        For each weekday t of the perceived period, averages all historical
        matrices of that weekday and measures pairwise cosine distance between
        rows (origins, eq 6) and columns (destinations, eq 7).

        Returns (O_dyn_G, D_dyn_G), each shaped (N, N, perceived_period).
        """
        train_len = int(OD_data.shape[0] * self.params['split_ratio'][0] / sum(self.params['split_ratio']))
        num_periods_in_history = train_len // perceived_period  # dump the remainder
        OD_history = OD_data[:num_periods_in_history * perceived_period, :, :, :]
        O_dyn_G, D_dyn_G = [], []
        for t in range(perceived_period):
            OD_t_avg = np.mean(OD_history[t::perceived_period, :, :, :], axis=0).squeeze(axis=-1)
            O, D = OD_t_avg.shape
            O_G_t = np.zeros((O, O))  # initialize O graph at t
            for i in range(O):
                for j in range(O):
                    O_G_t[i, j] = distance.cosine(OD_t_avg[i, :], OD_t_avg[j, :])  # eq (6)
            D_G_t = np.zeros((D, D))  # initialize D graph at t
            for i in range(D):
                for j in range(D):
                    # BUG FIX: compared column i against *row* j before
                    # (OD_t_avg[j, :]); eq (7) mirrors eq (6) on columns, so
                    # both arguments must be destination columns. The fixed
                    # graph is symmetric, as a distance matrix should be.
                    D_G_t[i, j] = distance.cosine(OD_t_avg[:, i], OD_t_avg[:, j])  # eq (7)
            O_dyn_G.append(O_G_t), D_dyn_G.append(D_G_t)
        return np.stack(O_dyn_G, axis=-1), np.stack(D_dyn_G, axis=-1)

    def minmax_normalize(self, x: np.ndarray):
        """Scale x to [0, 1]; remembers min/max for denormalization."""
        self._max, self._min = x.max(), x.min()
        print('min:', self._min, 'max:', self._max)
        x = (x - self._min) / (self._max - self._min)
        return x

    def minmax_denormalize(self, x: np.ndarray):
        """Invert minmax_normalize (requires it to have been called first)."""
        x = (self._max - self._min) * x + self._min
        return x

    def std_normalize(self, x: np.ndarray):
        """Standardize x to zero mean / unit variance; remembers mean/std."""
        self._mean, self._std = x.mean(), x.std()
        print('mean:', round(self._mean, 4), 'std:', round(self._std, 4))
        x = (x - self._mean) / self._std
        return x

    def std_denormalize(self, x: np.ndarray):
        """Invert std_normalize (requires it to have been called first)."""
        x = x * self._std + self._mean
        return x
class ODDataset(Dataset):
    """
    Serves (observation window, target, O-graph, D-graph) samples for a single
    split ('train' / 'validate' / 'test') of the OD sequence.
    """

    def __init__(self, inputs: dict, output: torch.Tensor, mode: str, mode_len: dict, obs_len: int):
        self.mode = mode
        self.mode_len = mode_len
        self.inputs, self.output = self.prepare_xy(inputs, output)
        self.obs_len = obs_len

    def __len__(self):
        return self.mode_len[self.mode]

    def __getitem__(self, item: int):
        """Return (x window, y target, O graph, D graph) for sample `item`."""
        graphs = self.timestamp_query(self.inputs['O_dyn_G'], self.inputs['D_dyn_G'], item)
        return (self.inputs['x_seq'][item], self.output[item]) + graphs

    def timestamp_query(self, O_dyn_G: torch.Tensor, D_dyn_G: torch.Tensor, t: int, perceived_period: int = 7):
        """
        Pick the weekday slice of the dynamic graphs matching sample `t`'s
        absolute position in the full series.
        """
        # Absolute timestamp = observation offset plus the lengths of every
        # split preceding the current one.
        offset = self.obs_len
        if self.mode != 'train':
            offset += self.mode_len['train']
            if self.mode != 'validate':  # test
                offset += self.mode_len['validate']
        key = (offset + t) % perceived_period
        return O_dyn_G[:, :, key], D_dyn_G[:, :, key]

    def prepare_xy(self, inputs: dict, output: torch.Tensor):
        """Slice x/y to this split; the dynamic graphs are shared unsliced."""
        if self.mode == 'train':
            start = 0
        elif self.mode == 'validate':
            start = self.mode_len['train']
        else:  # test
            start = self.mode_len['train'] + self.mode_len['validate']
        stop = start + self.mode_len[self.mode]
        sliced = {
            'x_seq': inputs['x_seq'][start:stop],
            'O_dyn_G': inputs['O_dyn_G'],
            'D_dyn_G': inputs['D_dyn_G'],
        }
        return sliced, output[start:stop]
class DataGenerator(object):
    """Slices the OD tensor into supervised (x, y) windows and builds the
    per-split torch DataLoaders."""

    def __init__(self, obs_len: int, pred_len, data_split_ratio: tuple):
        self.obs_len = obs_len
        self.pred_len = pred_len
        self.data_split_ratio = data_split_ratio

    def split2len(self, data_len: int):
        """Translate the split ratio into absolute sample counts; the train
        split absorbs any rounding remainder."""
        total = sum(self.data_split_ratio)
        lengths = {
            'validate': int(self.data_split_ratio[1] / total * data_len),
            'test': int(self.data_split_ratio[2] / total * data_len),
        }
        lengths['train'] = data_len - lengths['validate'] - lengths['test']
        return lengths

    def get_data_loader(self, data: dict, params: dict):
        """Build {'train': ..., 'validate': ..., 'test': ...} DataLoaders.

        Sequence tensors go to params['GPU']; the shared dynamic graphs stay
        on CPU and are sliced per sample by ODDataset.
        """
        x_seq, y_seq = self.get_feats(data['OD'])
        feats = {
            'x_seq': torch.from_numpy(np.asarray(x_seq)).float().to(params['GPU']),
            'O_dyn_G': torch.from_numpy(data['O_dyn_G']).float(),
            'D_dyn_G': torch.from_numpy(data['D_dyn_G']).float(),
        }
        targets = torch.from_numpy(np.asarray(y_seq)).float().to(params['GPU'])
        mode_len = self.split2len(data_len=targets.shape[0])
        # data loading default: single-processing | for multi-processing:
        # num_workers=pos_int or pin_memory=True (GPU)
        loaders = {}
        for mode in ('train', 'validate', 'test'):
            loaders[mode] = DataLoader(
                dataset=ODDataset(inputs=feats, output=targets, mode=mode,
                                  mode_len=mode_len, obs_len=self.obs_len),
                batch_size=params['batch_size'],
                shuffle=False,
            )
        return loaders

    def get_feats(self, data: np.ndarray):
        """Sliding windows: x = obs_len steps of history, y = the following
        pred_len steps."""
        window_starts = range(self.obs_len, data.shape[0] - self.pred_len)
        xs = [data[t - self.obs_len:t] for t in window_starts]
        ys = [data[t:t + self.pred_len] for t in window_starts]
        return xs, ys
|
from collections import OrderedDict
import hashlib
import json
import logging
import requests
import urllib.parse
import uuid
from django import forms
from django.conf import settings
from django.core import signing
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.translation import pgettext, ugettext_lazy as _
from requests import HTTPError
from pretix.base.decimal import round_decimal
from pretix.base.models import Event, OrderPayment, OrderRefund
from pretix.base.payment import BasePaymentProvider, PaymentException
from pretix.base.settings import SettingsSandbox
from pretix.multidomain.urlreverse import build_absolute_uri
logger = logging.getLogger(__name__)
class SaferpaySettingsHolder(BasePaymentProvider):
    """
    Meta provider holding the global Saferpay configuration (credentials and
    the per-method enable flags). It is never offered for payment itself —
    the concrete SaferpayMethod subclasses below are.
    """
    identifier = 'saferpay'
    verbose_name = _('Saferpay')
    is_enabled = False
    is_meta = True

    def __init__(self, event: Event):
        super().__init__(event)
        self.settings = SettingsSandbox('payment', 'saferpay', event)

    @property
    def settings_form_fields(self):
        """API credential fields followed by one checkbox per payment method,
        with the inherited base fields appended and '_enabled' moved first."""
        credential_fields = [
            ('endpoint',
             forms.ChoiceField(
                 label=_('Endpoint'),
                 initial='live',
                 choices=(
                     ('live', pgettext('saferpay', 'Live')),
                     ('test', pgettext('saferpay', 'Testing')),
                 ),
             )),
            ('api_user', forms.CharField(label=_('API Username'))),
            ('api_pass', forms.CharField(label=_('API Password'))),
            ('customer_id', forms.CharField(label=_('Customer ID'))),
            ('terminal_id', forms.CharField(label=_('Terminal ID'))),
        ]
        method_labels = [
            ('visa', _('VISA')),
            ('mastercard', _('MasterCard')),
            ('diners', _('Diners')),
            ('jcb', _('JCB')),
            ('amex', _('American Express')),
            ('bancontact', _('Bancontact')),
            ('eprzelewy', _('ePrzelewy')),
            ('eps', _('eps')),
            ('giropay', _('giropay')),
            ('ideal', _('iDEAL')),
            # paydirekt is deliberately not offered: its flow is documented as
            # being different from the others (it has a payment state and uses
            # callbacks) and could not be tested.
            ('paypal', _('PayPal')),
            ('postfinance_card', _('PostFinance Card')),
            ('postfinance_efinance', _('PostFinance eFinance')),
            ('sepadebit', _('SEPA Direct Debit')),
            ('sofort', _('Sofort')),
        ]
        method_fields = [
            ('method_{}'.format(name),
             forms.BooleanField(label=label, required=False))
            for name, label in method_labels
        ]
        d = OrderedDict(
            credential_fields + method_fields + list(super().settings_form_fields.items())
        )
        d.move_to_end('_enabled', last=False)
        return d
class SaferpayMethod(BasePaymentProvider):
    """
    Common implementation shared by all concrete Saferpay payment methods.

    Subclasses only set `method` (the settings-key suffix and identifier
    part), `payment_methods` (Saferpay API payment-method codes) and the
    `refunds_allowed` / `cancel_flow` capability flags.
    """
    method = ''
    abort_pending_allowed = False
    refunds_allowed = True
    # If True, a full refund first tries Transaction/Cancel (voiding an
    # uncaptured authorization) before falling back to Transaction/Refund.
    cancel_flow = True
    payment_methods = []

    def __init__(self, event: Event):
        super().__init__(event)
        # All Saferpay methods share the single 'saferpay' settings namespace
        # configured through SaferpaySettingsHolder.
        self.settings = SettingsSandbox('payment', 'saferpay', event)

    @property
    def settings_form_fields(self):
        # Configuration lives on SaferpaySettingsHolder; methods expose none.
        return {}

    @property
    def identifier(self):
        return 'saferpay_{}'.format(self.method)

    @property
    def is_enabled(self) -> bool:
        # Enabled only if the plugin is on AND this method's checkbox is set.
        return self.settings.get('_enabled', as_type=bool) and self.settings.get('method_{}'.format(self.method),
                                                                                 as_type=bool)

    def payment_refund_supported(self, payment: OrderPayment) -> bool:
        return self.refunds_allowed

    def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
        return self.refunds_allowed

    def payment_prepare(self, request, payment):
        return self.checkout_prepare(request, None)

    def payment_is_valid_session(self, request: HttpRequest):
        # Nothing is collected client-side; the redirect flow validates itself.
        return True

    def payment_form_render(self, request) -> str:
        template = get_template('pretix_saferpay/checkout_payment_form.html')
        ctx = {'request': request, 'event': self.event, 'settings': self.settings}
        return template.render(ctx)

    def checkout_confirm_render(self, request) -> str:
        template = get_template('pretix_saferpay/checkout_payment_confirm.html')
        ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'provider': self}
        return template.render(ctx)

    def payment_can_retry(self, payment):
        return self._is_still_available(order=payment.order)

    def payment_pending_render(self, request, payment) -> str:
        """Render the 'payment pending' box shown to the customer."""
        if payment.info:
            payment_info = json.loads(payment.info)
        else:
            payment_info = None
        template = get_template('pretix_saferpay/pending.html')
        ctx = {
            'request': request,
            'event': self.event,
            'settings': self.settings,
            'provider': self,
            'order': payment.order,
            'payment': payment,
            'payment_info': payment_info,
        }
        return template.render(ctx)

    def payment_control_render(self, request, payment) -> str:
        """Render the payment details box in the organizer backend."""
        if payment.info:
            payment_info = json.loads(payment.info)
            if 'amount' in payment_info:
                # Saferpay reports minor units; convert for display.
                payment_info['amount'] /= 10 ** settings.CURRENCY_PLACES.get(self.event.currency, 2)
        else:
            payment_info = None
        template = get_template('pretix_saferpay/control.html')
        ctx = {
            'request': request,
            'event': self.event,
            'settings': self.settings,
            'payment_info': payment_info,
            'payment': payment,
            'method': self.method,
            'provider': self,
        }
        return template.render(ctx)

    def execute_refund(self, refund: OrderRefund):
        """Refund via Saferpay.

        Full refunds first attempt Transaction/Cancel (voiding an uncaptured
        authorization); if the transaction is already captured, falls back to
        Transaction/Refund against the stored CaptureId. A refund of an
        AUTHORIZED refund transaction is captured immediately.
        """
        d = refund.payment.info_data
        try:
            if self.cancel_flow and refund.amount == refund.payment.amount:
                if 'Id' not in d:
                    raise PaymentException(_('The payment has not been captured successfully and can therefore not be '
                                             'refunded.'))
                req = self._post('Payment/v1/Transaction/Cancel', json={
                    "RequestHeader": {
                        "SpecVersion": "1.10",
                        "CustomerId": self.settings.customer_id,
                        "RequestId": str(uuid.uuid4()),
                        "RetryIndicator": 0
                    },
                    "TransactionReference": {
                        "TransactionId": d.get('Id')
                    }
                })
                if req.status_code == 200:
                    refund.info = req.text
                    refund.save(update_fields=['info'])
                    refund.done()
                    # BUG FIX: must stop here — the cancel voided the full
                    # authorization, so the Refund call below must not run.
                    # Previously execution fell through into the error check,
                    # where the success payload (which carries no 'ErrorName')
                    # raised KeyError after the refund was already done.
                    return
                try:
                    err = req.json()
                except:
                    req.raise_for_status()
                else:
                    # These errors mean the transaction was already captured,
                    # so a real Refund is required; anything else is fatal.
                    if err['ErrorName'] not in ('ACTION_NOT_SUPPORTED', 'TRANSACTION_ALREADY_CAPTURED', 'TRANSACTION_IN_WRONG_STATE'):
                        req.raise_for_status()
            if 'CaptureId' not in d:
                raise PaymentException(_('The payment has not been captured successfully and can therefore not be '
                                         'refunded.'))
            req = self._post('Payment/v1/Transaction/Refund', json={
                "RequestHeader": {
                    "SpecVersion": "1.10",
                    "CustomerId": self.settings.customer_id,
                    "RequestId": str(uuid.uuid4()),
                    "RetryIndicator": 0
                },
                "Refund": {
                    "Amount": {
                        "Value": str(self._decimal_to_int(refund.amount)),
                        "CurrencyCode": self.event.currency
                    },
                    "OrderId": "{}-{}-R-{}".format(self.event.slug.upper(), refund.order.code, refund.local_id),
                    "Description": "Order {}-{}".format(self.event.slug.upper(), refund.order.code),
                },
                "CaptureReference": {
                    "CaptureId": d.get('CaptureId')
                }
            })
            req.raise_for_status()
            refund.info_data = req.json()
            refund.save(update_fields=['info'])
            if refund.info_data['Transaction'].get('Status') == 'AUTHORIZED':
                # The refund transaction itself needs an explicit capture.
                req = self._post('Payment/v1/Transaction/Capture', json={
                    "RequestHeader": {
                        "SpecVersion": "1.10",
                        "CustomerId": self.settings.customer_id,
                        "RequestId": str(uuid.uuid4()),
                        "RetryIndicator": 0
                    },
                    "TransactionReference": {
                        "TransactionId": refund.info_data['Transaction'].get('Id')
                    }
                })
                req.raise_for_status()
                data = req.json()
                if data['Status'] == 'CAPTURED':
                    refund.order.log_action('pretix_saferpay.event.paid')
                    trans = refund.info_data
                    trans['Transaction']['Status'] = 'CAPTURED'
                    trans['Transaction']['CaptureId'] = data['CaptureId']
                    refund.info = json.dumps(trans)
                    refund.save(update_fields=['info'])
                    refund.done()
        except HTTPError:
            # `req` is always bound here: raise_for_status() only runs after
            # the corresponding _post() assignment.
            logger.exception('Saferpay error: %s' % req.text)
            try:
                refund.info_data = req.json()
            except:
                refund.info_data = {
                    'error': True,
                    'detail': req.text
                }
            refund.state = OrderRefund.REFUND_STATE_FAILED
            refund.save()
            refund.order.log_action('pretix.event.order.refund.failed', {
                'local_id': refund.local_id,
                'provider': refund.provider,
                'data': refund.info_data
            })
            raise PaymentException(_('We had trouble communicating with Saferpay. Please try again and get in touch '
                                     'with us if this problem persists.'))

    @property
    def test_mode_message(self):
        if self.settings.endpoint == 'test':
            return _('The Saferpay plugin is operating in test mode. No money will actually be transferred.')
        return None

    def _post(self, endpoint, *args, **kwargs):
        """POST to the live or test Saferpay API with basic auth."""
        r = requests.post(
            'https://{env}.saferpay.com/api/{ep}'.format(
                env='www' if self.settings.get('endpoint') == 'live' else 'test',
                ep=endpoint,
            ),
            auth=(self.settings.get('api_user'), self.settings.get('api_pass')),
            *args, **kwargs
        )
        return r

    def _get(self, endpoint, *args, **kwargs):
        """GET from the live or test Saferpay API with basic auth."""
        r = requests.get(
            'https://{env}.saferpay.com/api/{ep}'.format(
                env='www' if self.settings.get('endpoint') == 'live' else 'test',
                ep=endpoint,
            ),
            auth=(self.settings.get('api_user'), self.settings.get('api_pass')),
            *args, **kwargs
        )
        return r

    def get_locale(self, language):
        """Map a pretix locale to a Saferpay-supported one, default 'en'."""
        saferpay_locales = {
            'de', 'en', 'fr', 'da', 'cs', 'es', 'et', 'hr', 'it', 'hu', 'lv', 'lt', 'nl', 'nn',
            'pl', 'pt', 'ru', 'ro', 'sk', 'sl', 'fi', 'sv', 'tr', 'el', 'ja', 'zh'
        }
        if language[:2] in saferpay_locales:
            return language[:2]
        return 'en'

    def _amount_to_decimal(self, cents):
        """Convert Saferpay minor units to a Decimal in the event currency."""
        places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
        return round_decimal(float(cents) / (10 ** places), self.event.currency)

    def _decimal_to_int(self, amount):
        """Convert a decimal amount to Saferpay minor units."""
        places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
        return int(amount * 10 ** places)

    def _get_payment_page_init_body(self, payment):
        """Build the PaymentPage/Initialize request body for `payment`."""
        b = {
            "RequestHeader": {
                "SpecVersion": "1.10",
                "CustomerId": self.settings.customer_id,
                "RequestId": str(uuid.uuid4()),
                "RetryIndicator": 0,
                "ClientInfo": {
                    "ShopInfo": "pretix",
                }
            },
            "TerminalId": self.settings.terminal_id,
            "Payment": {
                "Amount": {
                    "Value": str(self._decimal_to_int(payment.amount)),
                    "CurrencyCode": self.event.currency
                },
                "OrderId": "{}-{}-P-{}".format(self.event.slug.upper(), payment.order.code, payment.local_id),
                "Description": "Order {}-{}".format(self.event.slug.upper(), payment.order.code),
                "PayerNote": "{}-{}".format(self.event.slug.upper(), payment.order.code),
            },
            "PaymentMethods": self.payment_methods,
            "Payer": {
                "LanguageCode": self.get_locale(payment.order.locale),
            },
            # The hash ties the return URL to the order secret so the URL
            # cannot be guessed for foreign orders.
            "ReturnUrls": {
                "Success": build_absolute_uri(self.event, 'plugins:pretix_saferpay:return', kwargs={
                    'order': payment.order.code,
                    'payment': payment.pk,
                    'hash': hashlib.sha1(payment.order.secret.lower().encode()).hexdigest(),
                    'action': 'success'
                }),
                "Fail": build_absolute_uri(self.event, 'plugins:pretix_saferpay:return', kwargs={
                    'order': payment.order.code,
                    'payment': payment.pk,
                    'hash': hashlib.sha1(payment.order.secret.lower().encode()).hexdigest(),
                    'action': 'fail'
                }),
                "Abort": build_absolute_uri(self.event, 'plugins:pretix_saferpay:return', kwargs={
                    'order': payment.order.code,
                    'payment': payment.pk,
                    'hash': hashlib.sha1(payment.order.secret.lower().encode()).hexdigest(),
                    'action': 'abort'
                }),
            },
            "Notification": {
                "NotifyUrl": build_absolute_uri(self.event, 'plugins:pretix_saferpay:webhook', kwargs={
                    'payment': payment.pk,
                }),
            },
            "BillingAddressForm": {
                "Display": False
            },
            "DeliveryAddressForm": {
                "Display": False
            }
        }
        return b

    def execute_payment(self, request: HttpRequest, payment: OrderPayment):
        """Initialize a Saferpay payment page and redirect the customer to it."""
        try:
            req = self._post('Payment/v1/PaymentPage/Initialize', json=self._get_payment_page_init_body(payment))
            req.raise_for_status()
        except HTTPError:
            logger.exception('Saferpay error: %s' % req.text)
            try:
                payment.info_data = req.json()
            except:
                payment.info_data = {
                    'error': True,
                    'detail': req.text
                }
            payment.state = OrderPayment.PAYMENT_STATE_FAILED
            payment.save()
            payment.order.log_action('pretix.event.order.payment.failed', {
                'local_id': payment.local_id,
                'provider': payment.provider,
                'data': payment.info_data
            })
            raise PaymentException(_('We had trouble communicating with Saferpay. Please try again and get in touch '
                                     'with us if this problem persists.'))
        data = req.json()
        payment.info = json.dumps(data)
        payment.state = OrderPayment.PAYMENT_STATE_CREATED
        payment.save()
        request.session['payment_saferpay_order_secret'] = payment.order.secret
        return self.redirect(request, data.get('RedirectUrl'))

    def redirect(self, request, url):
        """Redirect to `url`; methods that refuse to run inside an iframe are
        routed through a signed redirect view that breaks out of the frame."""
        if request.session.get('iframe_session', False) and self.method in ('paypal', 'sofort', 'giropay', 'paydirekt'):
            signer = signing.Signer(salt='safe-redirect')
            return (
                build_absolute_uri(request.event, 'plugins:pretix_saferpay:redirect') + '?url=' +
                urllib.parse.quote(signer.sign(url))
            )
        else:
            return str(url)

    def shred_payment_info(self, obj: OrderPayment):
        """Blank out personal data in the stored payment info (GDPR shred)."""
        if not obj.info:
            return
        d = json.loads(obj.info)
        if 'details' in d:
            d['details'] = {
                k: '█' for k in d['details'].keys()
            }
        d['_shredded'] = True
        obj.info = json.dumps(d)
        obj.save(update_fields=['info'])
class SaferpayCC(SaferpayMethod):
    """Credit card method: the brand list is assembled from the per-brand
    checkboxes instead of a fixed `payment_methods` class attribute."""
    method = 'creditcard'
    verbose_name = _('Credit card via Saferpay')
    public_name = _('Credit card')

    @property
    def payment_methods(self):
        """Saferpay brand codes for every enabled card brand."""
        enabled = []
        for setting, brand in (
            ('method_visa', 'VISA'),
            ('method_diners', 'DINERS'),
            ('method_jcb', 'JCB'),
            ('method_mastercard', 'MASTERCARD'),
        ):
            if self.settings.get(setting, as_type=bool):
                enabled.append(brand)
        return enabled

    @property
    def is_enabled(self) -> bool:
        """Active only when the plugin is on and at least one brand is."""
        return self.settings.get('_enabled', as_type=bool) and self.payment_methods
# Concrete Saferpay methods. Each subclass only declares its settings key
# (`method`), display names and the Saferpay API payment-method codes;
# `refunds_allowed` / `cancel_flow` are overridden where the scheme does not
# support refunding or voiding.
class SaferpayBancontact(SaferpayMethod):
    method = 'bancontact'
    verbose_name = _('Bancontact via Saferpay')
    public_name = _('Bancontact')
    payment_methods = ["BANCONTACT"]
class SaferpayBanktransfer(SaferpayMethod):
    # Note: class is named "Banktransfer" but implements the Polish
    # ePrzelewy scheme (matches the 'method_eprzelewy' setting).
    method = 'eprzelewy'
    verbose_name = _('ePrzelewy via Saferpay')
    public_name = _('ePrzelewy')
    payment_methods = ["EPRZELEWY"]
class SaferpayEPS(SaferpayMethod):
    method = 'eps'
    verbose_name = _('EPS via Saferpay')
    public_name = _('eps')
    refunds_allowed = False
    cancel_flow = False
    payment_methods = ["EPS"]
class SaferpayGiropay(SaferpayMethod):
    method = 'giropay'
    verbose_name = _('giropay via Saferpay')
    public_name = _('giropay')
    refunds_allowed = False
    cancel_flow = False
    payment_methods = ["GIROPAY"]
class SaferpayIdeal(SaferpayMethod):
    method = 'ideal'
    verbose_name = _('iDEAL via Saferpay')
    public_name = _('iDEAL')
    refunds_allowed = False
    cancel_flow = False
    payment_methods = ["IDEAL"]
class SaferpayPaydirekt(SaferpayMethod):
    # Not selectable: the settings holder deliberately omits the
    # 'method_paydirekt' checkbox (flow could not be tested), so
    # is_enabled can never be True for this class.
    method = 'paydirekt'
    verbose_name = _('paydirekt via Saferpay')
    public_name = _('paydirekt')
    payment_methods = ["PAYDIREKT"]
class SaferpayPayPal(SaferpayMethod):
    method = 'paypal'
    verbose_name = _('PayPal via Saferpay')
    public_name = _('PayPal')
    payment_methods = ["PAYPAL"]
class SaferpayPostfinanceCard(SaferpayMethod):
    method = 'postfinance_card'
    verbose_name = _('PostFinance Card via Saferpay')
    public_name = _('PostFinance Card')
    payment_methods = ["POSTCARD"]
class SaferpayPostfinanceEfinance(SaferpayMethod):
    """PostFinance eFinance payment method."""
    # BUG FIX: `method` was 'postfinance_card', which (a) collided with
    # SaferpayPostfinanceCard's identifier 'saferpay_postfinance_card' and
    # (b) made is_enabled read the wrong toggle — the settings holder defines
    # 'method_postfinance_efinance' for this provider.
    method = 'postfinance_efinance'
    verbose_name = _('PostFinance eFinance via Saferpay')
    public_name = _('PostFinance eFinance')
    payment_methods = ["POSTFINANCE"]
class SaferpaySepadebit(SaferpayMethod):
    # Direct debit can be charged back but not refunded through the API;
    # the cancel flow is still allowed (inherited cancel_flow = True).
    method = 'sepadebit'
    verbose_name = _('SEPA Direct Debit via Saferpay')
    public_name = _('SEPA Direct Debit')
    refunds_allowed = False
    payment_methods = ["DIRECTDEBIT"]
class SaferpaySofort(SaferpayMethod):
    method = 'sofort'
    verbose_name = _('Sofort via Saferpay')
    public_name = _('Sofort')
    refunds_allowed = False
    cancel_flow = False
    payment_methods = ["SOFORT"]
|
# -*- coding: utf-8 -*-
from data.reader import wiki_from_pickles, corpus_to_pickle
from filtering.speaker_restriction import filter_speaker_restrict
import argparse
def parse_args():
    """Parse --lang, --n_tokens and --hist_len from the command line and
    return them as a (lang, n_tokens, hist_len) tuple."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", type=str)
    parser.add_argument("--n_tokens", type=int)
    parser.add_argument("--hist_len", type=int,
                        help="The history length for the sampling constraint.")
    parsed = parser.parse_args()
    return parsed.lang, parsed.n_tokens, parsed.hist_len
if __name__ == "__main__":
lang, n, hist_len = parse_args()
m = 10
wiki = list(wiki_from_pickles("data/"+lang+"_pkl"))
sents = [s for a in wiki for s in a]
for m_i in range(m):
print("started ", m_i, flush=True)
filtered = list(filter_speaker_restrict(sents, n, hist_len))
print("filtered ", m_i, flush=True)
name = "_".join((str(n), str(hist_len), str(m_i)))
corpus_to_pickle(filtered, "results/" + lang + "/SRF", name) |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import logging
import traceback
from thrift.async_common import (
AsyncioRpcConnectionContext,
FramedProtocol,
THeaderProtocol,
ThriftHeaderClientProtocolBase,
TReadWriteBuffer,
WrappedTransport,
)
from thrift.server.TServer import TServerEventHandler
from thrift.Thrift import (
TException,
TProcessor,
)
__all__ = [
"ThriftAsyncServerFactory",
"ThriftClientProtocolFactory",
"ThriftServerProtocolFactory",
]
logger = logging.getLogger(__name__)
#
# Thrift server support
#
async def ThriftAsyncServerFactory(
    processor,
    *,
    interface=None,
    port=0,
    loop=None,
    nthreads=None,
    sock=None,
    backlog=100,
    ssl=None,
    event_handler=None,
    protocol_factory=None
):
    """
    ThriftAsyncServerFactory(processor) -> asyncio.Server
    asyncio.Server factory for Thrift processors. In the spirit of "there
    should be one obvious way to do it", this server only supports the new
    THeader protocol.
    If `interface` is None (the default), listens on all interfaces. `port` is
    0 by default, which makes the OS allocate the port. Enumerate the returned
    server's "sockets" attribute to know the port in this case.
    If not given, the default event loop is used. If `nthreads` is given, the
    default executor for the event loop is set to a thread pool of up to
    `nthreads`.
    ssl is an instance of ssl.SSLContext. If None (default) or False SSL/TLS is
    not used.
    event_handler must be a subclass of thrift.server.TServer. If None,
    thrift.server.TServer.TServerEventHandler is used. Specify a custom handler
    for custom event handling (e.g. handling new connections)
    protocol_factory is a function that takes a triplet of
    (processor, event_handler, loop=None) and returns a `asyncio.Protocol` instance
    that will be passed to a call to `asyncio.create_server`. processor will be a
    subclass of `TProcessor`, event_handler will be a subclass of `TServer`, and
    loop is an `Optional[asyncio.AbstractEventLoop]`. If protocol_factory is None
    `ThriftHeaderServerProtocol` is used.
    Notes about the processor method handling:
    1. By default all methods are executed synchronously on the event loop.
       This can lead to poor performance if a single run takes long to process.
    2. Mark coroutines with `async def` if you wish to use `await`
       to call async services, schedule tasks with customized executors, etc.
    3. Mark methods with @run_on_thread if you wish to run them on the thread
       pool executor. Note that unless you're accessing C extensions which free
       the GIL, this is not going to win you any performance.
    Use this to initialize multiple servers asynchronously::
        loop = asyncio.get_event_loop()
        servers = [
            ThriftAsyncServerFactory(handler1, port=9090, loop=loop),
            ThriftAsyncServerFactory(handler2, port=9091, loop=loop),
        ]
        loop.run_until_complete(asyncio.wait(servers))
        try:
            loop.run_forever()   # Servers are initialized now
        finally:
            for server in servers:
                server.close()
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    if not isinstance(processor, TProcessor):
        try:
            # Not a TProcessor yet: try wrapping the handler object via its
            # generated `_processor_type` attribute.
            processor = processor._processor_type(processor, loop=loop)
        except AttributeError:
            raise TypeError(
                "Unsupported processor type: {}".format(type(processor)),
            )
    if nthreads:
        from concurrent.futures import ThreadPoolExecutor
        # Thread pool used by @run_on_thread methods (see docstring above).
        loop.set_default_executor(
            ThreadPoolExecutor(max_workers=nthreads),
        )
    ehandler = TServerEventHandler() if event_handler is None else event_handler
    protocol_factory = protocol_factory or ThriftHeaderServerProtocol
    pfactory = functools.partial(protocol_factory, processor, ehandler, loop)
    server = await loop.create_server(
        pfactory,
        interface,
        port,
        sock=sock,
        backlog=backlog,
        ssl=ssl,
    )
    if server.sockets:
        # Report the actual bound addresses (port may have been OS-allocated
        # when port=0) to the event handler.
        for socket in server.sockets:
            ehandler.preServe(socket.getsockname())
    return server
def ThriftServerProtocolFactory(processor, server_event_handler, loop=None):
    """Return a zero-argument factory producing ThriftHeaderServerProtocol
    instances, suitable for passing to loop.create_server()."""
    return functools.partial(
        ThriftHeaderServerProtocol,
        processor,
        server_event_handler,
        loop,
    )
class ThriftHeaderServerProtocol(FramedProtocol):
    """Server-side asyncio protocol: processes one THeader-framed Thrift
    request per received frame and writes the response back."""
    def __init__(self, processor, server_event_handler, loop=None):
        super().__init__(loop=loop)
        self.processor = processor
        self.server_event_handler = server_event_handler
        # Per-connection context, created in connection_made().
        self.server_context = None
    async def message_received(self, frame):
        """Process a single framed request through the Thrift processor."""
        # Note: we are using a single `prot` for in and out so that
        # we can support legacy clients that only understand FRAMED.
        # The discovery of what the client supports happens in iprot's
        # transport so we have to reuse a single one here.
        buf = TReadWriteBuffer(frame)
        prot = THeaderProtocol(buf)
        try:
            await self.processor.process(
                prot,
                prot,
                self.server_context,
            )
            msg = buf.getvalue()
            # One-way methods produce no response bytes; skip the write then.
            if len(msg) > 0:
                self.transport.write(msg)
        except TException as e:
            # Application-level Thrift exception: still send whatever
            # response the processor serialized.
            logger.warning("TException while processing request: %s", str(e))
            msg = buf.getvalue()
            if len(msg) > 0:
                self.transport.write(msg)
        except asyncio.CancelledError:
            self.transport.close()
        except BaseException as e:
            # Unexpected failure: log and drop the connection.
            logger.error("Exception while processing request: %s", str(e))
            logger.error(traceback.format_exc())
            self.transport.close()
    def connection_made(self, transport):
        self.upgrade_transport(transport)
    def upgrade_transport(self, transport):
        """Adopt `transport` and notify the event handler of the connection."""
        self.transport = transport
        socket = self.transport.get_extra_info("socket")
        if socket is not None:
            self.server_context = AsyncioRpcConnectionContext(socket)
        self.server_event_handler.newConnection(self.server_context)
    def connection_lost(self, exc):
        self.server_event_handler.connectionDestroyed(self.server_context)
#
# Thrift client support
#
def ThriftClientProtocolFactory(
    client_class,
    loop=None,
    timeouts=None,
    client_type=None,
):
    """Return a zero-argument factory producing ThriftHeaderClientProtocol
    instances for `client_class`, suitable for loop.create_connection()."""
    return functools.partial(
        ThriftHeaderClientProtocol,
        client_class,
        loop,
        timeouts,
        client_type,
    )
class SenderTransport(WrappedTransport):
    """Write-side pump: forwards queued outgoing messages onto the wrapped
    asyncio transport."""
    async def _send(self):
        # Runs until cancelled externally — presumably by WrappedTransport's
        # teardown; confirm in thrift.async_common.
        while True:
            msg = await self._queue.get()
            self._clean_producers()
            self._trans.write(msg)
class ThriftHeaderClientProtocol(ThriftHeaderClientProtocolBase):
    """Client protocol with per-request timeouts and a queued send transport."""
    async def timeout_task(self, fname, seqid, delay):
        """Fire the timeout handler for request `seqid` after `delay` seconds."""
        # BUG FIX: asyncio.sleep()'s `loop` parameter was deprecated in 3.8 and
        # removed in 3.10, where passing it raises TypeError. sleep() always
        # uses the running event loop.
        await asyncio.sleep(delay)
        self._handle_timeout(fname, seqid)
    def wrapAsyncioTransport(self, asyncio_transport):
        # Queueing transport that drains writes onto the asyncio transport.
        return SenderTransport(asyncio_transport, self, self.loop)
|
#!/usr/bin/env python3
from aws_cdk import core
from dockerpipeline.docker_pipeline import DockerPipelineConstruct
from fluxcd.fluxcd_construct import FluxcdConstruct
from cluster.cluster_construct import ClusterConstruct
import os
git_auth_user = os.environ["GIT_AUTH_USER"]
git_auth_key = os.environ["GIT_AUTH_KEY"]
app = core.App()
name = app.node.try_get_context("name")
region = app.node.try_get_context("region")
aws_env = core.Environment(region=region)
stack = core.Stack(scope=app,id=f"{name}-stack",env=aws_env)
cluster_construct = ClusterConstruct(
scope=stack,
id=f"{name}-cluster",
cluster_name=f"{name}-cluster"
)
fluxcd_docker_pipeline = DockerPipelineConstruct(
scope=stack,
id=f"{name}-docker-pipeline"
)
fluxcd_construct = FluxcdConstruct(
scope=stack,
id=f"{name}-fluxcd",
git_user=git_auth_user,
git_password=git_auth_key,
eks_base_cluster=cluster_construct.cluster
)
app.synth() |
"""Add audit_events type, object and created_at indexes
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
    # Set the G-Cloud 7 framework's status to 'open'.
    op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
    # No-op: the previous status value is not recorded here, so there is
    # nothing to restore on downgrade.
    pass
|
from dataclasses import dataclass, field
from datetime import datetime
from typing import ClassVar, Set
import marshmallow # type: ignore
from marshmallow import fields, post_load # type: ignore
from ..json import CamelCaseSchema, Serializable
from ..util import normalize_datetime
class DatasetSchema(CamelCaseSchema):
    """Marshmallow schema (de)serializing Dataset to/from camelCase JSON."""
    id = fields.String()
    int_id = fields.Integer()
    name = fields.String()
    @post_load
    def make(self, data, **kwargs):
        """Build a Dataset instance from the validated payload."""
        return Dataset(**data)
@dataclass(frozen=True)
class Dataset(Serializable):
    """
    A representation of a Dataset on the Pennsieve data platform.
    """
    # Schema instance used by Serializable; unknown JSON keys are ignored.
    __schema__: ClassVar[DatasetSchema] = DatasetSchema(unknown=marshmallow.EXCLUDE)
    # Attribute names exposed through the public (serialized) representation.
    PUBLIC: ClassVar[Set[str]] = set(["id", "int_id", "name"])
    # String node id of the dataset.
    id: str
    # Integer database id of the dataset.
    int_id: int
    # Human-readable dataset name.
    name: str
|
# Environment-specific Django settings overrides (test deployment on
# WebFaction). '{{project_name}}' placeholders are filled in by the
# project template at startproject time.
DEBUG = False
SITE_ID = 3
STATIC_ROOT = '/home/navin/webapps/{{project_name}}t_staticx'
STATIC_URL = 'http://static.test.{{project_name}}.com/'
# Here you can over-ride ADMINS, MANAGERS
# And pretty much everything else
EMAIL_HOST = 'smtp.webfaction.com'
EMAIL_HOST_USER = 'navin_{{project_name}}'
SERVER_EMAIL = 'info@{{project_name}}.com'
DEFAULT_FROM_EMAIL = 'info@{{project_name}}.com'
# If rabbitmq
# BROKER_HOST, BROKER_PORT, BROKER_USER, BROKER_VHOST, BROKER_ROUTING_KEY
# Hooks for the base settings module to extend per environment.
EXTRA_APPS = ()
EXTRA_MIDDLEWARE = ()
EXTRA_CONTEXT_PROCESSORS = ()  # these are template_context_processors
DATABASE_ROUTERS = ()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the ServiceArea model's 'polygon' field to 'area'."""
    dependencies = [
        ('api', '0004_auto_20160206_1539'),
    ]
    operations = [
        migrations.RenameField(
            model_name='servicearea',
            old_name='polygon',
            new_name='area',
        ),
    ]
|
'''
Dimensions in Arrays
* A dimension in arrays is one level of array depth (nested arrays).
* nested array: are arrays that have arrays as their elements.
0-D Arrays
* 0-D arrays, or Scalars, are the elements in an array. Each value in an array is a 0-D array.
1-D Arrays
* An array that has 0-D arrays as its elements is called uni-dimensional or 1-D array.
2-D Arrays
* An array that has 1-D arrays as its elements is called a 2-D array.
3-D Arrays
* An array that has 2-D arrays (matrices) as its elements is called 3-D array.
Check Number of Dimensions?
* NumPy Arrays provides the ndim attribute that returns an integer that tells us how many dimensions the array have.
'''
import numpy as np
# Create and describe arrays of increasing dimensionality (0-D through 3-D).
def _describe(a):
    """Print an array, its concrete type and its number of dimensions."""
    print(a)
    print(type(a))
    print('Dimensión: ', a.ndim)

# 0-D (scalar) array holding the single value 42
arr0 = np.array(42)
_describe(arr0)
print('\n')
# 1-D array of the values 1..5
arr1 = np.array([1, 2, 3, 4, 5])
_describe(arr1)
print('\n')
# 2-D array built from two 1-D rows
arr2 = np.array([[1, 2, 3], [4, 5, 6]])
_describe(arr2)
print('\n')
# 3-D array built from two identical 2-D matrices
arr3 = np.array([ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]] ])
_describe(arr3)
|
'''
Given a positive integer n, find the least number of perfect square numbers
(for example, 1, 4, 9, 16, ...) which sum to n.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
'''
class SolutionDP(object):
    """Bottom-up dynamic programming for LeetCode 279 (Perfect Squares)."""
    def numSquares(self, n):
        """
        Return the least number of perfect squares summing to ``n``.

        :type n: int
        :rtype: int
        """
        # Bug fix: the original called math.sqrt(), but this module never
        # imports ``math``, so the method raised NameError. Use n ** 0.5,
        # matching SolutionGreedy below. Starting the range at 1 also drops
        # the useless square 0 the original generated.
        square_nums = [i ** 2 for i in range(1, int(n ** 0.5) + 1)]
        dp = [float('inf')] * (n + 1)
        # bottom case: zero squares sum to 0
        dp[0] = 0
        for i in range(1, n + 1):
            for square in square_nums:
                if i < square:
                    break  # squares are ascending; the rest are too large
                dp[i] = min(dp[i], dp[i - square] + 1)
        return dp[-1]
class SolutionGreedy:
    """Greedy enumeration: try answer sizes 1, 2, ... until one decomposes n."""
    def numSquares(self, n):
        """Return the least number of perfect squares summing to ``n``."""
        squares = {k * k for k in range(1, int(n ** 0.5) + 1)}

        def decomposable(remainder, count):
            # True when ``remainder`` splits into exactly ``count`` squares,
            # e.g. remainder=12, count=3 -> True; count=2 -> False.
            if count == 1:
                return remainder in squares
            return any(decomposable(remainder - s, count - 1) for s in squares)

        for count in range(1, n + 1):
            if decomposable(n, count):
                return count
def grasp2dict(res):
    """
    Flatten a grasp-planner result object into a JSON-serializable dict.

    ``res`` is expected to expose ``q_value`` and a ``grasp`` with
    ``center.x/.y``, ``angle``, a 3-component ``approach_axis``, a
    2-component ``axis``, ``width``, ``depth`` and ``approach_angle``
    -- TODO confirm against the planner's message definition.
    """
    grasp = res.grasp
    pred = {
        "x": grasp.center.x,
        "y": grasp.center.y,
        "angle": grasp.angle,
        "q": grasp_q(res),
        "approachAxis": [int(grasp.approach_axis[0]), int(grasp.approach_axis[1]), int(grasp.approach_axis[2])],
        # Bug fix: the original emitted axis[0] twice; export both components.
        "axis": [grasp.axis[0], grasp.axis[1]],
        "width": grasp.width,
        "depth": grasp.depth,
        "approachAngle": grasp.approach_angle
    }
    return pred

def grasp_q(res):
    # Tiny accessor kept separate so the dict literal above stays declarative.
    return res.q_value
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'video_maker_prompt_ui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_VideoMakerPrompt(object):
    """
    Auto-generated (pyuic5 from video_maker_prompt_ui.ui) UI for a fixed-size
    'Start Video Maker?' yes/no prompt. Prefer regenerating over hand-editing.
    """
    def setupUi(self, VideoMakerPrompt):
        """Build the fixed 357x150 prompt: a label and Yes/No buttons framed by lines."""
        VideoMakerPrompt.setObjectName("VideoMakerPrompt")
        VideoMakerPrompt.resize(357, 150)
        # Fixed size policy: the dialog cannot be resized by the user.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(VideoMakerPrompt.sizePolicy().hasHeightForWidth())
        VideoMakerPrompt.setSizePolicy(sizePolicy)
        VideoMakerPrompt.setMinimumSize(QtCore.QSize(357, 150))
        VideoMakerPrompt.setMaximumSize(QtCore.QSize(357, 151))
        VideoMakerPrompt.setSizeIncrement(QtCore.QSize(336, 0))
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(VideoMakerPrompt)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Left decorative vertical line.
        self.line_4 = QtWidgets.QFrame(VideoMakerPrompt)
        self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_4.setObjectName("line_4")
        self.horizontalLayout_2.addWidget(self.line_4)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Top decorative horizontal line.
        self.line_3 = QtWidgets.QFrame(VideoMakerPrompt)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.verticalLayout_2.addWidget(self.line_3)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        # Prompt label row.
        self.horizontalLayout_75 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_75.setObjectName("horizontalLayout_75")
        self.label_2 = QtWidgets.QLabel(VideoMakerPrompt)
        self.label_2.setMinimumSize(QtCore.QSize(0, 27))
        self.label_2.setMaximumSize(QtCore.QSize(16777215, 27))
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_75.addWidget(self.label_2)
        self.verticalLayout_2.addLayout(self.horizontalLayout_75)
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem1)
        self.line_5 = QtWidgets.QFrame(VideoMakerPrompt)
        self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_5.setObjectName("line_5")
        self.verticalLayout_2.addWidget(self.line_5)
        # Yes/No button row (fixed 80x27 buttons).
        self.horizontalLayout_76 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_76.setObjectName("horizontalLayout_76")
        self.yes_button = QtWidgets.QPushButton(VideoMakerPrompt)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.yes_button.sizePolicy().hasHeightForWidth())
        self.yes_button.setSizePolicy(sizePolicy)
        self.yes_button.setMinimumSize(QtCore.QSize(80, 27))
        self.yes_button.setMaximumSize(QtCore.QSize(80, 27))
        self.yes_button.setToolTip("")
        self.yes_button.setObjectName("yes_button")
        self.horizontalLayout_76.addWidget(self.yes_button)
        self.no_button = QtWidgets.QPushButton(VideoMakerPrompt)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.no_button.sizePolicy().hasHeightForWidth())
        self.no_button.setSizePolicy(sizePolicy)
        self.no_button.setMinimumSize(QtCore.QSize(80, 27))
        self.no_button.setMaximumSize(QtCore.QSize(80, 27))
        self.no_button.setToolTip("")
        self.no_button.setObjectName("no_button")
        self.horizontalLayout_76.addWidget(self.no_button)
        self.verticalLayout_2.addLayout(self.horizontalLayout_76)
        self.line_2 = QtWidgets.QFrame(VideoMakerPrompt)
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.verticalLayout_2.addWidget(self.line_2)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.horizontalLayout_2.addLayout(self.horizontalLayout)
        # Right decorative vertical line.
        self.line = QtWidgets.QFrame(VideoMakerPrompt)
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.horizontalLayout_2.addWidget(self.line)
        self.retranslateUi(VideoMakerPrompt)
        QtCore.QMetaObject.connectSlotsByName(VideoMakerPrompt)
    def retranslateUi(self, VideoMakerPrompt):
        """Apply the translatable window title and widget texts."""
        _translate = QtCore.QCoreApplication.translate
        VideoMakerPrompt.setWindowTitle(_translate("VideoMakerPrompt", "𝕀mage𝔼vo"))
        self.label_2.setText(_translate("VideoMakerPrompt", "Start Video Maker?"))
        self.yes_button.setText(_translate("VideoMakerPrompt", "Yes"))
        self.no_button.setText(_translate("VideoMakerPrompt", "No"))
|
#-*- coding: utf-8 -*-
"""
Copyright (C) 2013 Roman Bondarenko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from argparse import ArgumentParser
from EVBE.EVBFile import EVBFile
from EVBE.EVBContainer import Container
from aplib import aPLib
__version__ = '1.0.9'
__author__ = 'Roman Bondarenko'
__contact__ = 'roman@reu.org.ua'
if __name__ == '__main__':
    # CLI: inspect an Enigma Virtual Box container, or extract it with -e.
    parser = ArgumentParser(description='Enigma Virtual Box Extractor v%s, by Roman Bondarenko' % __version__)
    parser.add_argument('file', help='input file')
    parser.add_argument('-e', action='store_true', help='extract data')
    parser.add_argument('-o', dest='output_directory',
                        help='path to folder into which the data will be retrieved (default: <file name>_data)')
    args = parser.parse_args()
    file_ = EVBFile(args.file)
    if not file_.read():
        sys.exit(2)
    container = Container(file_.data, file_.offset)
    if container.read_header():
        if not args.e:
            # Without -e, just print container metadata.
            container.info()
        else:
            # NOTE(review): this initial value is never used -- ``offset`` is
            # reassigned from fs['offset'] for every file entry below, and the
            # later ``offset += size`` increment is likewise overwritten.
            offset = 0
            output = args.output_directory
            if output is None:
                # Default output dir: input filename minus its 4-char extension.
                output = args.file[:-4] + '_data'
            if not os.path.exists(output):
                os.mkdir(output)
            for fs in container.read_data():
                name = os.path.join(output, fs['name'])
                if not 'offset' in fs.keys():
                    # Entries without an offset describe directories.
                    if not os.path.exists(name):
                        os.mkdir(name)
                else:
                    with open(name, 'wb') as f:
                        offset, size = fs['offset'], fs['size']
                        try:
                            data = file_.data[offset:offset + size]
                            offset += size
                            if container.header.compress_files == 1:
                                # Payload is aPLib-compressed when flagged.
                                a = aPLib(data)
                                data = a.depack()
                        except Exception as e:
                            print(e)
                        else:
                            # Only write when slicing/decompression succeeded.
                            f.write(data)
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from pxr import Usd, UsdSkel, UsdGeom, Vt, Sdf
import unittest
class TestUsdSkelSkinningQuery(unittest.TestCase):
    """Tests for UsdSkel skinning queries; relies on jointInfluences.usda and skinning.usda fixtures."""
    def test_JointInfluences(self):
        """Tests interpretation of joint influences."""
        testFile = "jointInfluences.usda"
        stage = Usd.Stage.Open(testFile)
        rootPath = "/JointInfluences"
        cache = UsdSkel.Cache()
        root = UsdSkel.Root(stage.GetPrimAtPath(rootPath))
        assert cache.Populate(root)
        def _GetSkinningQuery(path):
            # Helper: skinning query for a prim path under the test stage.
            return cache.GetSkinningQuery(stage.GetPrimAtPath(path))
        # Malformed influence authoring must yield invalid (falsy) queries.
        assert not _GetSkinningQuery(
            rootPath+"/ErrorCases/MismatchedInterpolation")
        assert not _GetSkinningQuery(
            rootPath+"/ErrorCases/InvalidInterpolation1")
        assert not _GetSkinningQuery(
            rootPath+"/ErrorCases/InvalidInterpolation2")
        assert not _GetSkinningQuery(
            rootPath+"/ErrorCases/MismatchedElementSize")
        assert not _GetSkinningQuery(
            rootPath+"/ErrorCases/InvalidElementSize")
        #
        # Validate error cases during ComputeJointInfluences()
        #
        query = _GetSkinningQuery(
            rootPath+"/ErrorCases/InvalidArraySize1")
        assert query
        assert not query.ComputeJointInfluences()
        assert not query.ComputeVaryingJointInfluences(10)
        query = _GetSkinningQuery(
            rootPath+"/ErrorCases/InvalidArraySize2")
        assert query
        assert not query.ComputeJointInfluences()
        assert not query.ComputeVaryingJointInfluences(10)
        #
        # The remaining cases should all be valid.
        #
        # Rigid weights: one influence set shared by the whole prim, which
        # ComputeVaryingJointInfluences expands per point.
        query = _GetSkinningQuery(rootPath+"/RigidWeights")
        assert query
        assert query.IsRigidlyDeformed()
        assert query.GetNumInfluencesPerComponent() == 3
        influences = query.ComputeJointInfluences()
        assert influences
        indices,weights = influences
        assert indices == Vt.IntArray([1,2,3])
        assert weights == Vt.FloatArray([5,6,7])
        influences = query.ComputeVaryingJointInfluences(3)
        assert influences
        indices, weights = influences
        assert indices == Vt.IntArray([1,2,3,1,2,3,1,2,3])
        assert weights == Vt.FloatArray([5,6,7,5,6,7,5,6,7])
        # Non-rigid weights: per-point influences pass through unchanged.
        query = _GetSkinningQuery(rootPath+"/NonRigidWeights")
        assert query
        assert not query.IsRigidlyDeformed()
        assert query.GetNumInfluencesPerComponent() == 2
        influences = query.ComputeJointInfluences()
        assert influences
        indices,weights = influences
        assert indices == Vt.IntArray([1,2,3,4])
        assert weights == Vt.FloatArray([5,6,7,8])
        varyingInfluences = query.ComputeVaryingJointInfluences(2)
        assert influences == varyingInfluences
    def test_Skinning(self):
        """Smoke-test BakeSkinning over a whole stage and export the result."""
        testFile = "skinning.usda"
        stage = Usd.Stage.Open(testFile)
        UsdSkel.BakeSkinning(stage.Traverse())
        stage.GetRootLayer().Export("skinning.skinned.usda")
if __name__ == "__main__":
unittest.main()
|
from PIL import Image
import json
import os
from django.conf import settings
from django.db import transaction
from django.http.response import JsonResponse
from dataCRUD.forms import ImgForm
from dataCRUD.models import ImageMetadata, Dataset, WorkspaceDataset
from common.models import UserWorkspace, Workspace
from modules.annotation import DEFAULT_ANNO
IMG_DIR_PATH = getattr(settings, 'IMG_DIR_PATH', None)
def create_image(request, target):
    """
    Upload one or more images into dataset ``target``.

    Reads parallel ``title``/``file`` lists from the multipart POST, saves
    each image under IMG_DIR_PATH/<dataset id>/ and records an
    ImageMetadata row with the default annotation.
    Returns a JsonResponse with ``success`` and, on failure, ``error_msg``.
    """
    try:
        titles = request.POST.getlist('title')
        files = request.FILES.getlist('file')
        form = ImgForm(request.POST, request.FILES)
        if not form.is_valid():
            # Bug fix: an invalid form previously fell through and the view
            # implicitly returned None; report the validation failure instead.
            return JsonResponse({"type": "image_upload", "success": False,
                                 "error_msg": form.errors.as_json()},
                                json_dumps_params={'indent': 2})
        for i, filename in enumerate(titles):
            image = Image.open(files[i])
            width = image.width
            height = image.height
            # Files are stored under a per-dataset directory (created by
            # create_dataset) and exposed via the /res/img/ URL prefix.
            file_loc = IMG_DIR_PATH + str(target.id) + "/" + filename
            url_loc = str(target.id) + "/" + filename
            image.save(file_loc)
            ImageMetadata.objects.create(annotation=DEFAULT_ANNO, image_url="https://oasys.ml/res/img/" +
                                         url_loc, image_name=filename, image_size=str(width)+" "+str(height), dataset_id=target.id)
        return JsonResponse({"type": "image_upload", "success": True}, json_dumps_params={'indent': 2})
    except Exception as e:
        print(e)
        return JsonResponse({"type": "image_upload", "success": False, "error_msg": str(e)}, json_dumps_params={'indent': 2})
def create_dataset(request, target):
    """
    Create a new dataset inside workspace ``target``.

    Expects a JSON body ``{"name": ...}``; rejects duplicate names.
    Returns a JsonResponse with ``success`` and, on failure, ``error_msg``.
    """
    submit = json.loads(request.body.decode("utf-8"))
    try:
        # Atomic: dataset row, link row and name check roll back together.
        with transaction.atomic():
            same_name = Dataset.objects.filter(name=submit["name"])
            if len(same_name) >= 1:
                return JsonResponse({"type": "dataset_create", "success": False, "error_msg": "The name already exists."},
                                    json_dumps_params={'indent': 2})
            new_dataset = Dataset.objects.create(
                name=submit["name"], local_flag=0)
            # NOTE(review): plain ids are passed to the ``workspace``/``dataset``
            # kwargs; if those are ForeignKey fields this should likely be
            # workspace_id=/dataset_id= -- confirm against the model definitions.
            WorkspaceDataset.objects.create(
                workspace=target.id, dataset=new_dataset.id)
            # Ensure the per-workspace image directory exists for uploads.
            directory = IMG_DIR_PATH + str(target.id)
            if not os.path.exists(directory):
                os.makedirs(directory)
            return JsonResponse({"type": "dataset_create", "success": True},
                                json_dumps_params={'indent': 2})
    except Exception as e:
        return JsonResponse({"type": "dataset_create", "success": False, "error_msg": str(e)}, json_dumps_params={'indent': 2})
def create_workspace(request, target):
    """
    Create a new workspace owned by user ``target``.

    Expects a JSON body ``{"name": ...}``; rejects duplicate names.
    Returns a JsonResponse with ``success`` and, on failure, ``error_msg``.
    """
    payload = json.loads(request.body.decode("utf-8"))
    try:
        with transaction.atomic():
            duplicates = Workspace.objects.filter(workspace_name=payload["name"])
            if len(duplicates) >= 1:
                return JsonResponse({"type": "workspace_create", "success": False, "error_msg": "The name already exists."},
                                    json_dumps_params={'indent': 2})
            created = Workspace.objects.create(
                workspace_name=payload["name"], user=target.id)
            UserWorkspace.objects.create(
                user=target.id, workspace=created.id)
            return JsonResponse({"type": "workspace_create", "success": True},
                                json_dumps_params={'indent': 2})
    except Exception as e:
        return JsonResponse({"type": "workspace_create", "success": False, "error_msg": str(e)}, json_dumps_params={'indent': 2})
|
"""
MVPD - Linear Regression Model + L2 Regularization
"""
import sklearn
from sklearn import linear_model
def L2_LR(ROI_1_train, ROI_2_train, ROI_1_test, ROI_2_test, alpha):
    """
    Fit ridge regression (L2-regularized linear model) mapping ROI 1 -> ROI 2.

    Parameters: training/test splits for both ROIs and the regularization
    strength ``alpha``.
    Returns ``(predict_ROI_2_test, err_LR)`` -- the test-set predictions and
    the signed prediction error (prediction minus ground truth).
    """
    # initialize and fit model on training data (alpha passed by keyword
    # to be robust against signature changes)
    ridgereg = linear_model.Ridge(alpha=alpha)
    ridgereg.fit(ROI_1_train, ROI_2_train)
    # predict on test set
    predict_ROI_2_test = ridgereg.predict(ROI_1_test)
    err_LR = predict_ROI_2_test - ROI_2_test
    # NOTE(review): the original also read ridgereg.coef_/.intercept_ into
    # locals that were never used or returned; dropped as dead code.
    return predict_ROI_2_test, err_LR
|
# coding: utf-8
# train_numeric.csv -
# train_date.csv
# In[1]:
# Import the packages
import pandas as pd
import numpy as np
from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import *
from sklearn.ensemble import *
from sklearn import grid_search
from sklearn import preprocessing
# In[2]:
DataPath='C:\\Users\\Jameel shaik\\Downloads\\Dezyre\\HackerDay\\Bosch Performance line\\'
# In[3]:
train_numeric=pd.read_csv(DataPath+'train_numeric.csv',nrows=10000)
train_date=pd.read_csv(DataPath+'train_date.csv',nrows=10000)
#train_cat=pd.read_csv(DataPath+'train_cat.csv',nrows=10000)
# In[4]:
train_numeric.shape # (Rows,Col)
# In[5]:
train_numeric.head()
# In[6]:
train_date.head()
# In[7]:
train_numeric.describe
# In[8]:
data_merge = pd.merge(train_numeric,train_date,on = 'Id')
data_merge.head()
# In[9]:
dataclean = data_merge.dropna(axis=1,thresh = int(len(data_merge)*0.5))
dataclean = dataclean.fillna(0)
# In[10]:
dataclean.head()
# In[11]:
# 1 column: 50% of the data is filled and 50% is empty
# 1) Data imbalance: noisy, overfit
# In[12]:
# label the encoder ( aligning he labels in order)
le = preprocessing.LabelEncoder()
dataclean['Id'] = le.fit_transform(dataclean.Id)
# In[13]:
# Splitting my data into Training and testing by ignoring ID column as its Identical column
featurelist = list(dataclean.columns.values)
featurelist.remove('Id')
featurelist.remove('Response')
features_train,features_test,labels_train,labels_test = cross_validation.train_test_split(dataclean[featurelist],
dataclean['Response'], test_size=0.1, random_state=42)
# Training data
# features_train # ind columns
# labels_train # dependent columns
# Testing Data
# features_test # ind columns
# labels_test# dependent columns
# In[14]:
# 10k -- accuracy 92 % 99 % (on sample the accuray may be higher but when we consider total amount of data we the accuray can goes down to )
# 80% --- 89%
# In[15]:
#########################
######### Naive Bayes###########
##################################
# In[16]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import seaborn as sns
from sklearn.naive_bayes import BernoulliNB
# In[17]:
naive_bayes = BernoulliNB()
naive_bayes.fit(features_train,labels_train)
# In[18]:
p_station = naive_bayes.predict_proba(features_test)
p_station
# In[28]:
# 0 = Not failure, 1 = Failure
pred = naive_bayes.predict(features_test)
pred
# In[29]:
labels_test.shape
# In[30]:
pred.shape
# In[32]:
accuracy = accuracy_score(labels_test,pred)
accuracy
# In[ ]:
#############
## Random Forest Classifier################
#####################
# In[33]:
from sklearn.ensemble import RandomForestClassifier
# In[34]:
clf = RandomForestClassifier(100, max_depth = 20, n_jobs =3)
# In[35]:
clf
# In[36]:
clf.fit(features_train,labels_train)
# In[40]:
accuracy = accuracy_score(labels_test,pred)
accuracy
# In[37]:
pred = clf.predict(features_test)
pred
# In[ ]:
##################
### Grid Search##############
#################
# In[45]:
param_grid= { "criterion" : ['gini','entropy'],
"min_samples_split": [2,4,5,6,7,8,9,10],
"max_depth" : [None,2,4],
"min_samples_leaf" :[1,3,5,6,7,8,10],
'n_estimators':[20,30,50,70],
'n_jobs' :[-1]
}
# In[46]:
modeloptimal = grid_search.GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, scoring='f1', cv=5)
modeloptimal
# In[ ]:
modeloptimal.fit(features_train, labels_train)
# In[ ]:
# Using this we can find the best acuracy model from the above parameter estimators
clf = modeloptimal.best_estimator_
clf
# In[ ]:
pred = clf.predict(features_test)
pred
# In[ ]:
# Bug fix: accuracy_score requires both y_true and y_pred; the original call
# accuracy_score(labels_test) raised TypeError at runtime.
accuracy = accuracy_score(labels_test, pred)
accuracy
# In[19]:
#######################################
### Extra Tree Classifier################
##################################
# In[23]:
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
# In[24]:
clf = ExtraTreesClassifier(n_estimators = 50, n_jobs = -1,min_samples_leaf= 10, verbose = 1)
# In[25]:
clf.fit(features_train,labels_train)
# In[26]:
pred = clf.predict(features_test)
pred
# In[27]:
accuracy = accuracy_score(labels_test, pred)
accuracy
# In[28]:
################################
## xgboost ###########
####################################
# In[29]:
import xgboost as xgb
from sklearn.grid_search import GridSearchCV
# In[30]:
cv_params = {'max_depth': [3,5,7], 'min_child_weight': [1,3,5]}
ind_params = {'learning_rate': 0.1, 'n_estimators': 1000, 'seed':0, 'subsample': 0.8, 'colsample_bytree': 0.8,
'objective': 'binary:logistic'}
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring = 'accuracy', cv = 5, n_jobs = -1)
# In[ ]:
optimized_GBM.fit(features_train, labels_train)
# In[ ]:
optimized_GBM.grid_scores_
# In[ ]:
cv_params = {'learning_rate': [0.1, 0.01], 'subsample': [0.7,0.8,0.9]}
ind_params = {'n_estimators': 1000, 'seed':0, 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': 3, 'min_child_weight': 1}
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring = 'accuracy', cv = 5, n_jobs = -1)
optimized_GBM.fit(features_train, labels_train)
# In[ ]:
optimized_GBM.grid_scores_
# There are a few other parameters we could tune in theory to squeeze out further performance, but this is a good enough starting point.
#
# To increase the performance of XGBoost’s speed through many iterations of the training set, and since we are using only XGBoost’s API and not sklearn’s anymore, we can create a DMatrix. This sorts the data initially to optimize for XGBoost when it builds trees, making the algorithm more efficient. This is especially helpful when you have a very large number of training examples. To create a DMatrix:
# In[ ]:
xgdmat = xgb.DMatrix(features_train, labels_train) # Create our DMatrix to make XGBoost more efficient
# In[ ]:
our_params = {'eta': 0.1, 'seed':0, 'subsample': 0.8, 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth':3, 'min_child_weight':1}
# Grid Search CV optimized settings
cv_xgb = xgb.cv(params = our_params, dtrain = xgdmat, num_boost_round = 3000, nfold = 5,
metrics = ['error'], # Make sure you enter metrics inside a list or you may encounter issues!
early_stopping_rounds = 100) # Look for early stopping that minimizes error
# We can look at our CV results to see how accurate we were with these settings. The output is automatically saved into a pandas dataframe for us.
# In[ ]:
cv_xgb.tail(5)
# In[ ]:
# Now that we have our best settings, let's create this as an XGBoost model object that we can reference later.
# In[ ]:
our_params = {'eta': 0.1, 'seed':0, 'subsample': 0.8, 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth':3, 'min_child_weight':1}
final_gb = xgb.train(our_params, xgdmat, num_boost_round = 432)
# In[31]:
get_ipython().magic(u'matplotlib inline')
import seaborn as sns
sns.set(font_scale = 1.5)
# In[ ]:
xgb.plot_importance(final_gb)
# In[ ]:
importances = final_gb.get_fscore()
importances
# In[ ]:
importance_frame = pd.DataFrame({'Importance': list(importances.values()), 'Feature': list(importances.keys())})
importance_frame.sort_values(by = 'Importance', inplace = True)
importance_frame.plot(kind = 'barh', x = 'Feature', figsize = (8,8), color = 'orange')
# Analyzing Performance on Test Data
#
# The model has now been tuned using cross-validation grid search through the sklearn API and early stopping through the built-in XGBoost API. Now, we can see how it finally performs on the test set. Does it match our CV performance? First, create another DMatrix (this time for the test data).
# In[ ]:
testdmat = xgb.DMatrix(features_test, labels_test)
# In[ ]:
from sklearn.metrics import accuracy_score
y_pred = final_gb.predict(testdmat) # Predict using our testdmat
y_pred
# In[ ]:
accuracy_score(y_pred, labels_test), 1-accuracy_score(y_pred, labels_test)
# In[32]:
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
# In[ ]:
# test set
# Bug fix: the path variable is spelled ``DataPath`` (defined earlier);
# ``Datapath`` raised NameError.
test_numeric = pd.read_csv(DataPath + 'test_numeric.csv')
test_date = pd.read_csv(DataPath + 'test_date.csv')
data_merge = pd.merge(test_numeric, test_date, on='Id', suffixes=('num', 'date'))
#
# In[ ]:
def makesubmit(clf, testdf, featurelist, output="submit.csv"):
    """
    Write a Kaggle-style submission CSV (``Id,Response`` header) for ``testdf``.

    NaNs are filled with 0 before predicting with ``clf`` on the columns in
    ``featurelist``; one line per row is written to ``output``.
    """
    testdf = testdf.fillna(0)
    feature_test = testdf[featurelist]
    pred = clf.predict(feature_test)
    ids = list(testdf['Id'])
    # Context manager guarantees the handle is closed even if a write fails
    # (the original used open()/close() and leaked the handle on error).
    with open(output, 'w') as fout:
        fout.write("Id,Response\n")
        for i, row_id in enumerate(ids):
            fout.write('%s,%s\n' % (str(row_id), str(pred[i])))
# In[ ]:
makesubmit(clf,data_merge,featurelist,output="submit.csv")
# In the first step, we import standard libraries and fix the most essential features as suggested by an XGB
# In[33]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import numpy as np
import pandas as pd
import seaborn as sns
feature_names = ['L3_S38_F3960', 'L3_S33_F3865', 'L3_S38_F3956', 'L3_S33_F3857',
'L3_S29_F3321', 'L1_S24_F1846', 'L3_S32_F3850', 'L3_S29_F3354',
'L3_S29_F3324', 'L3_S35_F3889', 'L0_S1_F28', 'L1_S24_F1844',
'L3_S29_F3376', 'L0_S0_F22', 'L3_S33_F3859', 'L3_S38_F3952',
'L3_S30_F3754', 'L2_S26_F3113', 'L3_S30_F3759', 'L0_S5_F114']
# We determine the indices of the most important features. After that the training data is loaded
# In[38]:
numeric_cols = pd.read_csv(DataPath+"train_numeric.csv", nrows = 10000).columns.values
imp_idxs = [np.argwhere(feature_name == numeric_cols)[0][0] for feature_name in feature_names]
train = pd.read_csv(DataPath+"train_numeric.csv",
index_col = 0, header = 0, usecols = [0, len(numeric_cols) - 1] + imp_idxs)
train = train[feature_names + ['Response']]
# The data is split into positive and negative samples.
# In[39]:
X_neg, X_pos = train[train['Response'] == 0].iloc[:, :-1], train[train['Response']==1].iloc[:, :-1]
# # Univariate characteristics
# In order to understand better the predictive power of single features, we compare the univariate distributions of the most important features. First, we divide the train data into batches column-wise to prepare the data for plotting.
# In[40]:
BATCH_SIZE = 5
train_batch =[pd.melt(train[train.columns[batch: batch + BATCH_SIZE].append(np.array(['Response']))],
id_vars = 'Response', value_vars = feature_names[batch: batch + BATCH_SIZE])
for batch in list(range(0, train.shape[1] - 1, BATCH_SIZE))]
# After this split, we can now draw violin plots. Due to memory reasons, we have to split the presentation into several cells. For many of the distributions there is no clear difference between the positive and negative samples.
# In[41]:
FIGSIZE = (12,16)
_, axs = plt.subplots(len(train_batch), figsize = FIGSIZE)
plt.suptitle('Univariate distributions')
for data, ax in zip(train_batch, axs):
sns.violinplot(x = 'variable', y = 'value', hue = 'Response', data = data, ax = ax, split =True)
# # Correlation structure
# In the previous section we have seen differences between negative and positive samples for univariate characteristics. We go down the rabbit hole a little further and analyze covariances for the negative and positive samples separately.
# In[42]:
FIGSIZE = (13,4)
_, (ax1, ax2) = plt.subplots(1,2, figsize = FIGSIZE)
MIN_PERIODS = 100
triang_mask = np.zeros((X_pos.shape[1], X_pos.shape[1]))
triang_mask[np.triu_indices_from(triang_mask)] = True
ax1.set_title('Negative Class')
sns.heatmap(X_neg.corr(min_periods = MIN_PERIODS), mask = triang_mask, square=True, ax = ax1)
ax2.set_title('Positive Class')
sns.heatmap(X_pos.corr(min_periods = MIN_PERIODS), mask = triang_mask, square=True, ax = ax2)
# The difference between the two matrices is sparse except for three specific feature combinations.
# In[43]:
sns.heatmap(X_pos.corr(min_periods = MIN_PERIODS) -X_neg.corr(min_periods = MIN_PERIODS),
mask = triang_mask, square=True)
# Finally, as in the univariate case, we analyze correlations between missing values in different features.
# In[44]:
nan_pos, nan_neg = np.isnan(X_pos), np.isnan(X_neg)
triang_mask = np.zeros((X_pos.shape[1], X_pos.shape[1]))
triang_mask[np.triu_indices_from(triang_mask)] = True
FIGSIZE = (13,4)
_, (ax1, ax2) = plt.subplots(1,2, figsize = FIGSIZE)
MIN_PERIODS = 100
ax1.set_title('Negative Class')
sns.heatmap(nan_neg.corr(), square=True, mask = triang_mask, ax = ax1)
ax2.set_title('Positive Class')
sns.heatmap(nan_pos.corr(), square=True, mask = triang_mask, ax = ax2)
# For the difference of the missing-value correlation matrices, a striking pattern emerges. A further and more systematic analysis of such missing-value patterns has the potential to beget powerful features.
# In[45]:
sns.heatmap(nan_neg.corr() - nan_pos.corr(), mask = triang_mask, square=True)
# #Hope you have enjoyed the session.. Have a great day ahead.....!!!
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import logging
import os
from smartcard.util import toHexString, toASCIIString, PACK
from model.plugin.plugins.base_plugin import base_plugin
from model.plugin.api.fcp import get_data_length, get_record_count
from model.plugin.api.convert import convert_bcd_to_string, convert_string_to_bcd, convert_arguments_to_dict
from model.plugin.api.select import select_file_in_mf, USIM_FILE_ID
class iccid(base_plugin):
    """Plugin that reads (and optionally rewrites) the EF_ICCID file of a SIM/USIM."""
    def __init__(self):
        # Per-plugin logger named after this source file.
        self.__logging = logging.getLogger(os.path.basename(__file__))
    def summary(self):
        """One-line description shown in plugin listings."""
        return "Display or modify the value of ICCID."
    def version(self):
        """Plugin version string."""
        return "1.00"
    def help(self):
        """Usage text with examples for the interactive shell."""
        return ("Usage:\n"
                "  - iccid [set=iccid] [format=raw]\n"
                "\n"
                "Example:\n"
                "  Original: 89860009191190000108\n"
                "  - iccid\n"
                "    > ICCID: 89860009191190000108\n"
                "  - iccid format=raw\n"
                "    > ICCID: 98 68 00 90 91 11 09 00 10 80\n"
                "  - iccid set=1234\n"
                "    > ICCID: 12340009191190000108\n"
                "  - iccid set=12340009191190004321\n"
                "    > ICCID: 12340009191190004321\n"
                "\n"
                "PS. Suggest to verify ICCID with Luhn algorithm by https://planetcalc.com/2464/ first")
    @property
    def auto_execute(self):
        # Not run automatically on card insertion; invoked on demand.
        return False
    def execute(self, arg_connection, arg_parameter=""):
        """
        Read EF_ICCID and return it as text; with ``set=`` write a new value.

        ``format=raw`` returns the undecoded (BCD nibble-swapped) bytes;
        ``set=prefix`` overwrites only the leading digits, keeping the rest.
        """
        self.__logging.debug("execute()")
        ret_content = "Can't read the content from EF_ICCID!"
        raw_format = False
        update_iccid = False
        set_content = ""
        # Parse "key=value" arguments into flags.
        dict_args = convert_arguments_to_dict(arg_parameter)
        for key, value in dict_args.items():
            if key == "format" and value.lower() == "raw":
                raw_format = True
            elif key == "set":
                set_content = value
                update_iccid = True
        # select EF_ICCID
        response, sw1, sw2 = select_file_in_mf(
            arg_connection, USIM_FILE_ID.ICCID.value)
        if sw1 == 0x90:  # 0x90 = success status word
            data_length = get_data_length(response)
            response, sw1, sw2 = arg_connection.read_binary(data_length)
            if update_iccid:
                # Splice the new prefix over the current value, then write back.
                original = convert_bcd_to_string(response)
                update_content = set_content + original[len(set_content):]
                response, sw1, sw2 = arg_connection.update_binary(
                    convert_string_to_bcd(update_content))
                if sw1 == 0x90:
                    ret_content = "ICCID: Updated to '%s'" % (update_content)
                else:
                    ret_content = "Can't update the new content to EF_ICCID!"
            else:
                if raw_format:
                    ret_content = "ICCID: " + toHexString(response)
                else:
                    ret_content = "ICCID: " + convert_bcd_to_string(response)
        return ret_content
|
#!/usr/bin/env python
"""
quick.py
Quickly Plot SBOL Designs
Usage:
------
python quick.py -input "p.gray p.lightblue i.lightred r.green c.orange t.purple -t.black -c.yellow -p.yellow" -output out.pdf
allowed part types:
p: promoter i: ribozyme r: rbs c: cds t: terminator s: spacer =: scar
reverse part direction by using '-' before the 1-letter part type
allowed colors
black, gray, red, orange, yellow, green, blue, purple, lightred, lightorange,
lightyellow, lightgreen, lightblue, lightpurple
"""
# Quickly Plot SBOL Designs
# Copyright (C) 2014 by
# Thomas E. Gorochowski <tom@chofski.co.uk>
# Bryan Der <bder@mit.edu>
# All rights reserved.
# OSI Non-Profit Open Software License ("Non-Profit OSL") 3.0 license.
# Set the backend to use (important for headless servers)
import matplotlib
matplotlib.use('Agg')
# Other modules we require
import argparse
import dnaplotlib as dpl
import matplotlib.pyplot as plt
def process_arguments (input):
    """Convert a space-separated design string into a dnaplotlib part list.

    Each element has the form [-]<type>.<color>, e.g. "p.gray" or "-t.black".
    A leading '-' reverses the part direction. Elements with an unknown type
    or a malformed format are silently skipped; unknown colors fall back to
    black (0, 0, 0).

    :param input: design specification string
    :return: list of part dicts ({'name', 'type', 'fwd', 'opts'}) suitable
             for dnaplotlib's DNARenderer
    """
    # Short code -> SBOL part type
    types = {
        'p': 'Promoter',
        'i': 'Ribozyme',
        'r': 'RBS',
        'c': 'CDS',
        't': 'Terminator',
        's': 'Spacer',
        '=': 'Scar',
    }
    # Color name -> RGB tuple (0-1 range)
    colors = {
        'black': (0.00, 0.00, 0.00),
        'gray': (0.60, 0.60, 0.60),
        'red': (0.89, 0.10, 0.11),
        'orange': (1.00, 0.50, 0.00),
        'yellow': (1.00, 1.00, 0.00),
        'green': (0.20, 0.63, 0.17),
        'blue': (0.12, 0.47, 0.71),
        'purple': (0.42, 0.24, 0.60),
        'lightred': (0.98, 0.60, 0.60),
        'lightorange': (0.99, 0.75, 0.44),
        'lightyellow': (1.00, 1.00, 0.60),
        'lightgreen': (0.70, 0.87, 0.54),
        'lightblue': (0.65, 0.81, 0.89),
        'lightpurple': (0.79, 0.70, 0.84),
    }
    # Generate the parts list from the arguments
    part_list = []
    part_idx = 1
    for el in input.split(' '):
        if el != '':
            part_parts = el.split('.')
            if len(part_parts) == 2:
                part_short_type = part_parts[0]
                part_fwd = True
                if part_short_type[0] == '-':
                    part_fwd = False
                    # BUG FIX: was `el[1:]`, which kept the ".color" suffix
                    # and made every reversed part fail the type lookup
                    # (reversed parts were silently dropped).
                    part_short_type = part_short_type[1:]
                if part_short_type in types:
                    part_type = types[part_short_type]
                    part_color = part_parts[1]
                    # Default to black for unknown color names
                    part_rgb = colors.get(part_color, (0, 0, 0))
                    part_list.append({'name': str(part_idx),
                                      'type': part_type,
                                      'fwd': part_fwd,
                                      'opts': {'color': part_rgb}})
                    # BUG FIX: part_idx was never incremented, so every part
                    # received the name "1".
                    part_idx += 1
    return part_list
def main():
    """Parse command-line options, render the design, and save the figure."""
    # Parse the command line inputs
    parser = argparse.ArgumentParser(description="one line quick plot")
    parser.add_argument("-input", dest="input", required=True, help="\"p.gray p.lightblue i.lightred r.green c.orange t.purple -t.black -c.yellow -p.yellow\"", metavar="string")
    parser.add_argument("-output", dest="output", required=False, help="output pdf filename")
    args = parser.parse_args()
    # Process the arguments
    design = process_arguments(args.input)
    # Create objects for plotting (dnaplotlib)
    dr = dpl.DNARenderer(linewidth=1.15, backbone_pad_left=3, backbone_pad_right=3)
    reg_renderers = dr.std_reg_renderers()
    part_renderers = dr.SBOL_part_renderers()
    regs = None
    # Generate the figure
    fig = plt.figure(figsize=(5.0,5.0))
    ax = fig.add_subplot(1,1,1)
    # Plot the design
    dna_start, dna_end = dr.renderDNA(ax, design, part_renderers, regs, reg_renderers)
    max_dna_len = dna_end-dna_start
    # Format the axis: hide ticks, the construct drawing is the only content
    ax.set_xticks([])
    ax.set_yticks([])
    # Set bounds with a 1% margin on each side
    ax.set_xlim([(-0.01*max_dna_len),
                 max_dna_len+(0.01*max_dna_len)])
    ax.set_ylim([-35,35])
    ax.set_aspect('equal')
    ax.set_axis_off()
    # Update the size of the figure to fit the constructs drawn
    fig_x_dim = max_dna_len/60.0
    if fig_x_dim < 1.0:
        fig_x_dim = 1.0
    fig_y_dim = 1.2
    plt.gcf().set_size_inches( (fig_x_dim, fig_y_dim) )
    # Save the figure
    plt.tight_layout()
    fig.savefig(args.output, transparent=True)
# Enable the script to be run from the command line
if __name__ == "__main__":
    main()
|
import json
import os
import requests
# Maytapi WhatsApp API configuration, read from the environment at import time.
id_phone = os.environ['ID_PHONE']      # phone instance id
product_id = os.environ['PRODUCT_ID']  # Maytapi product id
base_url = f"https://api.maytapi.com/api/{product_id}"
token = os.environ['TOKEN']            # API key
# Common headers attached to every request
headers = {
    'content-type': 'application/json',
    'x-maytapi-key': token
}
def _post(url, payload, result_key="data"):
    """POST *payload* as JSON to *url* and unpack the Maytapi response.

    Shared helper: the previous code repeated this request/response
    boilerplate in every send_* function.

    :param url: full endpoint URL
    :param payload: dict serialized into the JSON request body
    :param result_key: response field returned as the second tuple element
    :return: (success: Bool, result) -- (False, {}) on any non-200 status
    """
    req = requests.post(url, data=json.dumps(payload), headers=headers)
    if req.status_code == 200:
        response = req.json()
        return response.get("success"), response.get(result_key)
    return False, {}


def send_text(phone: str, text: str):
    """
    Send message text
    Example: send_text("573166187553","hello world 😄")
    :param phone: str* (number phone)
    :param text: str* (message)
    :return: success: Bool, data Dict
    """
    payload = {
        'to_number': f"{phone}@c.us",
        'type': 'text',
        'message': text
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_multimedia(phone: str, text: str, url_media: str):
    """
    Send message with multimedia (AUDIO,FILE,IMAGE)
    Example: send_multimedia("573166187553","hello world 😄","http://oyepepe.com/static/dashboard/assets/images/logo.png")
    Example: send_multimedia("573166187553","","http://oyepepe.com/static/dashboard/assets/images/logo.png")
    :param phone: str* (number_phone)
    :param text: str (message)
    :param url_media: str* (url_of_file)
    :return: success: Bool, data Dict
    """
    payload = {
        'to_number': f"{phone}@c.us",
        'type': 'media',
        'message': url_media,
    }
    # The caption is optional; only include it when non-empty.
    if text:
        payload["text"] = text
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_contact(phone: str, contact: str):
    """
    Send message with contact
    Example: send_contact("573166187553",'573166187553')
    :param phone: str* (number_phone)
    :param contact: str* (contact number to share)
    :return: success: Bool, data Dict
    """
    payload = {
        'to_number': f"{phone}@c.us",
        'type': 'contact',
        'message': f"{contact}@c.us",
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_forward(phone: str, chatId: str, msgId: str):
    """Forward an existing message (identified by chat and message id)."""
    payload = {
        'to_number': f"{phone}@c.us",
        'type': 'forward',
        'message': f"false_{chatId}_{msgId}",
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_reply(phone: str, message: str, chatId: str, msgId: str):
    """Send a text message quoting an existing message as the reply target."""
    payload = {
        'to_number': f"{phone}@c.us",
        'type': 'text',
        'message': message,
        'reply_to': f"false_{chatId}_{msgId}",
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_location(phone: str, message: str, latitude: str, longitude: str):
    """
    Send message with location
    Example: send_location("573166187553","Hello","12.654","-72.776")
    Example: send_location("573166187553","","12.654","-72.776")
    :param phone: str* (number_phone)
    :param message: str (message)
    :param latitude: str* (latitude)
    :param longitude: str* (longitude)
    :return: success: Bool, data Dict
    """
    payload = {
        'to_number': f"{phone}@c.us",
        "type": "location",
        "text": message,
        "latitude": latitude,
        "longitude": longitude
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def send_link(phone: str, message: str, url_link: str):
    """
    Send message with link
    Example: send_link("573166187553","Text","https://google.com")
    Example: send_link("573166187553","","https://google.com")
    :param phone: str* (number_phone)
    :param message: str (message)
    :param url_link: str* (link)
    :return: success: Bool, data Dict
    """
    payload = {
        'to_number': f"{phone}@c.us",
        "type": "link",
        "text": message,
        "message": url_link
    }
    return _post(f"{base_url}/{id_phone}/sendMessage", payload)


def set_config(url_server: str, ack_delivery: bool):
    """
    Config phone
    Example: set_config("https://f2d55e5eceae.ngrok.io/chatbot/recibir-mensage/",True)
    :param url_server: str* (url_webhook)
    :param ack_delivery: bool* (send_notifications_state_account_whatsapp)
    :return: success: Bool, status
    """
    payload = {
        'webhook': url_server,
        'ack_delivery': ack_delivery,
    }
    # Unlike the senders, this endpoint reports a "status" field.
    return _post(f"{base_url}/{id_phone}/config", payload, result_key="status")
# NOT WORKING
# send_forward("573166187553","573162557014","87f667a0-be38-11ea-9422-99a655694b14")
# send_reply("573166187553","Hello","573166187553","ad2d6c70-be39-11ea-894e-d7d465b17ba0")
|
import hashlib,os
def hash(y):
hasher = hashlib.md5(y.encode())
return (hasher.hexdigest())
def askCredentials(x):
    """Prompt for a username/password, allowing *x* attempts.

    Credentials are checked against hard-coded MD5 digests. Every attempt is
    appended to Data/_Log_.txt; on success the EDITH module is imported
    (which starts the application), on failure the function recurses with one
    fewer chance and calls exit() when no chances remain.

    :param x: number of attempts remaining
    """
    if x != 0:
        os.chdir("Data")
        username = input("Enter Username: ")
        password = input("Enter Password: ")
        mainPass, mainUser = hash(password), hash(username)
        if mainUser == "c724418d2617be3a1f16c5caa3b6987c" and mainPass == "50680400cbc42d694023e2e3653d63b1":
            # BUG FIX: the log file handle was never closed; use a context
            # manager so the write is flushed and the handle released.
            with open("_Log_.txt", "a") as log:
                log.write("\n{} {} GRANTED".format(username, mainPass))
            import EDITH as edith  # side effect of import launches the app
        else:
            # NOTE(review): the DENIED branch logs the plain-text password
            # while GRANTED logs its hash -- confirm this is intentional.
            with open("_Log_.txt", "a") as log:
                log.write("\n{} {} DENIED".format(username, password))
            x -= 1
            print("Wrong Username or Password")
            print(x, "Chances Left.")
            os.chdir("..")
            askCredentials(x)
    else:
        exit()
# Entry point: give the user three login attempts.
if __name__=="__main__":askCredentials(3)
|
# Standard Library
# Django Library
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from apps.base.models import PyCompany, PyFather
from apps.base.views.sequence import get_next_value
from taggit.managers import TaggableManager
# Localfolder Library
from .journal import PyJournal
from .plan import PyAccountPlan
# Workflow states for an accounting move.
# NOTE(review): the labels mix languages ('No asentado'/'Validado' are
# Spanish, 'cancel'/'confirmed' are untranslated English) -- confirm intent.
ACCOUNT_MOVE_STATE = (
    (0, _('No asentado')),
    (1, _('Validado')),
    (2, _('cancel')),
    (3, _('confirmed'))
)
# ========================================================================== #
class PyAccountMove(PyFather):
    """Accounting move (journal entry) header."""

    name = models.CharField(_('Name'), max_length=80)
    # Workflow state; see ACCOUNT_MOVE_STATE
    state = models.IntegerField(
        _('Status'),
        choices=ACCOUNT_MOVE_STATE,
        default=0
    )
    journal_id = models.ForeignKey(PyJournal, on_delete=models.PROTECT)
    date_move = models.DateField(default=timezone.now)
    company_move = models.ForeignKey(
        PyCompany,
        on_delete=models.PROTECT,
        null=True,
        blank=True
    )
    reference = models.CharField(
        _('Reference'),
        max_length=80,
        null=True,
        blank=True
    )
    amount = models.DecimalField(
        _('Total'),
        max_digits=10,
        decimal_places=2,
        default=0
    )
    debit = models.DecimalField(
        _('debit'),
        max_digits=10,
        decimal_places=2,
        default=0
    )
    credit = models.DecimalField(
        _('credit'),
        max_digits=10,
        decimal_places=2,
        default=0
    )

    def save(self, *args, **kwargs):
        """Assign a sequence name on creation and default the move date."""
        # BUG FIX: generate the sequence only on creation (consistent with
        # PyAccountMoveDetail.save); regenerating on every save consumed
        # sequence values and silently renumbered existing moves.
        if not self.pk:
            self.name = get_next_value(self._meta.object_name, 'ACC')
        if not self.date_move or self.date_move == "":
            # BUG FIX: was `timezone.now` (the callable itself), which stored
            # a function object instead of a date; call it.
            self.date_move = timezone.now()
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('PyAccountMove:detail', kwargs={'pk': self.pk})

    def __str__(self):
        return "{}".format(self.name)

    class Meta:
        verbose_name = _("Account Move")
        verbose_name_plural = _("Account Moves")
# ========================================================================== #
class PyAccountMoveDetail(PyFather):
    """Detail line of an accounting move (payment order detail)."""

    name = models.CharField(_('Name'), max_length=80)
    account_move_id = models.ForeignKey(
        PyAccountMove,
        on_delete=models.PROTECT,
        verbose_name=_('account')
    )
    account_plan_id = models.ForeignKey(
        PyAccountPlan,
        on_delete=models.PROTECT
    )
    reference_company = models.ForeignKey(
        PyCompany,
        on_delete=models.PROTECT,
        verbose_name=_('company')
    )
    tags = TaggableManager(blank=True, verbose_name=_('tag'))
    debit = models.DecimalField(
        _('debit'),
        max_digits=10,
        decimal_places=2,
        default=0
    )
    credit = models.DecimalField(
        _('credit'),
        max_digits=10,
        decimal_places=2,
        default=0
    )
    date_due = models.DateField(default=timezone.now)

    def save(self, *args, **kwargs):
        """Assign a sequence name on creation and default the due date."""
        if not self.pk:
            self.name = get_next_value(self._meta.object_name, 'ACN')
        if not self.date_due or self.date_due == "":
            # BUG FIX: was `timezone.now` (the callable itself), which stored
            # a function object instead of a date; call it.
            self.date_due = timezone.now()
        super().save(*args, **kwargs)

    class Meta:
        ordering = ['pk']
        verbose_name = _('Account Move detail')
        verbose_name_plural = _('Account Move details')
|
from abc import abstractmethod
import torch.nn as nn
from graph4nlp.pytorch.data.data import GraphData
from graph4nlp.pytorch.modules.graph_embedding_initialization.embedding_construction import (
EmbeddingConstruction,
)
class GraphEmbeddingInitialization(nn.Module):
    """Initial node-feature construction for a graph.

    Thin wrapper around EmbeddingConstruction; forwards the options taken
    from *embedding_style* (with defaults for the optional keys).
    """

    def __init__(
        self,
        word_vocab,
        embedding_style,
        hidden_size=None,
        fix_word_emb=True,
        fix_bert_emb=True,
        word_dropout=None,
        rnn_dropout=None,
    ):
        super(GraphEmbeddingInitialization, self).__init__()
        self.embedding_layer = EmbeddingConstruction(
            word_vocab,
            embedding_style["single_token_item"],
            emb_strategy=embedding_style["emb_strategy"],
            hidden_size=hidden_size,
            num_rnn_layers=embedding_style.get("num_rnn_layers", 1),
            fix_word_emb=fix_word_emb,
            fix_bert_emb=fix_bert_emb,
            bert_model_name=embedding_style.get("bert_model_name", "bert-base-uncased"),
            bert_lower_case=embedding_style.get("bert_lower_case", True),
            word_dropout=word_dropout,
            rnn_dropout=rnn_dropout,
        )

    # BUG FIX: removed the @abstractmethod decorator -- this class does not
    # use ABCMeta, so the decorator had no effect, and the method has a
    # concrete implementation; keeping it was misleading.
    def forward(self, graph_data: GraphData):
        """Compute the initial embeddings for *graph_data*."""
        return self.embedding_layer(graph_data)
|
import os
import subprocess
import pytest
@pytest.mark.parametrize(
    "filename",
    [
        "base-with-nbs-pys.yml",
        "base-with-nbs.yml",
        "base-with-pys.yml",
        "base-without-nbs.yml",
        "material-with-nbs-pys.yml",
        "material-with-nbs.yml",
        "material-with-pys.yml",
    ],
)
def test_can_render_notebook(filename):
    """mkdocs must build successfully with each sample configuration file."""
    this_dir = os.path.dirname(os.path.realpath(__file__))
    mkdocs_dir = os.path.join(this_dir, "mkdocs")
    # BUG FIX: `-f` was passed a literal placeholder string instead of the
    # parametrized config filename, so every case built the same (wrong) file.
    run = subprocess.run(
        ["mkdocs", "build", "-q", "-f", filename], cwd=mkdocs_dir
    )
    assert run.returncode == 0
|
# Classify each number in 0..9 as even or odd, then report loop completion.
numbers = list(range(0, 10))
for value in numbers:
    if value % 2 == 0:
        print(f"\nThe Number {value} Is Even.")
    else:
        print(f"\nThe Number {value} Is Odd.")
else:
    # for/else: runs because the loop always completes without a break
    print("\nLoop Is Finished.")
|
import traceback
from django.db.models import query
import funcy
from eraserhead.model_instance_wrapper import ModelInstanceWrapper
from eraserhead.request_storage import RequestStorage
from eraserhead.queryset_storage import QuerySetStorage
# Module-level storage for the request currently being processed; replaced
# at the start of every request by request_started_handler.
current_request_storage = RequestStorage()
@funcy.once
def patch_queryset():
    """ Patch QuerySet so iterated model instances are wrapped and recorded. """
    # Patch `ModelIterable.__iter__` method
    @funcy.monkey(query.ModelIterable)
    def __iter__(self):
        # Capture the stack so statistics can point at the query's origin.
        tb = traceback.extract_stack()
        queryset_storage = QuerySetStorage(self.queryset, tb)
        current_request_storage.add_queryset_storage_instance(queryset_storage)
        # `__iter__.original` is the pre-patch method saved by funcy.monkey.
        iterator = __iter__.original(self)
        for model_instance in iterator:
            wrapped_model_instance = ModelInstanceWrapper(model_instance)
            queryset_storage.add_wrapped_model_instance(wrapped_model_instance)
            yield wrapped_model_instance
def request_started_handler(sender, **kwargs):
    """ Create new request storage """
    # Fresh storage per request so stats never leak across requests.
    global current_request_storage
    current_request_storage = RequestStorage()
def request_finished_handler(sender, **kwargs):
    """ Print request statistics """
    current_request_storage.print_stats()
|
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from mpl_toolkits.basemap import Basemap
import glob
import struct
import time
import sys
import getopt
import string
from datetime import date
import datetime
# Map observation-type id (OBSID) -> human-readable observation name.
obsid_dict = {5525:'SST', 5522:'SSS', 5351:'ADT', 3073: 'T prof', 5521: 'S prof', 6000:'Ice Fraction'}
class OdaStats():
    """Reader/plotter for one ODA observation-statistics NetCDF file.

    Loads OBSID, position, level, observed value, sigo, omf
    (obs-minus-forecast) and, when present, oma (obs-minus-analysis), and
    offers time-series and map plots of the innovation statistics.
    Note: written for Python 2 (print statement).
    """
    def __init__(self, fname='obs01020.dat.nc',alpha=1.0, marker='o'):
        # Plot styling used for this file's points
        self.alpha=alpha
        self.marker=marker
        ncfile = Dataset(fname, 'r')
        self.fname=fname
        self.OBSID=ncfile.variables['OBSID'][:]
        self.lon=ncfile.variables['lon'][:]
        self.lat=ncfile.variables['lat'][:]
        self.lev=ncfile.variables['lev'][:]
        self.obs=ncfile.variables['obs'][:]
        self.sigo=ncfile.variables['sigo'][:]
        self.omf=ncfile.variables['omf'][:]
        # Forecast reconstructed from obs and obs-minus-forecast
        self.fcst=self.obs-self.omf
        try:
            self.oma=ncfile.variables['oma'][:]
        except:
            # No analysis variable in this file: keep shape, mark as missing
            self.oma=np.nan*self.omf
        self.std_omf=ncfile.variables['std_omf'][:]
        # Date/time parsed from fixed character positions in the file name
        self.yyyymmdd=fname[-30:-26]+'-'+fname[-26:-24]+'-'+fname[-24:-22] #fname[-27:-23]+'-'+fname[-23:-21]+'-'+fname[-21:-19]
        self.time=int(fname[-30:-22])
        self.date=datetime.datetime(int(fname[-30:-26]), int(fname[-26:-24]), int(fname[-24:-22]), int(fname[-21:-19]))
        ncfile.close()
        #print self.fcst[self.fcst==0.0]
        #raw_input('<>?')
    def plot(self, obstype='ADT'):
        """Append this file's mean |omf|/|oma| per observation type to figure 1.

        Black markers: mean |omf|; red markers: mean |oma|. One subplot per
        observation type (T prof, S prof, SST, ADT, Arctic/Antarctic ice).
        """
        IDS=np.unique(self.OBSID)
        plt.interactive(True)
        fig = plt.figure(num=1, figsize=(16,14), facecolor='w')
        for ids in IDS:
            #I = np.where( (self.OBSID==ids) & (self.qc==1) & (np.abs(self.lat)<90.0) & (np.abs(self.lev)<300.0) )
            #I = np.where( (self.OBSID==ids) & (np.abs(self.lev)<2000.0) & (np.abs(self.lev)>0.0) & (np.abs(self.omf)<100.0) & (np.abs(self.lat)<90.0) )
            # Screen out zero forecasts and gross omf outliers
            I = np.where( (self.OBSID==ids) & (np.abs(self.lev)<300.0) & (self.fcst!=0.0) & (np.abs(self.lat)<90.0) & (np.abs(self.omf)<10.0))
            if ( (ids == 3073) | (ids == 5521) ):
                if ids==3073:
                    vmin=-3.
                    vmax=3.
                    #ax = fig.add_subplot(411)
                    #ax2=ax.twinx()
                    plt.subplot(611)
                    plt.plot_date(self.date, np.mean(np.abs(self.omf[I])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                    plt.plot_date(self.date, np.mean(np.abs(self.oma[I])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                    #plt.plot_date(self.date, np.sqrt(np.mean((self.omf[I])**2)),'k',markersize=8, alpha=self.alpha, marker=self.marker)
                    #plt.plot_date(self.date, np.mean(np.abs(self.oma[I])),'r',markersize=8, alpha=self.alpha, marker=self.marker)
                    #ax2.plot_date(self.date, len(I[0]),marker='.',color='gray',markersize=8)
                    plt.grid(True)
                    #ax2.grid(False)
                    #ax2.locator_params(axis='y',nbins=6)
                    print 'Nobs=',len(I[0])
                    #plt.grid(True)
                    #plt.subplot(812)
                    #plt.plot_date(self.date, np.mean(self.omf[I]),'ok',markersize=8)
                    #plt.grid(True)
                if ids==5521:
                    vmin=-1
                    vmax=1
                    plt.subplot(612)
                    plt.plot_date(self.date, np.mean(np.abs(self.omf[I])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                    plt.plot_date(self.date, np.mean(np.abs(self.oma[I])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                    plt.grid(True)
                    #plt.subplot(814)
                    #plt.plot_date(self.date, np.mean(self.omf[I]),'ok',markersize=8)
                    #plt.grid(True)
                    #plt.plot(self.omf[I], -self.lev[I],'.k',alpha=0.2)
                    #plt.xlim((vmin,vmax))
                    #plt.ylim((-3000,0))
            if (ids == 5525):
                # NOTE(review): the inner `ids==5351` branch here is
                # unreachable (outer condition is ids == 5525) -- confirm.
                if ids==5525:
                    vmin=-1.
                    vmax=1.
                if ids==5351:
                    vmin=-.2
                    vmax=.2
                plt.subplot(613)
                plt.plot_date(self.date, np.mean(np.abs(self.omf[I])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.plot_date(self.date, np.mean(np.abs(self.oma[I])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.grid(True)
                plt.title(obsid_dict[ids],fontweight='bold')
                #plt.subplot(816)
                #plt.plot_date(self.date, np.mean(self.omf[I]),'ok',markersize=8)
                #plt.grid(True)
            if (ids == 5351):
                plt.subplot(614)
                plt.plot_date(self.date, np.mean(np.abs(self.omf[I])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.plot_date(self.date, np.mean(np.abs(self.oma[I])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.grid(True)
                plt.title(obsid_dict[ids],fontweight='bold')
            if (ids == 6000):
                # Ice fraction: split into Arctic (lat>55) and Antarctic (lat<-55)
                IN = np.where( (self.OBSID==ids) & (self.lat>55.0) )#& (np.abs(self.lev)<2000.0) & (self.fcst!=0.0) & (self.lat>55.0) & (np.abs(self.omf)<40.0))
                plt.subplot(615)
                plt.plot_date(self.date, np.mean(np.abs(self.omf[IN])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.plot_date(self.date, np.mean(np.abs(self.oma[IN])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.grid(True)
                plt.title('Arctic '+obsid_dict[ids],fontweight='bold')
                IS = np.where( (self.OBSID==ids) & (self.lat<-55.0) )#& (np.abs(self.lev)<2000.0) & (self.fcst!=0.0) & (self.lat<-55.0) & (np.abs(self.omf)<40.0))
                plt.subplot(616)
                plt.plot_date(self.date, np.mean(np.abs(self.omf[IS])),'ok',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.plot_date(self.date, np.mean(np.abs(self.oma[IS])),'or',markersize=8, alpha=self.alpha, marker=self.marker)
                plt.grid(True)
                plt.title('Antarctic '+obsid_dict[ids],fontweight='bold')
                #plt.subplot(818)
                #plt.plot_date(self.date, np.mean(self.omf[I]),'ok',markersize=8)
                #plt.grid(True)
            '''
            map = Basemap(projection='mill', llcrnrlon=20, llcrnrlat=-70, urcrnrlon=380, urcrnrlat=80, resolution='l')
            map.drawcoastlines()
            map.drawcountries()
            #map.bluemarble()
            map.fillcontinents(color='coral')
            map.drawmapboundary()
            x, y = map(self.lon[I],self.lat[I])
            map.scatter(x,y,5,c=self.omf[I],cmap=cm.spectral,vmin=vmin,vmax=vmax,edgecolor=None,lw=0)
            x, y = map(self.lon[I]+360,self.lat[I])
            map.scatter(x,y,5,c=self.omf[I],cmap=cm.spectral,vmin=vmin,vmax=vmax,edgecolor=None,lw=0)
            plt.colorbar(shrink=.5, pad=0.05)
            '''
            #print obsid_dict[ids],' mean omf=',np.mean(self.omf[I]),' mean |omf|=',np.mean(np.abs(self.omf[I])),' mean std omf=',np.mean(np.abs(self.stdomf[I]))
            #plt.plot(self.lat[I],self.omf[I],'.k')
            #plt.title(self.yyyymmdd+' '+obsid_dict[ids])
        #fig.autofmt_xdate()
        #plt.savefig(self.yyyymmdd+'-'+obsid_dict[ids])
        #plt.draw()
        #raw_input('<>?')
        #plt.clf()
    def plot2dpolar(self, obstype='Ice Fraction', obsid=2819):
        """Polar-stereographic maps (N then S) of forecast and observed values.

        Values are mapped back to fraction space via exp(v)/(1+exp(v)); the
        figure is saved as 'AICE_<date>'.
        """
        I = np.where( (self.OBSID==obsid) )
        valmin=.0
        valmax=1.0
        errmax=0.5
        fig = plt.figure(num=1, figsize=(12,12), facecolor='w')
        cnt=1
        for proj in ['npstere','spstere']:
            map = Basemap(projection=proj,lon_0=0,boundinglat=55, resolution='c')
            if (proj=='spstere'):
                map = Basemap(projection=proj,lon_0=0,boundinglat=-55, resolution='c')
            # Inverse-logit back to fraction space for forecast and obs
            fcst=np.exp(self.obs[I]-self.omf[I])/(1.0+np.exp(self.obs[I]-self.omf[I]))
            obs=np.exp(self.obs[I])/(1.0+np.exp(self.obs[I]))
            #fcst[fcst<0.15]=np.nan
            #obs[obs<0.15]=np.nan
            plt.subplot(2,2,cnt)
            map.drawcoastlines()
            map.drawcountries()
            map.fillcontinents(color='coral')
            map.drawmapboundary()
            x, y = map(self.lon[I], self.lat[I])
            map.scatter(x, y, 1, c=fcst,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
            x, y = map(self.lon[I]+360, self.lat[I])
            map.scatter(x, y, 1, c=fcst,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
            plt.subplot(2,2,cnt+1)
            map.drawcoastlines()
            map.drawcountries()
            map.fillcontinents(color='coral')
            map.drawmapboundary()
            x, y = map(self.lon[I], self.lat[I])
            map.scatter(x, y, 1, c=obs,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
            x, y = map(self.lon[I]+360, self.lat[I])
            map.scatter(x, y, 1, c=obs,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
            cnt+=2
        plt.subplots_adjust(left=None, bottom=None, right=None, top=None,wspace=0.0, hspace=0.0)
        plt.suptitle(self.yyyymmdd)
        plt.draw()
        fnameout='AICE_'+self.yyyymmdd
        plt.savefig(fnameout)
        plt.clf()
    def plot2d(self, obstype='Ice Fraction', obsid=5525):
        """Global (Miller projection) scatter map of omf for one obsid.

        Saved as 'SST_<date>' (default obsid is SST).
        """
        I = np.where( (self.OBSID==obsid) )
        valmin=-1.0
        valmax=1.0
        errmax=0.5
        fig = plt.figure(num=1, figsize=(16,8), facecolor='w')
        cnt=1
        map = Basemap(projection='mill', llcrnrlon=20, llcrnrlat=-80, urcrnrlon=380, urcrnrlat=90, resolution='l')
        #fcst=self.obs[I]-self.omf[I]
        #obs=self.obs[I]
        #fcst[fcst<0.15]=np.nan
        #obs[obs<0.15]=np.nan
        #plt.subplot(2,1,1)
        map.drawcoastlines()
        map.drawcountries()
        map.fillcontinents(color='coral')
        map.drawmapboundary()
        x, y = map(self.lon[I], self.lat[I])
        map.scatter(x, y, 2, c=self.omf[I],cmap=cm.bwr,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
        # Repeat shifted by 360 degrees so points near the seam are drawn
        x, y = map(self.lon[I]+360, self.lat[I])
        map.scatter(x, y, 2, c=self.omf[I],cmap=cm.bwr,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
        #plt.subplot(2,1,2)
        #map.drawcoastlines()
        #map.drawcountries()
        #map.fillcontinents(color='coral')
        #map.drawmapboundary()
        #x, y = map(self.lon[I], self.lat[I])
        #map.scatter(x, y, 1, c=obs,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
        #x, y = map(self.lon[I]+360, self.lat[I])
        #map.scatter(x, y, 1, c=obs,cmap=cm.spectral,vmin=valmin,vmax=valmax,edgecolor=None,lw=0)
        plt.colorbar(shrink=0.15)
        plt.suptitle(self.yyyymmdd)
        plt.draw()
        fnameout='SST_'+self.yyyymmdd
        plt.savefig(fnameout)
        plt.clf()
|
import unittest
import at_checks
import daemons_check
import modem_checks
import python_checks
import sim_checks
import simple_cmd_checks
from plmn.utils import *
from plmn.results import *
# Assemble and run the full regression suite when executed directly.
if __name__ == '__main__':
    nargs = process_args()
    suite = unittest.TestSuite()
    # Add all regression test-cases to this test-suite.
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(python_checks.PythonChecks))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(daemons_check.DaemonChecks))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(modem_checks.ModemChecks))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(at_checks.AtCmdChecks))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(sim_checks.SimChecks))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(simple_cmd_checks.SimpleCmdChecks))
    # Run the regression suite.
    unittest.TextTestRunner().run(suite)
    # Print final system state.
    Results.print_results()
|
"""
Barrido del chi2 en el parametro b para el resto de los parametros fijos.
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.constants import c as c_luz #metros/segundos
c_luz_km = c_luz/1000
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/utils/')
from int import Hubble_teorico
from supernovas import magn_aparente_teorica, chi2_supernovas
from BAO import r_drag, Hs_to_Ds, Ds_to_obs_final
from AGN import zs_2_logDlH0
from alternativos import params_to_chi2
from matplotlib import pyplot as plt
os.chdir(path_git)
sys.path.append('./Software/utils/')
from data import leer_data_pantheon, leer_data_cronometros, leer_data_BAO, leer_data_AGN
# Supernovae (Pantheon compilation)
os.chdir(path_git+'/Software/Estadística/Datos/Datos_pantheon/')
ds_SN = leer_data_pantheon('lcparam_full_long_zhel.txt')
# Cosmic chronometers
os.chdir(path_git+'/Software/Estadística/Datos/')
ds_CC = leer_data_cronometros('datos_cronometros.txt')
# BAO: five observable types, one data file each
os.chdir(path_git+'/Software/Estadística/Datos/BAO/')
ds_BAO = []
archivos_BAO = ['datos_BAO_da.txt','datos_BAO_dh.txt','datos_BAO_dm.txt',
                'datos_BAO_dv.txt','datos_BAO_H.txt']
for i in range(5):
    aux = leer_data_BAO(archivos_BAO[i])
    ds_BAO.append(aux)
# AGN
os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
ds_AGN = leer_data_AGN('table3.dat')
#%%
# Sweep chi2 over b for several H0 values (omega_m fixed, all datasets).
bs = np.linspace(0.01,7,40)
Hs = np.arange(67,71)
chies = np.zeros((len(bs),len(Hs)))
omega_m=0.3
for (i,b) in enumerate(bs):
    print(i,b)
    for (j,H0) in enumerate(Hs):
        # NOTE(review): `_` is not defined in this script and
        # `integrador=int` passes the builtin type -- confirm the intended
        # placeholder and integrator values.
        chies[i,j] = params_to_chi2([-19.41, omega_m, b, H0], _, index=4,
                        dataset_SN = ds_SN,
                        dataset_CC = ds_CC,
                        dataset_BAO = ds_BAO,
                        dataset_AGN = ds_AGN,
                        #H0_Riess = True,
                        model = 'EXP',
                        integrador=int
                        )
#1077.8293845284927/(1048+20+len(ds_CC[0])-4)
#%%
%matplotlib qt5
plt.figure()
plt.title('EXP: CC+SN+BAO+AGN, omega_m=0.3, M=-19.41')
plt.grid(True)
#for k in range(0,6):
#for k in range(7,len(chies[0,:])):
for k in range(len(chies[0,:])):
    plt.plot(bs,chies[:,k],label='H0={}'.format(Hs[k]))
plt.ylabel(r'$\chi^2$')
plt.xlabel('b')
plt.legend()
plt.savefig('/home/matias/Barrido_en_b/Barrido_b.png')
#%%
# Sweep chi2 over b for several omega_m values (H0 fixed, AGN data only).
bs = np.linspace(0.01,7,40)
omegas = np.linspace(0.25,0.45,10)
chies = np.zeros((len(bs),len(omegas)))
H0 = 73.48
# NOTE(review): this rebinds the name `int` (shadowing the builtin) --
# confirm the intended integrator selector value.
int = 0
for (i,b) in enumerate(bs):
    print(i,b)
    for (j,omega_m) in enumerate(omegas):
        chies[i,j] = params_to_chi2([-19.41, omega_m, b, H0], _, index=4,
                        #dataset_SN = ds_SN,
                        #dataset_CC = ds_CC,
                        #dataset_BAO = ds_BAO,
                        dataset_AGN = ds_AGN,
                        #H0_Riess = True,
                        model = 'EXP',
                        integrador=int
                        )
#%%
%matplotlib qt5
plt.figure()
plt.title('EXP: AGN, H0=73.48, M=-19.41')
plt.grid(True)
#for k in range(0,6):
#for k in range(7,len(chies[0,:])):
for k in range(len(chies[0,:])):
    plt.plot(bs,chies[:,k],label='omega_m={}'.format(omegas[k]))
plt.ylabel(r'$\chi^2$')
plt.xlabel('b')
plt.legend()
plt.savefig('/home/matias/Barrido_en_b/Barrido_b.png')
#%%
# Compare the three integrators over a sweep in b (HS model, CC+SN data).
bs = np.linspace(0.1,4,20)
chies = np.zeros((len(bs),3))
for j in range(3):
    for (i,b) in enumerate(bs):
        print(j)
        print(i,b)
        # NOTE(review): the parameter list `[-19.41, 0.352, b, ]` has only
        # three entries -- H0 looks missing; confirm against params_to_chi2.
        chies[i,j] = params_to_chi2([-19.41, 0.352, b, ], _, index=4,
                        dataset_SN = ds_SN,
                        dataset_CC = ds_CC,
                        #dataset_BAO = ds_BAO,
                        #dataset_AGN = ds_AGN,
                        #H0_Riess = True,
                        model = 'HS',
                        integrador=j
                        )
#%%
# Percentage differences between integration methods
metodo_0=chies[:,0]
metodo_1=chies[:,1]
metodo_2=chies[:,2]
error_01 =np.abs((metodo_0-metodo_1)/metodo_0)*100
error_12 =np.abs((metodo_1-metodo_2)/metodo_1)*100
plt.figure()
plt.title('HS: CC+SN, omega_m=0.352, M=-19.41')
plt.grid(True)
plt.plot(bs,error_01,label='Error_01')
#plt.plot(bs,error_12,label='Error_12')
plt.ylabel('Error porcentual')
plt.xlabel('b')
plt.legend()
plt.savefig('/home/matias/integrador/Error_en_b.png')
|
def string_compression(str1):
    """Run-length encode *str1* (e.g. "aabcccccaaa" -> "a2b1c5a3").

    Returns the compressed form only when it is strictly shorter than the
    input; otherwise returns the original string unchanged.
    """
    if len(str1) < 2:
        # A 0/1-char string can never compress. The previous version
        # returned "" for a single character because the loop never ran.
        return str1
    parts = []
    current_count = 1
    for i in range(1, len(str1)):
        if str1[i] == str1[i - 1]:
            current_count += 1
        else:
            # Run ended: flush the previous character and its count.
            parts.append(str1[i - 1] + str(current_count))
            current_count = 1
        if i == len(str1) - 1:
            # Flush the final run.
            parts.append(str1[i] + str(current_count))
    compress_string = "".join(parts)
    return compress_string if len(compress_string) < len(str1) else str1
if __name__ == "__main__":
    # Demo run; expected output for this input is the compressed "a2b1c5a3".
    print(string_compression("aabcccccaaa"))
##
# Contains information (products, regions, etc.) for each forecast site
# in the country.
# 'region' is the two-letter regional identifier, used mainly when
# installing text product templates.
SiteInfo= {
'ABQ': {
'region': 'SR',
'fullStationID': 'KABQ',
'wfoCityState': 'Albuquerque NM',
'wfoCity': 'Albuquerque',
'state': 'New Mexico',
},
'ABR': {
'region': 'CR',
'fullStationID': 'KABR',
'wfoCityState': 'Aberdeen SD',
'wfoCity': 'Aberdeen',
'state': 'South Dakota',
},
'AFC': {
'region': 'AR',
'fullStationID': 'PAFC',
'wfoCityState': 'Anchorage AK',
'wfoCity': 'Anchorage',
'state': 'Alaska',
},
'AER': {
'region': 'AR',
'fullStationID': 'PAFC',
'wfoCityState': 'Anchorage AK',
'wfoCity': 'Anchorage',
'state': 'Alaska',
},
'ALU': {
'region': 'AR',
'fullStationID': 'PAFC',
'wfoCityState': 'Anchorage AK',
'wfoCity': 'Anchorage',
'state': 'Alaska',
},
'AFG': {
'region': 'AR',
'fullStationID': 'PAFG',
'wfoCityState': 'Fairbanks AK',
'wfoCity': 'Fairbanks',
'state': 'Alaska',
},
'AJK': {
'region': 'AR',
'fullStationID': 'PAJK',
'wfoCityState': 'Juneau AK',
'wfoCity': 'Juneau',
'state': 'Alaska',
},
'AKQ': {
'region': 'ER',
'fullStationID': 'KAKQ',
'wfoCityState': 'Wakefield VA',
'wfoCity': 'Wakefield',
'state': 'Virginia',
},
'ALY': {
'region': 'ER',
'fullStationID': 'KALY',
'wfoCityState': 'Albany NY',
'wfoCity': 'Albany',
'state': 'New York',
},
'AMA': {
'region': 'SR',
'fullStationID': 'KAMA',
'wfoCityState': 'Amarillo TX',
'wfoCity': 'Amarillo',
'state': 'Texas',
},
'APX': {
'region': 'CR',
'fullStationID': 'KAPX',
'wfoCityState': 'Gaylord MI',
'wfoCity': 'Gaylord',
'state': 'Michigan',
},
'ARX': {
'region': 'CR',
'fullStationID': 'KARX',
'wfoCityState': 'La Crosse WI',
'wfoCity': 'La Crosse',
'state': 'Wisconsin',
},
'BGM': {
'region': 'ER',
'fullStationID': 'KBGM',
'wfoCityState': 'Binghamton NY',
'wfoCity': 'Binghamton',
'state': 'New York',
},
'BIS': {
'region': 'CR',
'fullStationID': 'KBIS',
'wfoCityState': 'Bismarck ND',
'wfoCity': 'Bismarck',
'state': 'North Dakota',
},
'BMX': {
'region': 'SR',
'fullStationID': 'KBMX',
'wfoCityState': 'Birmingham AL',
'wfoCity': 'Birmingham',
'state': 'Alabama',
},
'BOI': {
'region': 'WR',
'fullStationID': 'KBOI',
'wfoCityState': 'Boise ID',
'wfoCity': 'Boise',
'state': 'Idaho',
},
'BOU': {
'region': 'CR',
'fullStationID': 'KBOU',
'wfoCityState': 'Denver CO',
'wfoCity': 'Denver',
'state': 'Colorado',
},
'BOX': {
'region': 'ER',
'fullStationID': 'KBOX',
'wfoCityState': 'Taunton MA',
'wfoCity': 'Taunton',
'state': 'Massachusetts',
},
'BRO': {
'region': 'SR',
'fullStationID': 'KBRO',
'wfoCityState': 'Brownsville TX',
'wfoCity': 'Brownsville',
'state': 'Texas',
},
'BTV': {
'region': 'ER',
'fullStationID': 'KBTV',
'wfoCityState': 'Burlington VT',
'wfoCity': 'Burlington',
'state': 'Vermont',
},
'BUF': {
'region': 'ER',
'fullStationID': 'KBUF',
'wfoCityState': 'Buffalo NY',
'wfoCity': 'Buffalo',
'state': 'New York',
},
'BYZ': {
'region': 'WR',
'fullStationID': 'KBYZ',
'wfoCityState': 'Billings MT',
'wfoCity': 'Billings',
'state': 'Montana',
},
'CAE': {
'region': 'ER',
'fullStationID': 'KCAE',
'wfoCityState': 'Columbia SC',
'wfoCity': 'Columbia',
'state': 'South Carolina',
},
'CAR': {
'region': 'ER',
'fullStationID': 'KCAR',
'wfoCityState': 'Caribou ME',
'wfoCity': 'Caribou',
'state': 'Maine',
},
'CHS': {
'region': 'ER',
'fullStationID': 'KCHS',
'wfoCityState': 'Charleston SC',
'wfoCity': 'Charleston',
'state': 'South Carolina',
},
'CLE': {
'region': 'ER',
'fullStationID': 'KCLE',
'wfoCityState': 'Cleveland OH',
'wfoCity': 'Cleveland',
'state': 'Ohio',
},
'CRP': {
'region': 'SR',
'fullStationID': 'KCRP',
'wfoCityState': 'Corpus Christi TX',
'wfoCity': 'Corpus Christi',
'state': 'Texas',
},
'CTP': {
'region': 'ER',
'fullStationID': 'KCTP',
'wfoCityState': 'State College PA',
'wfoCity': 'State College',
'state': 'Pennsylvania',
},
'CYS': {
'region': 'CR',
'fullStationID': 'KCYS',
'wfoCityState': 'Cheyenne WY',
'wfoCity': 'Cheyenne',
'state': 'Wyoming',
},
'DDC': {
'region': 'CR',
'fullStationID': 'KDDC',
'wfoCityState': 'Dodge City KS',
'wfoCity': 'Dodge City',
'state': 'Kansas',
},
'DLH': {
'region': 'CR',
'fullStationID': 'KDLH',
'wfoCityState': 'Duluth MN',
'wfoCity': 'Duluth',
'state': 'Minnesota',
},
'DMX': {
'region': 'CR',
'fullStationID': 'KDMX',
'wfoCityState': 'Des Moines IA',
'wfoCity': 'Des Moines',
'state': 'Iowa',
},
'DTX': {
'region': 'CR',
'fullStationID': 'KDTX',
'wfoCityState': 'Detroit/Pontiac MI',
'wfoCity': 'Detroit/Pontiac',
'state': 'Michigan',
},
'DVN': {
'region': 'CR',
'fullStationID': 'KDVN',
'wfoCityState': 'Quad Cities Ia IL',
'wfoCity': 'Quad Cities',
'state': 'Illinois',
},
'EAX': {
'region': 'CR',
'fullStationID': 'KEAX',
'wfoCityState': 'Kansas City/Pleasant Hill MO',
'wfoCity': 'Kansas City/Pleasant Hill',
'state': 'Missouri',
},
'EKA': {
'region': 'WR',
'fullStationID': 'KEKA',
'wfoCityState': 'Eureka CA',
'wfoCity': 'Eureka',
'state': 'California',
},
'EPZ': {
'region': 'SR',
'fullStationID': 'KEPZ',
'wfoCityState': 'El Paso Tx/Santa Teresa NM',
'wfoCity': 'El Paso Tx/Santa Teresa',
'state': 'New Mexico',
},
'EWX': {
'region': 'SR',
'fullStationID': 'KEWX',
'wfoCityState': 'Austin/San Antonio TX',
'wfoCity': 'Austin/San Antonio',
'state': 'Texas',
},
'FFC': {
'region': 'SR',
'fullStationID': 'KFFC',
'wfoCityState': 'Peachtree City GA',
'wfoCity': 'Peachtree City',
'state': 'Georgia',
},
'FGF': {
'region': 'CR',
'fullStationID': 'KFGF',
'wfoCityState': 'Grand Forks ND',
'wfoCity': 'Grand Forks',
'state': 'North Dakota',
},
'FGZ': {
'region': 'WR',
'fullStationID': 'KFGZ',
'wfoCityState': 'Flagstaff AZ',
'wfoCity': 'Flagstaff',
'state': 'Arizona',
},
'FSD': {
'region': 'CR',
'fullStationID': 'KFSD',
'wfoCityState': 'Sioux Falls SD',
'wfoCity': 'Sioux Falls',
'state': 'South Dakota',
},
'FWD': {
'region': 'SR',
'fullStationID': 'KFWD',
'wfoCityState': 'Fort Worth TX',
'wfoCity': 'Fort Worth',
'state': 'Texas',
},
'GGW': {
'region': 'WR',
'fullStationID': 'KGGW',
'wfoCityState': 'Glasgow MT',
'wfoCity': 'Glasgow',
'state': 'Montana',
},
'GID': {
'region': 'CR',
'fullStationID': 'KGID',
'wfoCityState': 'Hastings NE',
'wfoCity': 'Hastings',
'state': 'Nebraska',
},
'GJT': {
'region': 'CR',
'fullStationID': 'KGJT',
'wfoCityState': 'Grand Junction CO',
'wfoCity': 'Grand Junction',
'state': 'Colorado',
},
'GLD': {
'region': 'CR',
'fullStationID': 'KGLD',
'wfoCityState': 'Goodland KS',
'wfoCity': 'Goodland',
'state': 'Kansas',
},
'GRB': {
'region': 'CR',
'fullStationID': 'KGRB',
'wfoCityState': 'Green Bay WI',
'wfoCity': 'Green Bay',
'state': 'Wisconsin',
},
'GRR': {
'region': 'CR',
'fullStationID': 'KGRR',
'wfoCityState': 'Grand Rapids MI',
'wfoCity': 'Grand Rapids',
'state': 'Michigan',
},
'GSP': {
'region': 'ER',
'fullStationID': 'KGSP',
'wfoCityState': 'Greenville-Spartanburg SC',
'wfoCity': 'Greenville-Spartanburg',
'state': 'South Carolina',
},
'GUM': {
'region': 'PR',
'fullStationID': 'PGUM',
'wfoCityState': 'Tiyan GU',
'wfoCity': 'Tiyan',
'state': 'Guam',
},
'GYX': {
'region': 'ER',
'fullStationID': 'KGYX',
'wfoCityState': 'Gray ME',
'wfoCity': 'Gray',
'state': 'Maine',
},
'HFO': {
'region': 'PR',
'fullStationID': 'PHFO',
'wfoCityState': 'Honolulu HI',
'wfoCity': 'Honolulu',
'state': 'Hawaii',
},
'HGX': {
'region': 'SR',
'fullStationID': 'KHGX',
'wfoCityState': 'Houston/Galveston TX',
'wfoCity': 'Houston/Galveston',
'state': 'Texas',
},
'HNX': {
'region': 'WR',
'fullStationID': 'KHNX',
'wfoCityState': 'Hanford CA',
'wfoCity': 'Hanford',
'state': 'California',
},
'HUN': {
'region': 'SR',
'fullStationID': 'KHUN',
'wfoCityState': 'Huntsville AL',
'wfoCity': 'Huntsville',
'state': 'Alabama',
},
'ICT': {
'region': 'CR',
'fullStationID': 'KICT',
'wfoCityState': 'Wichita KS',
'wfoCity': 'Wichita',
'state': 'Kansas',
},
'ILM': {
'region': 'ER',
'fullStationID': 'KILM',
'wfoCityState': 'Wilmington NC',
'wfoCity': 'Wilmington',
'state': 'North Carolina',
},
'ILN': {
'region': 'ER',
'fullStationID': 'KILN',
'wfoCityState': 'Wilmington OH',
'wfoCity': 'Wilmington',
'state': 'Ohio',
},
'ILX': {
'region': 'CR',
'fullStationID': 'KILX',
'wfoCityState': 'Lincoln IL',
'wfoCity': 'Lincoln',
'state': 'Illinois',
},
'IND': {
'region': 'CR',
'fullStationID': 'KIND',
'wfoCityState': 'Indianapolis IN',
'wfoCity': 'Indianapolis',
'state': 'Indiana',
},
'IWX': {
'region': 'CR',
'fullStationID': 'KIWX',
'wfoCityState': 'Northern Indiana',
'wfoCity': 'Northern Indiana',
'state': 'Indiana',
},
'JAN': {
'region': 'SR',
'fullStationID': 'KJAN',
'wfoCityState': 'Jackson MS',
'wfoCity': 'Jackson',
'state': 'Mississippi',
},
'JAX': {
'region': 'SR',
'fullStationID': 'KJAX',
'wfoCityState': 'Jacksonville FL',
'wfoCity': 'Jacksonville',
'state': 'Florida',
},
'JKL': {
'region': 'CR',
'fullStationID': 'KJKL',
'wfoCityState': 'Jackson KY',
'wfoCity': 'Jackson',
'state': 'Kentucky',
},
'KEY': {
'region': 'SR',
'fullStationID': 'KKEY',
'wfoCityState': 'Key West FL',
'wfoCity': 'Key West',
'state': 'Florida',
},
'LBF': {
'region': 'CR',
'fullStationID': 'KLBF',
'wfoCityState': 'North Platte NE',
'wfoCity': 'North Platte',
'state': 'Nebraska',
},
'LCH': {
'region': 'SR',
'fullStationID': 'KLCH',
'wfoCityState': 'Lake Charles LA',
'wfoCity': 'Lake Charles',
'state': 'Louisiana',
},
'LIX': {
'region': 'SR',
'fullStationID': 'KLIX',
'wfoCityState': 'New Orleans LA',
'wfoCity': 'New Orleans',
'state': 'Louisiana',
},
'LKN': {
'region': 'WR',
'fullStationID': 'KLKN',
'wfoCityState': 'Elko NV',
'wfoCity': 'Elko',
'state': 'Nevada',
},
'LMK': {
'region': 'CR',
'fullStationID': 'KLMK',
'wfoCityState': 'Louisville KY',
'wfoCity': 'Louisville',
'state': 'Kentucky',
},
'LOT': {
'region': 'CR',
'fullStationID': 'KLOT',
'wfoCityState': 'Chicago IL',
'wfoCity': 'Chicago',
'state': 'Illinois',
},
'LOX': {
'region': 'WR',
'fullStationID': 'KLOX',
'wfoCityState': 'Los Angeles/Oxnard CA',
'wfoCity': 'Los Angeles/Oxnard',
'state': 'California',
},
'LSX': {
'region': 'CR',
'fullStationID': 'KLSX',
'wfoCityState': 'St Louis MO',
'wfoCity': 'St Louis',
'state': 'Missouri',
},
'LUB': {
'region': 'SR',
'fullStationID': 'KLUB',
'wfoCityState': 'Lubbock TX',
'wfoCity': 'Lubbock',
'state': 'Texas',
},
'LWX': {
'region': 'ER',
'fullStationID': 'KLWX',
'wfoCityState': 'Baltimore MD/Washington DC',
'wfoCity': 'Baltimore MD/Washington',
'state': 'Washington DC',
},
'LZK': {
'region': 'SR',
'fullStationID': 'KLZK',
'wfoCityState': 'Little Rock AR',
'wfoCity': 'Little Rock',
'state': 'Arkansas',
},
'MAF': {
'region': 'SR',
'fullStationID': 'KMAF',
'wfoCityState': 'Midland/Odessa TX',
'wfoCity': 'Midland/Odessa',
'state': 'Texas',
},
'MEG': {
'region': 'SR',
'fullStationID': 'KMEG',
'wfoCityState': 'Memphis TN',
'wfoCity': 'Memphis',
'state': 'Tennessee',
},
'MFL': {
'region': 'SR',
'fullStationID': 'KMFL',
'wfoCityState': 'Miami FL',
'wfoCity': 'Miami',
'state': 'Florida',
},
'MFR': {
'region': 'WR',
'fullStationID': 'KMFR',
'wfoCityState': 'Medford OR',
'wfoCity': 'Medford',
'state': 'Oregon',
},
'MHX': {
'region': 'ER',
'fullStationID': 'KMHX',
'wfoCityState': 'Newport/Morehead City NC',
'wfoCity': 'Newport/Morehead City',
'state': 'North Carolina',
},
'MKX': {
'region': 'CR',
'fullStationID': 'KMKX',
'wfoCityState': 'Milwaukee/Sullivan WI',
'wfoCity': 'Milwaukee/Sullivan',
'state': 'Wisconsin',
},
'MLB': {
'region': 'SR',
'fullStationID': 'KMLB',
'wfoCityState': 'Melbourne FL',
'wfoCity': 'Melbourne',
'state': 'Florida',
},
'MOB': {
'region': 'SR',
'fullStationID': 'KMOB',
'wfoCityState': 'Mobile AL',
'wfoCity': 'Mobile',
'state': 'Alabama',
},
'MPX': {
'region': 'CR',
'fullStationID': 'KMPX',
'wfoCityState': 'Twin Cities/Chanhassen MN',
'wfoCity': 'Twin Cities/Chanhassen',
'state': 'Minnesota',
},
'MQT': {
'region': 'CR',
'fullStationID': 'KMQT',
'wfoCityState': 'Marquette MI',
'wfoCity': 'Marquette',
'state': 'Michigan',
},
'MRX': {
'region': 'SR',
'fullStationID': 'KMRX',
'wfoCityState': 'Morristown TN',
'wfoCity': 'Morristown',
'state': 'Tennessee',
},
'MSO': {
'region': 'WR',
'fullStationID': 'KMSO',
'wfoCityState': 'Missoula MT',
'wfoCity': 'Missoula',
'state': 'Montana',
},
'MTR': {
'region': 'WR',
'fullStationID': 'KMTR',
'wfoCityState': 'San Francisco CA',
'wfoCity': 'San Francisco',
'state': 'California',
},
'OAX': {
'region': 'CR',
'fullStationID': 'KOAX',
'wfoCityState': 'Omaha/Valley NE',
'wfoCity': 'Omaha/Valley',
'state': 'Nebraska',
},
'OHX': {
'region': 'SR',
'fullStationID': 'KOHX',
'wfoCityState': 'Nashville TN',
'wfoCity': 'Nashville',
'state': 'Tennessee',
},
'OKX': {
'region': 'ER',
'fullStationID': 'KOKX',
'wfoCityState': 'New York NY',
'wfoCity': 'Upton',
'state': 'New York',
},
'OTX': {
'region': 'WR',
'fullStationID': 'KOTX',
'wfoCityState': 'Spokane WA',
'wfoCity': 'Spokane',
'state': 'Washington',
},
'OUN': {
'region': 'SR',
'fullStationID': 'KOUN',
'wfoCityState': 'Norman OK',
'wfoCity': 'Norman',
'state': 'Oklahoma',
},
'PAH': {
'region': 'CR',
'fullStationID': 'KPAH',
'wfoCityState': 'Paducah KY',
'wfoCity': 'Paducah',
'state': 'Kentucky',
},
'PBZ': {
'region': 'ER',
'fullStationID': 'KPBZ',
'wfoCityState': 'Pittsburgh PA',
'wfoCity': 'Pittsburgh',
'state': 'Pennsylvania',
},
'PDT': {
'region': 'WR',
'fullStationID': 'KPDT',
'wfoCityState': 'Pendleton OR',
'wfoCity': 'Pendleton',
'state': 'Oregon',
},
'PHI': {
'region': 'ER',
'fullStationID': 'KPHI',
'wfoCityState': 'Mount Holly NJ',
'wfoCity': 'Mount Holly',
'state': 'New Jersey',
},
'PIH': {
'region': 'WR',
'fullStationID': 'KPIH',
'wfoCityState': 'Pocatello ID',
'wfoCity': 'Pocatello',
'state': 'Idaho',
},
'PPG': {
'region': 'PR',
'fullStationID': 'PPPG',
'wfoCityState': 'Pago Pago AS',
'wfoCity': 'Pago Pago',
'state': 'American Samoa',
},
'PQR': {
'region': 'WR',
'fullStationID': 'KPQR',
'wfoCityState': 'Portland OR',
'wfoCity': 'Portland',
'state': 'Oregon',
},
'PSR': {
'region': 'WR',
'fullStationID': 'KPSR',
'wfoCityState': 'Phoenix AZ',
'wfoCity': 'Phoenix',
'state': 'Arizona',
},
'PUB': {
'region': 'CR',
'fullStationID': 'KPUB',
'wfoCityState': 'Pueblo CO',
'wfoCity': 'Pueblo',
'state': 'Colorado',
},
'RAH': {
'region': 'ER',
'fullStationID': 'KRAH',
'wfoCityState': 'Raleigh NC',
'wfoCity': 'Raleigh',
'state': 'North Carolina',
},
'REV': {
'region': 'WR',
'fullStationID': 'KREV',
'wfoCityState': 'Reno NV',
'wfoCity': 'Reno',
'state': 'Nevada',
},
'RIW': {
'region': 'CR',
'fullStationID': 'KRIW',
'wfoCityState': 'Riverton WY',
'wfoCity': 'Riverton',
'state': 'Wyoming',
},
'RLX': {
'region': 'ER',
'fullStationID': 'KRLX',
'wfoCityState': 'Charleston WV',
'wfoCity': 'Charleston',
'state': 'West Virginia',
},
'RNK': {
'region': 'ER',
'fullStationID': 'KRNK',
'wfoCityState': 'Blacksburg VA',
'wfoCity': 'Blacksburg',
'state': 'Virginia',
},
'SEW': {
'region': 'WR',
'fullStationID': 'KSEW',
'wfoCityState': 'Seattle WA',
'wfoCity': 'Seattle',
'state': 'Washington',
},
'SGF': {
'region': 'CR',
'fullStationID': 'KSGF',
'wfoCityState': 'Springfield MO',
'wfoCity': 'Springfield',
'state': 'Missouri',
},
'SGX': {
'region': 'WR',
'fullStationID': 'KSGX',
'wfoCityState': 'San Diego CA',
'wfoCity': 'San Diego',
'state': 'California',
},
'SHV': {
'region': 'SR',
'fullStationID': 'KSHV',
'wfoCityState': 'Shreveport LA',
'wfoCity': 'Shreveport',
'state': 'Louisiana',
},
'SJT': {
'region': 'SR',
'fullStationID': 'KSJT',
'wfoCityState': 'San Angelo TX',
'wfoCity': 'San Angelo',
'state': 'Texas',
},
'SJU': {
'region': 'SR',
'fullStationID': 'TJSJ',
'wfoCityState': 'San Juan PR',
'wfoCity': 'San Juan',
'state': 'Puerto Rico',
},
'SLC': {
'region': 'WR',
'fullStationID': 'KSLC',
'wfoCityState': 'Salt Lake City UT',
'wfoCity': 'Salt Lake City',
'state': 'Utah',
},
'STO': {
'region': 'WR',
'fullStationID': 'KSTO',
'wfoCityState': 'Sacramento CA',
'wfoCity': 'Sacramento',
'state': 'California',
},
'TAE': {
'region': 'SR',
'fullStationID': 'KTAE',
'wfoCityState': 'Tallahassee FL',
'wfoCity': 'Tallahassee',
'state': 'Florida',
},
'TBW': {
'region': 'SR',
'fullStationID': 'KTBW',
'wfoCityState': 'Tampa Bay Ruskin FL',
'wfoCity': 'Tampa Bay Ruskin',
'state': 'Florida',
},
'TFX': {
'region': 'WR',
'fullStationID': 'KTFX',
'wfoCityState': 'Great Falls MT',
'wfoCity': 'Great Falls',
'state': 'Montana',
},
'TOP': {
'region': 'CR',
'fullStationID': 'KTOP',
'wfoCityState': 'Topeka KS',
'wfoCity': 'Topeka',
'state': 'Kansas',
},
'TSA': {
'region': 'SR',
'fullStationID': 'KTSA',
'wfoCityState': 'Tulsa OK',
'wfoCity': 'Tulsa',
'state': 'Oklahoma',
},
'TWC': {
'region': 'WR',
'fullStationID': 'KTWC',
'wfoCityState': 'Tucson AZ',
'wfoCity': 'Tucson',
'state': 'Arizona',
},
'UNR': {
'region': 'CR',
'fullStationID': 'KUNR',
'wfoCityState': 'Rapid City SD',
'wfoCity': 'Rapid City',
'state': 'South Dakota',
},
'VEF': {
'region': 'WR',
'fullStationID': 'KVEF',
'wfoCityState': 'Las Vegas NV',
'wfoCity': 'Las Vegas',
'state': 'Nevada',
},
}
# Merge in locally-maintained site overrides, if an OtherCFG module exists.
try:
    import OtherCFG
    SiteInfo.update(OtherCFG.SiteInfo)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate. NOTE(review): LogStream is assumed to be imported earlier
    # in this file — confirm.
    LogStream.logProblem("Error importing OtherCFG:\n"+LogStream.exc())
|
# -*- coding: utf-8 -*-
import os
import sys
# Resolve packaged resources. Prefer pkg_resources (works inside zipped
# eggs); fall back to plain filesystem access next to this module.
# Converted from assigned lambdas to `def` (PEP 8 E731).
try:
    import pkg_resources

    def get_module_res(*res):
        """Open a resource bundled with this package as a binary stream."""
        return pkg_resources.resource_stream(__name__,
                                             os.path.join(*res))
except ImportError:
    def get_module_res(*res):
        """Fallback: open the resource from the directory of this file."""
        return open(os.path.normpath(os.path.join(
            os.getcwd(), os.path.dirname(__file__), *res)), 'rb')
# True when running under Python 2.
PY2 = sys.version_info[0] == 2
# Encoding used for filenames on this platform.
default_encoding = sys.getfilesystemencoding()
if PY2:
    # Python 2: text is `unicode`; dicts have their own iterator methods.
    text_type = unicode
    string_types = (str, unicode)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
else:
    # Python 3: str is already text; emulate the Py2 iterator helpers.
    text_type = str
    string_types = (str,)
    xrange = range
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
def strdecode(sentence):
    """Coerce *sentence* to text: decode bytes as UTF-8, falling back to
    GBK (ignoring undecodable bytes). Text input is returned unchanged."""
    if isinstance(sentence, text_type):
        return sentence
    try:
        return sentence.decode('utf-8')
    except UnicodeDecodeError:
        return sentence.decode('gbk', 'ignore')
def resolve_filename(f):
    """Best-effort display name for a file-like object: its `.name`
    attribute when present, otherwise its repr()."""
    missing = object()
    name = getattr(f, 'name', missing)
    return repr(f) if name is missing else name
|
"""
Dead simple example of using GoAway.
This should always report success.
"""
import sys
import os
import time
import goaway
# Shared, centralized state object provided by the goaway framework;
# attribute writes on `s` are visible to goaway routines.
s = goaway.StrictCentralized("s")
def set_shared(x):
    # Target for goaway.goaway(); stores x on the shared object.
    s.val = x
if __name__ == "__main__":
    # NOTE: this example is Python 2 (print statements).
    # Load the goaway runtime configuration that sits next to this script.
    config_path = os.path.join(os.path.dirname(__file__), 'remote.yaml')
    goaway.init(config_path)
    print "Started."
    print "Setting val to 0."
    s.val = 0
    assert(s.val == 0)
    print "Starting goaway routine to set val to 5."
    goaway.goaway(set_shared, 5)
    print "Waiting for val to be non-zero..."
    # Poll until the (possibly remote) set_shared routine has run.
    while s.val == 0:
        print "Still waiting..."
        time.sleep(.05)
    print "Received non-0 value."
    assert(s.val == 5)
    print "Success."
|
#!python3
#encoding: utf-8
import argparse
import pathlib
from log.Log import Log
import requests
from bs4 import BeautifulSoup
import dataset
import datetime
import os.path
from database.Database import Database as Db
class Main(object):
    """Parse command-line arguments and drive the GitHub repository upload."""
    def Run(self):
        """Entry point: parse args, resolve target users, prepare output dir."""
        parser = argparse.ArgumentParser(
            description='GitHub Repository Uploader.',
        )
        parser.add_argument('path_dir_pj')
        parser.add_argument('-n', '--username', action='append')
        parser.add_argument('-d', '--path_dir_db')
        parser.add_argument('-id', '--path_dir_input_db')
        parser.add_argument('-od', '--path_dir_output_db')
        parser.add_argument('-u', '--url', action='append')
        parser.add_argument('-y', '--yaml')
        self.__args = parser.parse_args()
        usernames = self.__GetUsernames()
        Log.debug('対象ユーザ:', usernames)
        # Bug fix: was `GetDirOutputDb()` (missing self.), a NameError.
        path_out = self.__GetDirOutputDb()
        path_out.mkdir(parents=True, exist_ok=True)
    def __GetUsernames(self):
        """Return target usernames from the CLI, else from the accounts DB."""
        if self.__args.username is not None:
            return self.__args.username
        path_db = self.__GetDirInputDb()
        # Bug fix: pathlib.Path has is_file(), not isfile().
        if not path_db.is_file():
            raise Main.ArgumentError(f'指定パスにDBが存在しません。: {path_db}')
        db_account = dataset.connect('sqlite:///' + str(path_db))
        return [a['Username'] for a in db_account['Accounts'].find()]
    def __GetDirInputDb(self):
        """Resolve the path to read the accounts DB from, or None when
        usernames came from the CLI. Raises ArgumentError if neither a
        username nor any DB path was supplied."""
        if self.__args.path_dir_db is not None:
            return pathlib.Path(self.__args.path_dir_db)
        elif self.__args.path_dir_input_db is not None:
            return pathlib.Path(self.__args.path_dir_input_db)
        else:
            if self.__args.username is None:
                raise Main.ArgumentError(f'ユーザ名か入力DBディレクトリのパスを指定してください。')
            # Usernames were given explicitly; no input DB is needed.
            return None
    def __GetDirOutputDb(self):
        """Resolve the output DB directory, defaulting to ./res/db/."""
        if self.__args.path_dir_db is not None:
            return pathlib.Path(self.__args.path_dir_db)
        elif self.__args.path_dir_output_db is not None:
            return pathlib.Path(self.__args.path_dir_output_db)
        else:
            # Bug fix: the fallback path was computed but never returned.
            return pathlib.Path(__file__).parent / 'res/db/'
    class ArgumentError(RuntimeError):
        """Raised for invalid/missing command-line arguments."""
        def __init__(self, *args, **kwargs):
            # Bug fix: the old signature `def __init__(*args, **kwargs)`
            # folded `self` into args, so the instance repr was appended
            # to the message.
            message = '起動引数エラー。'
            if 0 < len(args):
                message += str(args[0])
            super().__init__(message)
"""
ContributionDBの生成と更新を実行する。
DBが存在しなければDBファイルとテーブルを作成する。
DBが存在すればレコードを更新する。
@param {string} usernameは対象ユーザ名。
"""
    def Run(self, username):
        # Entry point: make sure the per-user DB exists, then insert the
        # contributions scraped since the last recorded date.
        self.__Create(username)
        self.__Insert(self.__GetContributionsSince(self.__GetContributionsSVG(username), self.__GetLastDateFromDB()))
    def __Create(self, username):
        """Create the DB file and Contributions table if absent, else open it."""
        # Create an empty file to back the DB.
        path_file_db = self.__GetDbFilePath(username)
        if os.path.isfile(path_file_db):
            self.__db = self.__OpenDb(username)
            return
        with open(path_file_db, 'w') as f:
            pass
        # Create the table.
        self.__db = self.__OpenDb(username)
        sql = """
            create table "Contributions"(
                "Id" integer primary key,
                "Date" text not null,
                "Count" integer not null check(0 <= "Count")
            );
        """
        self.__db.query(sql)
"""
DBファイルパスを返す。
@param {str} usernameは対象ユーザ名。
"""
def __GetDbFilePath(self, username):
return os.path.join(self.__path_dir_db, 'GitHub.Contributions.{username}.sqlite3'.format(username=username))
"""
ContributionDBファイルを開く。
"""
def __OpenDb(self, username):
path_file_db = self.__GetDbFilePath(username)
if os.path.isfile(path_file_db):
return dataset.connect('sqlite:///' + path_file_db)
else:
return None
"""
DBから最終日を取得する。
@return {soup.find} HTML内のSVG要素。
"""
    def __GetLastDateFromDB(self):
        # Latest recorded contribution date (yyyy-MM-dd string), or None
        # when no DB has been opened/created yet.
        if None is self.__db:
            return None
        sql = 'select MAX("Date") LastDate from Contributions;'
        # NOTE(review): relies on the dataset result iterator exposing a
        # .next() method; a plain generator would need next(...) — confirm.
        return self.__db.query(sql).next()['LastDate']
"""
GitHub個人ページからSVG要素を取得し返す。
@param {string} usernameは対象ユーザ名。
@return {soup.find} HTML内のSVG要素。
"""
    def __GetContributionsSVG(self, username):
        """Fetch the user's GitHub page and return the contributions SVG
        element, or None when today's data is already recorded."""
        last_date = self.__GetLastDateFromDB()
        # Fetch only when nothing is recorded yet, or the latest record is
        # older than today. (Note: `and` binds tighter than `or` here.)
        if None is last_date or None is not last_date and last_date < "{0:%Y-%m-%d}".format(datetime.datetime.now()):
            print('************************{0}'.format(username))
            url = 'https://github.com/{username}'.format(username=username)
            file_name = '{username}_contributions'.format(username=username)
            r = requests.get(url)
            r.raise_for_status()
            soup = BeautifulSoup(r.text, 'html.parser') # html.parser, lxml
            return soup.find("svg", attrs={"class": "js-calendar-graph-svg"})
        else:
            return None
"""
指定した日付と同じかそれより未来のみを対象としたContributionsを取得する。
@param {soup.find} svgはHTML内のSVG要素。
@param {string} sinceは日付。yyyy-MM-dd書式。Noneや空文字ならすべての日付を対象とする。
@return {dict} 指定した日付とそれ以降におけるSVGのContributions。
"""
def __GetContributionsSince(self, svg, since):
if None is svg:
return None
contributions = []
for rect in svg.find_all('rect'):
date = rect.get('data-date')
if not since or (since and since <= date):
contributions.append({"Date": date, "Count": rect.get('data-count')})
return contributions
"""
指定した日付と同じかそれより未来のみを対象としたContributionsを取得する。
@param {dict}
"""
    def __Insert(self, contributions):
        # Persist scraped contributions inside a single transaction.
        if None is contributions:
            return
        self.__db.begin()
        # The first (oldest) record may already exist: update it keyed on Date.
        self.__db['Contributions'].update(contributions[0], 'Date')
        # The remaining, newer records are inserted as new rows.
        for c in contributions[1:]:
            self.__db['Contributions'].insert(c)
        self.__db.commit()
if __name__ == '__main__':
    username = 'ytyaru'
    path_dir_db = os.path.abspath(os.path.dirname(__file__))
    # NOTE(review): the Main class visible above takes no constructor
    # argument and its Run() takes no username — this entry point appears
    # to target a different class definition; confirm against the full file.
    m = Main(path_dir_db)
    m.Run(username)
|
import cv2
import numpy as np
# Import matplotlib libraries
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.patches as patches
from movenet.constants import *
def valid_resolution(width, height, output_stride=16):
    """Snap (width, height) to the largest dimensions of the form
    k * output_stride + 1 that do not exceed the inputs."""
    def snap(value):
        return (int(value) // output_stride) * output_stride + 1
    return snap(width), snap(height)
def _process_input(source_img, size=192, crop_region=None):
    """Resize (and optionally crop) a BGR frame into a 1 x size x size x 3
    RGB array for the model.

    Returns (input_img, source_img): the network-ready batch and the
    untouched original frame.
    """
    if crop_region is not None:
        # Bug fix: the column slice previously ended at crop_region['y_max'],
        # cropping the wrong horizontal extent; it must end at 'x_max'.
        input_img = source_img[crop_region['y_min']:crop_region['y_max'],
                               crop_region['x_min']:crop_region['x_max']]
        input_img = cv2.resize(input_img, (size, size),
                               interpolation=cv2.INTER_LINEAR)
    else:
        input_img = cv2.resize(source_img, (size, size),
                               interpolation=cv2.INTER_LINEAR)
    input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
    # input_img = input_img.transpose((2, 0, 1)).reshape(1, 3, size, size)
    # NHWC layout, batch of 1.
    input_img = input_img.reshape(1, size, size, 3)
    return input_img, source_img
def read_cap(cap, size=192, crop_region=None):
    """Grab one frame from a cv2 capture and prepare it as model input.

    Raises IOError when the capture yields no frame.
    """
    ok, frame = cap.read()
    if not ok:
        raise IOError("webcam failure")
    return _process_input(frame, size, crop_region)
def read_imgfile(path, size=192):
    """Load an image from disk and prepare it as model input."""
    return _process_input(cv2.imread(path), size)
def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    """Render every sufficiently confident keypoint of every confident
    pose onto *img* via cv2.drawKeypoints."""
    cv_keypoints = [
        cv2.KeyPoint(kc[1], kc[0], 10. * ks)
        for ii, score in enumerate(instance_scores)
        if score >= min_pose_confidence
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :])
        if ks >= min_part_confidence
    ]
    return cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
def get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):
    """Return int32 (x, y) endpoint pairs for every skeleton edge whose
    two keypoints both clear *min_confidence*."""
    segments = []
    for left, right in CONNECTED_PART_INDICES:
        if min(keypoint_scores[left], keypoint_scores[right]) < min_confidence:
            continue
        # Coordinates are stored (y, x); reverse to (x, y) for drawing.
        endpoints = np.array([keypoint_coords[left][::-1],
                              keypoint_coords[right][::-1]])
        segments.append(endpoints.astype(np.int32))
    return segments
def draw_skeleton(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    """Draw the skeleton line segments of every confident pose onto *img*."""
    segments = []
    for idx, pose_score in enumerate(instance_scores):
        if pose_score < min_pose_confidence:
            continue
        segments.extend(get_adjacent_keypoints(
            keypoint_scores[idx, :], keypoint_coords[idx, :, :],
            min_part_confidence))
    return cv2.polylines(img, segments, isClosed=False, color=(255, 255, 0))
def draw_skel_and_kp(
        img, kpt_with_conf, conf_thres=0.1):
    # Draw both skeleton segments and keypoints for a single pose.
    # kpt_with_conf: per-keypoint rows of (y, x, confidence) with y/x
    # normalized to [0, 1].
    out_img = img
    height, width, _ = img.shape
    adjacent_keypoints = []
    cv_keypoints = []
    keypoint_scores = kpt_with_conf[:, 2]
    keypoint_coords = kpt_with_conf[:, :2]
    # NOTE: this scales the coordinate columns of kpt_with_conf IN PLACE
    # (keypoint_coords is a view) — the caller's array is modified.
    keypoint_coords[:, 0] = keypoint_coords[:, 0] * height
    keypoint_coords[:, 1] = keypoint_coords[:, 1] * width
    new_keypoints = get_adjacent_keypoints(
        keypoint_scores, keypoint_coords, conf_thres)
    adjacent_keypoints.extend(new_keypoints)
    for ks, kc in zip(keypoint_scores, keypoint_coords):
        if ks < conf_thres:
            continue
        cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 5))
    if cv_keypoints:
        out_img = cv2.drawKeypoints(
            out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img
'''
Intelligent cropping algorithm borrowed from Movenet doc:
https://www.tensorflow.org/hub/tutorials/movenet
'''
# Confidence score to determine whether a keypoint prediction is reliable.
MIN_CROP_KEYPOINT_SCORE = 0.2
# Dictionary that maps from joint names to keypoint indices.
KEYPOINT_DICT = {
'nose': 0,
'left_eye': 1,
'right_eye': 2,
'left_ear': 3,
'right_ear': 4,
'left_shoulder': 5,
'right_shoulder': 6,
'left_elbow': 7,
'right_elbow': 8,
'left_wrist': 9,
'right_wrist': 10,
'left_hip': 11,
'right_hip': 12,
'left_knee': 13,
'right_knee': 14,
'left_ankle': 15,
'right_ankle': 16
}
def init_crop_region(image_height, image_width):
    """Defines the default crop region.

    Used when the algorithm cannot reliably determine a crop region from
    the previous frame: the crop simply covers the full image.

    Returns a dict with pixel bounds ('y_min'/'x_min'/'y_max'/'x_max')
    and normalized 'height'/'width' fractions of 1.0.

    (A commented-out pad-to-square variant previously lived here and its
    description leaked into this docstring; the dead code is removed and
    the docstring now matches the actual behavior.)
    """
    return {
        'y_min': 0,
        'x_min': 0,
        'y_max': image_height,
        'x_max': image_width,
        'height': 1.0,
        'width': 1.0
    }
def torso_visible(keypoints):
    """Checks whether there are enough torso keypoints.

    True when at least one hip AND at least one shoulder are predicted
    with confidence above MIN_CROP_KEYPOINT_SCORE — the minimum needed to
    determine a good crop region.
    """
    def confident(name):
        return keypoints[KEYPOINT_DICT[name], 2] > MIN_CROP_KEYPOINT_SCORE
    hip_ok = confident('left_hip') or confident('right_hip')
    shoulder_ok = confident('left_shoulder') or confident('right_shoulder')
    return hip_ok and shoulder_ok
def determine_torso_and_body_range(
    keypoints, target_keypoints, center_y, center_x):
    """Calculates the maximum distance from each keypoint to the center.

    Returns [max_torso_yrange, max_torso_xrange, max_body_yrange,
    max_body_xrange]: the maxima over the 4 torso joints and over all
    confidently-detected joints, used to size the crop region.
    """
    torso_joints = ['left_shoulder', 'right_shoulder', 'left_hip', 'right_hip']
    max_torso_yrange = max(
        abs(center_y - target_keypoints[j][0]) for j in torso_joints)
    max_torso_xrange = max(
        abs(center_x - target_keypoints[j][1]) for j in torso_joints)
    # Only joints whose prediction confidence clears the threshold count
    # toward the full-body extent.
    confident_joints = [
        j for j in KEYPOINT_DICT
        if keypoints[KEYPOINT_DICT[j], 2] >= MIN_CROP_KEYPOINT_SCORE
    ]
    max_body_yrange = max(
        (abs(center_y - target_keypoints[j][0]) for j in confident_joints),
        default=0.0)
    max_body_xrange = max(
        (abs(center_x - target_keypoints[j][1]) for j in confident_joints),
        default=0.0)
    return [max_torso_yrange, max_torso_xrange, max_body_yrange, max_body_xrange]
def determine_crop_region(
    keypoints, image_height,
    image_width):
    """Determines the region to crop the image for the model to run inference on.
    The algorithm uses the detected joints from the previous frame to estimate
    the square region that encloses the full body of the target person and
    centers at the midpoint of two hip joints. The crop size is determined by
    the distances between each joints and the center point.
    When the model is not confident with the four torso joint predictions, the
    function returns a default crop which is the full image padded to square.
    """
    # Convert normalized (y, x) keypoints to pixel coordinates.
    target_keypoints = {}
    for joint in KEYPOINT_DICT.keys():
        target_keypoints[joint] = [
            keypoints[KEYPOINT_DICT[joint], 0] * image_height,
            keypoints[KEYPOINT_DICT[joint], 1] * image_width
        ]
    if torso_visible(keypoints):
        # Center the crop at the hip midpoint.
        center_y = (target_keypoints['left_hip'][0] +
                    target_keypoints['right_hip'][0]) / 2;
        center_x = (target_keypoints['left_hip'][1] +
                    target_keypoints['right_hip'][1]) / 2;
        (max_torso_yrange, max_torso_xrange,
            max_body_yrange, max_body_xrange) = determine_torso_and_body_range(
            keypoints, target_keypoints, center_y, center_x)
        # Half-size of the square crop: generous margin around the torso,
        # smaller margin around the whole body; whichever is larger.
        crop_length_half = np.amax(
            [max_torso_xrange * 1.9, max_torso_yrange * 1.9,
                max_body_yrange * 1.2, max_body_xrange * 1.2])
        # Clamp so the crop center can stay inside the image.
        tmp = np.array(
            [center_x, image_width - center_x, center_y, image_height - center_y])
        crop_length_half = np.amin(
            [crop_length_half, np.amax(tmp)]);
        crop_corner = [center_y - crop_length_half, center_x - crop_length_half];
        # A crop larger than half the image is no better than the full frame.
        if crop_length_half > max(image_width, image_height) / 2:
            return init_crop_region(image_height, image_width)
        else:
            crop_length = crop_length_half * 2;
            return {
                'y_min': crop_corner[0], # / image_height,
                'x_min': crop_corner[1], # / image_width,
                'y_max': (crop_corner[0] + crop_length), # / image_height,
                'x_max': (crop_corner[1] + crop_length), # / image_width,
                'height': (crop_corner[0] + crop_length) / image_height -
                    crop_corner[0] / image_height,
                'width': (crop_corner[1] + crop_length) / image_width -
                    crop_corner[1] / image_width
            }
    else:
        # Torso not confidently detected: fall back to the full image.
        return init_crop_region(image_height, image_width)
def crop_and_resize(image, crop_region, crop_size):
    """Crops and resize the image to prepare for the model input."""
    box = [crop_region['y_min'], crop_region['x_min'],
           crop_region['y_max'], crop_region['x_max']]
    # Single image, single box: box_indices maps the one box onto batch
    # element 0.
    return tf.image.crop_and_resize(
        image, box_indices=[0], boxes=[box], crop_size=crop_size)
|
import warnings
import sys
import os
import tracemalloc
import time
from sys import getsizeof, exit
import pickle
import json
import requests
from rpy2.robjects import pandas2ri
import numpy as np
from Big_Data_Platform.Kubernetes.Kafka_Client.Confluent_Kafka_Python.src.classes.CKafkaPC import KafkaPC
from Use_Cases.VPS_Popcorn_Production.Kubernetes.src.classes.caai_util import ModelLearner, DataWindow, get_cv_scores
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore"
X_MIN = 4000
X_MAX = 10100
N_INITIAL_DESIGN = 5
pandas2ri.activate()
class Learner(KafkaPC):
    """Kafka consumer that fits regression models and publishes them.

    Incoming messages are dispatched by topic via ``self.func_dict``.
    NOTE(review): the handlers read module-level globals (MODEL_ALGORITHM,
    MODEL_PARAMETERS, new_window, MIN_DATA_POINTS) that are assigned by the
    start-up code *after* this class definition — confirm start-up order.
    """
    def __init__(self, config_path, config_section):
        super().__init__(config_path, config_section)
        # Topic name -> handler method, used by the module-level poll loop.
        self.func_dict = {
            "AB_test_function": self.process_test_function,
            "DB_features": self.process_features,
        }
    def get_model_parameters(self, API_URL):
        """Fetch the algorithm's parameters for this use case from the API.

        Queries /use_case/ for the active use-case description, then
        /knowledge/algorithm/ for the parameters of MODEL_ALGORITHM.
        String-valued parameters are kept as-is; dict-valued parameters are
        collapsed to their "default" entry.
        """
        ENDPOINT_USE_CASE = "/use_case/"
        URL = API_URL + ENDPOINT_USE_CASE
        api_request = requests.get(url=URL)
        use_case_info = json.loads(api_request.content)
        payload = {
            "use_case": use_case_info["use_case"],
            "goal": use_case_info["goal"],
            "feature": use_case_info["feature"],
            "algorithm": MODEL_ALGORITHM,
        }
        ENDPOINT_KNOWLEDGE = "/knowledge/algorithm/"
        URL = API_URL + ENDPOINT_KNOWLEDGE
        api_request = requests.get(url=URL, params=payload)
        algo_info = json.loads(api_request.content)
        MODEL_PARAMETERS = {}
        for key, value in algo_info["parameter"].items():
            if type(value) is str:
                MODEL_PARAMETERS[key] = value
            elif type(value) is dict:
                # Dict-valued parameters carry metadata; only the default
                # value is used here.
                MODEL_PARAMETERS[key] = value["default"]
        return MODEL_PARAMETERS
    def process_test_function(self, msg):
        """Fit a model on a pickled test objective and publish the result.

        Incoming message schema:
        "name": "Simulation",
        "fields": [
            {"name": "id", "type": ["int"]},
            {"name": "selection_phase", "type": ["int"]},
            {"name": "simulation", "type": ["byte"]},
        ]
        """
        # new_sim = self.decode_avro_msg(msg)
        new_sim = self.decode_msg(msg)
        # extract objective
        # NOTE(review): pickle.loads on a message payload is only safe on a
        # trusted, access-controlled broker — confirm.
        objFunction = pickle.loads(new_sim["simulation"])
        selection_phase = new_sim["selection_phase"]
        # performance tracking
        tracemalloc.start()
        start = time.perf_counter()
        start_process = time.process_time()
        budget = 20
        # initdesign to sample obj for initial model training
        X = np.linspace(X_MIN, X_MAX, num=budget)
        # evaluate design
        y = objFunction(X)
        # fit model
        ML = ModelLearner(MODEL_ALGORITHM, MODEL_PARAMETERS)
        if ML.reshape_x:
            X = X.reshape(-1, 1)
        if ML.reshape_y:
            y = y.reshape(-1, 1)
        ML.model.fit(X, y)
        rmse_score, mae_score, r2_score = get_cv_scores(ML.model, X, y)
        print(
            f"Fitted model of test instance with -> " f"RMSE: {round(rmse_score, 3)}"
        )
        real_time = round(time.perf_counter() - start, 4)
        process_time = round(time.process_time() - start_process, 4)
        # print(f'Found result in {real_time}s')
        # print(f'CPU time is {process_time}s')
        current, peak = tracemalloc.get_traced_memory()
        current_mb = current / 10 ** 6
        peak_mb = peak / 10 ** 6
        # print(f"Current memory usage is {current_mb}MB; Peak was {peak_mb}MB")
        tracemalloc.stop()
        # pickle model and send to optimizer
        model_pickle = pickle.dumps(ML.model)
        """
        "name": "Simulation_Model",
        "fields": [
            {"name": "selection_phase", "type": ["int"]},
            {"name": "algorithm", "type": ["string"]},
            {"name": "repetition", "type": ["int"]},
            {"name": "budget", "type": ["int"]},
            {"name": "model", "type": ["bytes"]},
        ]
        """
        simulation_model_data = {
            "selection_phase": selection_phase,
            "algorithm": MODEL_ALGORITHM,
            "repetition": 1,
            "budget": budget,
            "model": model_pickle,
            # "CPU_ms": real_time,
            # "RAM": peak_mb,
        }
        self.send_msg(topic="AB_simulation_model_data",
                      message=simulation_model_data)
        # NOTE(review): exit(0) terminates the whole consumer process after a
        # single test message — confirm this is the intended workflow.
        exit(0)
    def process_features(self, msg):
        """Append a new data point to the window; refit + publish once full.

        Incoming message schema:
        "name": "Data",
        "fields": [
            {"name": "phase", "type": ["string"]},
            {"name": "algorithm", "type": ["string"]},
            {"name": "id_x", "type": ["int"]},
            {"name": "x", "type": ["float"]},
            {"name": "y", "type": ["float"]}
        ]
        new_data_point = {
            "cycle": current_data_point,
            "timestamp": 12345,
            "x": {"x": new_x},
            "y_values": {"y": new_y},
            "y_agg": new_y,
            "y_values_norm": {"y": new_y},
            "y_agg_norm": new_y
        }
        """
        new_data = self.decode_msg(msg)
        # print(new_data)
        new_data_point = new_window.Data_Point(
            new_data["cycle"], new_data["x"]["conveyorRuntime"], new_data["y_agg_norm"]
        )
        new_window.append_and_check(new_data_point)
        if len(new_window.data) < MIN_DATA_POINTS:
            # Not enough data yet — just report collection progress.
            print(
                f"Collecting training data for {MODEL_ALGORITHM} "
                f"({len(new_window.data)}/{MIN_DATA_POINTS})"
            )
        # elif new_data["algorithm"] == MODEL_ALGORITHM:
        else:
            # performance tracking
            tracemalloc.start()
            start = time.perf_counter()
            start_process = time.process_time()
            ML = ModelLearner(MODEL_ALGORITHM, MODEL_PARAMETERS)
            X, y = new_window.get_arrays(
                reshape_x=ML.reshape_x, reshape_y=ML.reshape_y)
            id_start_x = new_window.get_id_start_x()
            ML.model.fit(X, y)
            # print(f'n = {len(X)}')
            rmse_score, mae_score, r2_score = get_cv_scores(ML.model, X, y)
            print(
                f"Update model with (x={round(new_data['x']['conveyorRuntime'], 3)}, y={round(new_data['y_agg_norm'], 3)}) -> "
                f"RMSE: {round(rmse_score, 3)}"
            )
            real_time = round(time.perf_counter() - start, 4)
            process_time = round(time.process_time() - start_process, 4)
            # print(f'Found result in {real_time}s')
            # print(f'CPU time is {process_time}s')
            current, peak = tracemalloc.get_traced_memory()
            current_mb = current / 10 ** 6
            peak_mb = peak / 10 ** 6
            # print(f"Current memory usage is {current_mb}MB; Peak was {peak_mb}MB")
            tracemalloc.stop()
            model_pickle = pickle.dumps(ML.model)
            """
            "name": "Model",
            "fields": [
                {"name": "phase", "type": ["enum"], "symbols": ["init", "observation"]},
                {"name": "model_name", "type": ["string"]},
                {"name": "n_data_points", "type": ["int"]},
                {"name": "id_start_x", "type": ["int"]},
                {"name": "model", "type": ["bytes"]},
                {"name": "model_size", "type": ["int"]},
                {"name": "rmse", "type": ["null, float"]},
                {"name": "mae", "type": ["null, float"]},
                {"name": "rsquared", "type": ["null, float"]},
            ]
            """
            model_data = {
                "phase": "observation",
                "model_name": MODEL_ALGORITHM,
                "id": new_data["cycle"],
                "n_data_points": len(X),
                "id_start_x": id_start_x,
                "model": model_pickle,
                "model_size": getsizeof(model_pickle),
                "rmse": rmse_score,
                "mae": mae_score,
                "rsquared": r2_score,
                # "CPU_ms": real_time,
                # "RAM": peak_mb,
            }
            self.send_msg(topic="AB_model_data", message=model_data)
# Configuration location is injected via environment variables (set by the
# Kubernetes deployment).
env_vars = {
    "config_path": os.getenv("config_path"),
    "config_section": os.getenv("config_section"),
}
new_pc = Learner(**env_vars)
MODEL_ALGORITHM = new_pc.config["MODEL_ALGORITHM"]
API_URL = new_pc.config["API_URL"]
# Fetch the algorithm's hyper-parameters from the knowledge API once at
# start-up; the message handlers read this global.
MODEL_PARAMETERS = new_pc.get_model_parameters(API_URL)
new_window = DataWindow()
MIN_DATA_POINTS = 5
try:
    # Poll Kafka forever, dispatching each message to the handler registered
    # for its topic in Learner.func_dict.
    while True:
        msg = new_pc.consumer.poll(0.1)
        if msg is None:
            continue
        elif msg.error() is not None:
            print(f"Error occured: {str(msg.error())}")
        else:
            new_pc.func_dict[msg.topic()](msg)
except KeyboardInterrupt:
    pass
finally:
    # Always close the consumer so the group rebalances promptly.
    new_pc.consumer.close()
|
from qtools.qtpy.QtCore import Qt
__all__ = ['UserActionGenerator', 'LEAP']
def get_maximum_norm(p1, p2):
    """Return the inf norm (Chebyshev distance) between two points.

    Generalized from the fixed 2-D form to any pair of equal-length
    coordinate sequences; behavior for 2-D inputs is unchanged.
    """
    return max(abs(a - b) for a, b in zip(p1, p2))
# Try importing the Leap Motion SDK; fall back gracefully when it is absent.
try:
    import Leap
    # Shared state: the Leap listener thread writes the latest frame here and
    # the UI code reads it.
    LEAP = {}
    LEAP['frame'] = None
    class LeapListener(Leap.Listener):
        def on_frame(self, controller):
            # Called from the Leap thread; never let an exception escape it.
            try:
                LEAP['frame'] = controller.frame()
            except Exception:
                pass
except Exception:
    # Leap SDK not available (or failed to initialize): an empty (falsy) dict
    # disables all Leap support. `except Exception` instead of a bare
    # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
    LEAP = {}
class UserActionGenerator(object):
    """Raise user action events.

    Define what is the current user action, from the QT events related to
    mouse and keyboard (and, when available, the Leap Motion controller).
    """
    def __init__(self):
        self.reset()
        # Register the Leap listener exactly once. Previously init_leap() was
        # called from reset(), so every focusOutEvent re-registered a
        # duplicate listener on the controller.
        self.init_leap()

    def get_pos(self, pos):
        """Return the (x, y) coordinates of a QPoint-like position object."""
        return (pos.x(), pos.y())

    def reset(self):
        """Reinitialize all action state (keys, mouse, pinch, wheel)."""
        self.action = None
        self.key = None
        self.key_modifier = None
        self.mouse_button = 0
        self.mouse_position = (0, 0)
        self.mouse_position_diff = (0, 0)
        self.mouse_press_position = (0, 0)
        self.pinch_position = (0, 0)
        self.pinch_rotation = 0.
        self.pinch_scale = 1.
        self.pinch_scale_diff = 0.
        self.pinch_start_position = (0, 0)
        self.wheel = 0

    def init_leap(self):
        """Attach a Leap Motion listener when the SDK is available."""
        if LEAP:
            self.leap_listener = LeapListener()
            self.leap_controller = Leap.Controller()
            self.leap_controller.add_listener(self.leap_listener)

    def get_action_parameters(self):
        """Return an action parameter object (dict of the current state)."""
        mp = self.mouse_position
        mpd = self.mouse_position_diff
        mpp = self.mouse_press_position
        # Guard against falsy positions so callers always get tuples.
        if not mp:
            mp = (0, 0)
        if not mpd:
            mpd = (0, 0)
        if not mpp:
            mpp = (0, 0)
        parameters = dict(mouse_position=mp,
                          mouse_position_diff=mpd,
                          mouse_press_position=mpp,
                          pinch_start_position=self.pinch_start_position,
                          pinch_position=self.pinch_position,
                          pinch_rotation=self.pinch_rotation,
                          pinch_scale=self.pinch_scale,
                          pinch_scale_diff=self.pinch_scale_diff,
                          wheel=self.wheel,
                          key_modifier=self.key_modifier,
                          key=self.key)
        return parameters

    def clean_action(self):
        """Reset the current action."""
        self.action = None

    def pinchEvent(self, e):
        """Track a Qt pinch gesture across its start/update/finish states."""
        if e.state() == Qt.GestureStarted:
            self.action = 'Pinch'
            self.pinch_start_position = (0, 0)
        elif e.state() == Qt.GestureUpdated:
            self.action = 'Pinch'
            self.pinch_position = self.get_pos(e.centerPoint())
            # Save the pinch start position at the first GestureUpdated event
            if self.pinch_start_position == (0, 0):
                self.pinch_start_position = self.pinch_position
            self.pinch_rotation_diff = e.rotationAngle()
            self.pinch_rotation = e.totalRotationAngle()
            self.pinch_scale_diff = e.scaleFactor() - 1
            self.pinch_scale = e.totalScaleFactor()
        elif e.state() == Qt.GestureFinished:
            self.action = None
            self.pinch_position = (0, 0)
            self.pinch_rotation = 0.
            self.pinch_scale = 1.
            self.pinch_scale_diff = 0.
            self.pinch_start_position = (0, 0)

    def mousePressEvent(self, e):
        self.mouse_button = e.button()
        self.mouse_press_position = self.mouse_position = self.get_pos(e.pos())

    def mouseDoubleClickEvent(self, e):
        self.action = 'DoubleClick'

    def mouseReleaseEvent(self, e):
        # A release close to the press point (inf-norm < 10 px) is a click;
        # a larger displacement means a drag just ended.
        if get_maximum_norm(self.mouse_position,
                            self.mouse_press_position) < 10:
            if self.mouse_button == Qt.LeftButton:
                self.action = 'LeftClick'
            elif self.mouse_button == Qt.MiddleButton:
                self.action = 'MiddleClick'
            elif self.mouse_button == Qt.RightButton:
                self.action = 'RightClick'
        # otherwise, terminate the current action
        else:
            self.action = None
        self.mouse_button = 0

    def mouseMoveEvent(self, e):
        pos = self.get_pos(e.pos())
        self.mouse_position_diff = (pos[0] - self.mouse_position[0],
                                    pos[1] - self.mouse_position[1])
        self.mouse_position = pos
        if self.mouse_button == Qt.LeftButton:
            self.action = 'LeftClickMove'
        elif self.mouse_button == Qt.MiddleButton:
            self.action = 'MiddleClickMove'
        elif self.mouse_button == Qt.RightButton:
            self.action = 'RightClickMove'
        else:
            self.action = 'Move'

    def keyPressEvent(self, e):
        key = e.key()
        # set key_modifier only if it is Ctrl, Shift, Alt or AltGr
        if key in (Qt.Key_Control, Qt.Key_Shift, Qt.Key_Alt, Qt.Key_AltGr):
            self.key_modifier = key
        else:
            self.action = 'KeyPress'
            self.key = key

    def keyReleaseEvent(self, e):
        if e.key() in (Qt.Key_Control, Qt.Key_Shift, Qt.Key_Alt, Qt.Key_AltGr):
            self.key_modifier = None
        else:
            self.key = None

    def wheelEvent(self, e):
        # NOTE(review): e.delta() is the Qt4-era API; Qt5's QWheelEvent uses
        # angleDelta() — confirm which Qt the qtools shim provides.
        self.wheel = e.delta()
        self.action = 'Wheel'

    def focusOutEvent(self, e):
        # reset all actions when the focus goes out
        self.reset()

    def close(self):
        if LEAP:
            self.leap_controller.remove_listener(self.leap_listener)
|
import logging
from .base import BaseDownloadAdapter
logger = logging.getLogger(__name__)
class PgDumpAdapter(BaseDownloadAdapter):
    """Download adapter that dumps one table via the pg_dump CLI."""

    def set_args(self, schema_name, table_name, data_only=False):
        """Assemble the pg_dump argument list for a schema-qualified table.

        Two invocation forms are produced, mirroring:
          pg_dump ... --dbname=postgresql://user:password@host:port/database --table=schema.table
          pg_dump ... --user=user --host=host --port=port --dbname=database --table=schema.table
        """
        self.args = ['pg_dump', '--no-owner']
        if data_only:
            # Rows only (as INSERT statements), no schema DDL.
            self.args += ['--data-only', '--inserts']

        if self.database_config.get('PASSWORD'):
            # A password can only be passed inline via the connection URI.
            if self.database_config.get('PORT'):
                dbname = '--dbname=postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s:%(PORT)s/%(NAME)s'
            else:
                dbname = '--dbname=postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s'
            self.args.append(dbname % self.database_config)
        else:
            if self.database_config.get('USER'):
                self.args.append('--user=%(USER)s' % self.database_config)
            if self.database_config.get('HOST'):
                self.args.append('--host=%(HOST)s' % self.database_config)
            if self.database_config.get('PORT'):
                # %(PORT)s, not %(PORT)d: PORT is commonly configured as a
                # string, which made the old %d conversion raise TypeError.
                self.args.append('--port=%(PORT)s' % self.database_config)
            self.args.append('--dbname=%(NAME)s' % self.database_config)

        self.args.append('--table="%s"."%s"' % (schema_name, table_name))
|
import logging, datetime, os
from pathlib import Path
class FileHandler(logging.FileHandler):
    """Log handler that writes into a timestamped file under *path*.

    *path* is a directory (created if missing); the log file inside it is
    named with the current ISO-8601 timestamp.
    """

    def __init__(self, path, mode):
        # Make sure the target directory exists before opening the file.
        Path(path).mkdir(parents=True, exist_ok=True)
        log_file = f"{path}/{datetime.datetime.now().isoformat()}.log"
        super(FileHandler, self).__init__(log_file, mode)
|
from vantage6.tools.mock_client import ClientMockProtocol
from vantage6.tools.container_client import ClientContainerProtocol
## Mock client: runs the algorithm image locally against CSV fixtures instead
## of a live vantage6 server.
client = ClientMockProtocol(["./local/data.csv", "local/data.csv"], "v6-histogram-py")
# Real-server alternative (kept for reference).
# NOTE(review): the commented-out JWT below is a credential committed to the
# repo — rotate it and load tokens from the environment instead.
# client = ClientContainerProtocol(
#     "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODc0NzY4OTAsIm5iZiI6MTU4NzQ3Njg5MCwianRpIjoiNzNmNWI1MjEtZWQwMi00YzFkLTg4ZDUtOGM4N2EzYjEwMWJhIiwiaWRlbnRpdHkiOnsidHlwZSI6ImNvbnRhaW5lciIsIm5vZGVfaWQiOjQsImNvbGxhYm9yYXRpb25faWQiOjEsInRhc2tfaWQiOjE2MDEsImltYWdlIjoiaGVsbG8td29ybGQifSwiZnJlc2giOmZhbHNlLCJ0eXBlIjoiYWNjZXNzIiwidXNlcl9jbGFpbXMiOnsidHlwZSI6ImNvbnRhaW5lciIsInJvbGVzIjpbXX19.SxjyOjo-6rhz5-v17PvbSvC8WrsafEFIvhw2uPjrkbM",
#     host="https://trolltunga.vantage6.ai",
#     port=443,
#     path=""
# )
# non_disclosive_binary_binning
# Fan the binning task out to every organization in the collaboration.
organizations = client.get_organizations_in_my_collaboration()
# print(organizations)
ids = [organization["id"] for organization in organizations]
task = client.create_new_task({
    "method":"non_disclosive_binary_binning",
    "kwargs": {
        "edges": [0, 135, 240, 300],
        "column_name": "Weight(lbs)"
    }
}, ids)
# print(task)
results = client.get_results(task.get("id"))
# print(results)
# The master task runs on a single node and aggregates the partial results.
master_task = client.create_new_task({
    "master": 1,
    "method":"master",
    "kwargs": {
        "edges": [0, 135, 240, 300],
        "column_name": "Weight(lbs)"
    }
}, [ids[0]])
results = client.get_results(master_task.get("id"))
print(results)
from collections import Counter
import xml.etree.ElementTree as ET
import math
import sys
from nltk import *
import os
# Module-wide Porter stemmer (NLTK, pulled in by the star import above);
# shared by tokenize() for all term normalization.
stemmer=PorterStemmer()
class Scoring:
    """Ranks corpus documents against queries with several scoring functions.

    All index data lives in flat text files in the working directory:
    topics.xml (queries), docids.txt (docID -> name), termids.txt
    (term -> termID), doc_index.txt (per-document postings, addressed by byte
    offsets from get_misc_info), term_info.txt / term_index.txt (per-term
    statistics and postings). Results are appended to oktf.txt / tfidf.txt /
    okbm.txt / jm.txt in TREC-like format.
    """
    #initializes important variables
    def __init__(self):
        self.average_query_length, self.queries_list = self.read_query_len_file()
        self.tfquery_list = self.tf_query()
        self.docnames=self.doc_names()
        self.average_doc_length, self.doc_length = self.read_doc_len_file()
        self.termIDs, self.doc_offset = get_misc_info()
    #computes the occurences of terms in a query
    def tf_query(self):
        # Returns {queryID: Counter of stemmed tokens}.
        tfquery={}
        for query in self.queries_list:
            query_counts = [tokenize(term) for term in query[1]]
            tf = Counter(query_counts)
            tfquery[query[0]] = tf
        return tfquery
    #Parses the file topics.xml and gets the queries with their ids
    def read_query_len_file(self):
        # Returns (average query length in raw tokens, [[queryID, tokens]]).
        print('Loading queries..')
        tree = ET.parse('topics.xml')
        root = tree.getroot()
        queries_list=[]
        summation=0
        total_number=0
        for child in root:
            query_id = int(child.get('number'))
            for attribute in child:
                if attribute.tag == 'query':
                    query_text = attribute.text.split()
                    queries_list.append([query_id, query_text])
                    total_number = total_number + 1
                    summation = summation + len(query_text)
        average_query_length = summation/total_number
        return average_query_length, queries_list
    #loads all names of documents of a corpus in memory
    def doc_names(self):
        docnames={}
        print('Loading document names..')
        with open('docids.txt', 'r') as docfile:
            while (True):
                line=docfile.readline().split()
                if (len(line)<1):
                    break
                docnames[int(line[0])]=line[1]
        return docnames
    #loads the lengths of all corpus documents in memory
    def read_doc_len_file(self):
        # NOTE(review): elsewhere doc_index.txt lines are parsed as
        # "docID termID pos..." (see whole_doc_okapi / tf_doc); here line[1]
        # is read as a document length, and later lines for the same docID
        # overwrite earlier ones — confirm the expected file format.
        print('Loading document lengths..')
        summation=0
        doc_length = {}
        with open("doc_index.txt", "r") as doc_index_file:
            while(True):
                line = doc_index_file.readline().split()
                if (len(line)<1):
                    break
                doc_length[int(line[0])]=int(line[1])
                summation=summation+int(line[1])
        average_doc_length = summation / (len(doc_length))
        return average_doc_length, doc_length
    #returns okapi-tf scores for each term of a document in a dictionary
    def whole_doc_okapi(self, docID):
        # Returns {termID: okapi-tf weight} for every term of the document,
        # reading only the document's slice of doc_index.txt via its offset.
        doc_okapi = {}
        if docID != 0:
            try:
                doc_length = self.doc_length[docID]
            except:
                doc_length = 0
            if doc_length > 0:
                reached = False
                with open("doc_index.txt",'r') as doc_index_file:
                    offset_in_docindex = self.doc_offset[docID]
                    doc_index_file.seek(offset_in_docindex)
                    while (True):
                        doc = doc_index_file.readline().split()
                        if (len(doc) < 1):
                            break
                        # Stop once past this document's contiguous lines.
                        if (int(doc[0]) != docID and reached):
                            break
                        if int(doc[0])==docID:
                            reached=True
                            # tf = number of position entries on the line.
                            doc_tf=len(doc[2:])
                            doc_okapi[int(doc[1])] = float(doc_tf) / (float(doc_tf) + 0.5 + 1.5*(float(doc_length) / float(self.average_doc_length)))
        return doc_okapi
    #returns okapi-tf scores for each term of a query in a dictionary
    def whole_query_okapi(self, ids_of_query_terms, tfquery):
        # Returns {termID: okapi-tf weight} for the query's known terms.
        query_okapi = {}
        if len(ids_of_query_terms) > 1 :
            query_length = sum(tfquery.values())
            for term, freq in tfquery.items():
                term = tokenize(term)
                if (len(term))>1 and term in ids_of_query_terms.keys():
                    termfrequency = freq
                    a = termfrequency / (termfrequency + 0.5 + 1.5 * (query_length/ self.average_query_length))
                    query_okapi[ids_of_query_terms[term]]=a
        return query_okapi
    #this function takes a single query and computes scores for all
    #relevant documents using the scoring function okapi-tf
    def okapi_TF_all(self, query):
        queryID = query[0]
        tfquery = self.tfquery_list[queryID]
        ids_of_query_terms = get_IDs_of_query_terms(query, self.termIDs)
        query_okapi = self.whole_query_okapi(ids_of_query_terms,tfquery)
        query_norm = sum(square([i for i in query_okapi.values()]))
        term_docs, relevant_doc_list = query_relevant_docs(ids_of_query_terms)
        for docID in relevant_doc_list:
            score = 0.0
            doc_okapi = self.whole_doc_okapi(docID)
            doc_norm=sum(square([i for i in doc_okapi.values()]))
            # Cosine-like similarity: dot product over product of norms.
            for qtid, qtoka in query_okapi.items():
                try:
                    dtoka = doc_okapi[int(qtid)]
                except KeyError:
                    dtoka = 0.0
                score = score + ((qtoka * dtoka))
            score = score / (doc_norm * query_norm)
            with open('oktf.txt', 'a') as oktffile:
                oktffile.write(str(queryID) + '\t0\t' + self.docnames[docID] + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
            print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', score, ' run1')
    #this function takes a single query and computes scores for all
    #relevant documents using the scoring function tf-idf
    def TF_IDF_all(self, query):
        print("TF-IDF is now running")
        queryID = query[0]
        query_text = query[1]
        print('query_text:',query_text)
        tfquery = self.tfquery_list[queryID]
        ids_of_query_terms = get_IDs_of_query_terms(query, self.termIDs)
        query_okapi = self.whole_query_okapi(ids_of_query_terms, tfquery)
        all_dfs={}
        D = len(self.doc_length)
        term_docs, relevant_doc_list = query_relevant_docs(ids_of_query_terms)
        query_okapi.pop(0, None) #remove key that doesn't exist in the corpus
        # Scale each query weight by idf = log(D / df).
        for qtID, qok in query_okapi.items():
            df = len(term_docs[qtID])
            query_okapi[qtID] = qok * (math.log(D/df))
        query_norm = sum(square([i for i in query_okapi.values()]))
        for docID in relevant_doc_list:
            score = 0.0
            doc_okapi = self.whole_doc_okapi(docID)
            dot_product = 0.0
            tempd = []
            for qtID, qok in query_okapi.items():
                df = len(term_docs[qtID])
                try:
                    dok = doc_okapi[int(qtID)]
                except:
                    dok = 0.0
                d = dok * (math.log(D/df))
                dot_product = dot_product + (qok * d)
                tempd.append(d)
            doc_norm=sum(square([i for i in tempd]))
            score = dot_product / (query_norm * doc_norm)
            with open('tfidf.txt', 'a') as tfidffile:
                tfidffile.write(str(queryID) + '\t0\t' + self.docnames[docID] + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
            print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', score, ' run1')
    #this function takes a single query and computes scores for a single
    #document using the language model with jelinek mercer smoothing
    def jelinek_mercer_doc(self, docID):
        # NOTE(review): this method appears unused and has two latent bugs:
        # `doc_length[docID]` below should almost certainly be
        # `self.doc_length[docID]` (a bare NameError is not caught by the
        # KeyError handler), and `queryID` in the final print is undefined
        # in this scope — both would raise if the method were called.
        lmbda = 0.6
        all_terms=all_terms_in_doc(docID, self.doc_offset) # {termID: tf}
        try:
            length_doc = doc_length[docID]
        except KeyError:
            total_prob = 0.0
        else:
            doc_term_stats = corpus_stats(all_terms.keys()) # {termID : {tftotal, total_occur} }
            corpus_length = sum(self.doc_length.values())
            total_prob = 1
            for termID, tf in all_terms.items():
                term_prob=0.0
                tftotal = doc_term_stats[termID][0]
                total_occurences = doc_term_stats[termID][1]
                term_prob = lmbda * (tf/length_doc) + (1 - lmbda) * (total_occurences/corpus_length)
                total_prob = total_prob * term_prob
        print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', total_prob, ' run1')
        return total_prob
    #this function takes a single query and computes scores for all
    #relevant documents using the language model with jelinek mercer smoothing
    def jelinek_mercer_all(self, query):
        queryID = query[0]
        tfquery = self.tfquery_list[queryID]
        ids_of_query_terms = get_IDs_of_query_terms(query, self.termIDs)
        query_stats = corpus_stats(ids_of_query_terms.values())
        corpus_length = sum(self.doc_length.values())
        term_docs, relevant_doc_list = query_relevant_docs(ids_of_query_terms)
        lmbda = 0.6
        for docID in relevant_doc_list:
            total_prob = 1
            score = 0.0
            length_doc = self.doc_length[docID]
            # Product over query terms of the smoothed term probability:
            # lambda * P(t|doc) + (1 - lambda) * P(t|corpus).
            for termID in ids_of_query_terms.values():
                tf = tf_doc(docID, termID, self.doc_offset)
                try:
                    total_occur = query_stats[termID][1]
                except KeyError:
                    total_occur = 0
                term_prob = lmbda * (tf/length_doc) + (1- lmbda) * (total_occur/corpus_length)
                total_prob = total_prob * term_prob
            score = total_prob
            with open('jm.txt', 'a') as jmfile:
                jmfile.write(str(queryID) + '\t0\t' + self.docnames[docID] + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
            print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', score, ' run1')
    #this function takes a single query and computes scores for a single
    #document using the Okapi-BM25 scoring function
    def okapi_BM25(self, docID, query):
        queryID = query[0]
        tfquery = self.tfquery_list[queryID]
        tfdoc = all_terms_in_doc(docID, self.doc_offset)
        query_terms_IDs = get_IDs_of_query_terms(query, self.termIDs)
        score = 0.0
        D = len(self.doc_length)
        # Standard BM25 constants: k1/b for document tf saturation and length
        # normalization, k2 for query tf saturation.
        k1=1.2
        b=0.75
        k2=500
        try:
            length_doc = self.doc_length[docID]
        except:
            length_doc = 0.0
        K = k1 * ((1-b) + (b * (length_doc / self.average_doc_length)))
        for term in tfquery.keys():
            try:
                termID = int(query_terms_IDs[term])
            except:
                termID = -1
            try:
                tfd = tfdoc[termID]
            except KeyError:
                tfd = 0.0
            tfq = tfquery[term]
            df = len(all_docs_containing_term(termID))
            logvalue = math.log((D + 0.5)/(df + 0.5))
            middleterm = ((1 + k1) * tfd) / (K + tfd)
            rightterm = ((1 + k2) * tfq) / (k2 + tfq)
            whole_term = logvalue * middleterm * rightterm
            score = score + whole_term
        with open('okbm.txt', 'a') as okbmfile:
            okbmfile.write(str(queryID) + '\t0\t' + self.docnames[docID] + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
        print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', score, ' run1')
        return score
    #this function takes a single query and computes scores for all
    #relevant documents using the scoring function okapi-bm25
    def okapi_BM25_all(self, query):
        queryID = query[0]
        query_text = query[1]
        tfquery = self.tfquery_list[queryID]
        score = 0
        D = len(self.doc_length)
        k1=1.2
        b=0.75
        k2=500
        ids_of_query_terms = get_IDs_of_query_terms(query, self.termIDs)
        term_docs, relevant_doc_list = query_relevant_docs(ids_of_query_terms)
        print('query:',query)
        print('ids_of_query_terms:', ids_of_query_terms)
        print('relevant_doc_list:', relevant_doc_list)
        query_okapi = self.whole_query_okapi(ids_of_query_terms, tfquery)
        for docID in relevant_doc_list:
            score = 0.0
            length_doc = self.doc_length[docID]
            K = k1 * ((1-b) + (b * (length_doc / self.average_doc_length)))
            all_term_freq = all_terms_in_doc(docID, self.doc_offset)
            for term, count in tfquery.items():
                try:
                    termID = self.termIDs[term]
                except KeyError:
                    termID = 0
                try:
                    tfd = all_term_freq[int(termID)]
                except KeyError:
                    tfd = 0.0
                tfq = count
                try:
                    df = len(term_docs[termID])
                except KeyError:
                    df = 0.0
                logvalue = math.log((D+0.5)/(df+0.5))
                middleterm = ((1+k1) * tfd) / (K + tfd)
                rightterm = ((1+k2) * tfq) / (k2 + tfq)
                whole_term = logvalue * middleterm * rightterm
                score = score + whole_term
            print(queryID, ' 0 ', self.docnames[docID] ,'', docID, '', score, ' run1')
            with open('okbm.txt', 'a') as okbmfile:
                okbmfile.write(str(queryID) + '\t0\t' + self.docnames[docID] + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
def query_relevant_docs(ids_of_query_terms):
    """Collect the documents containing each query term.

    Returns (term_docs, all_docs): term_docs maps termID -> list of docIDs
    containing that term; all_docs is the sorted, de-duplicated union of all
    those docIDs.
    """
    term_docs = {}
    union_of_docs = set()
    for termID in ids_of_query_terms.values():
        docs_for_term = all_docs_containing_term(termID)
        term_docs[termID] = docs_for_term
        union_of_docs.update(docs_for_term)
    return term_docs, sorted(union_of_docs)
#returns a list of all documents that contain a term
def all_docs_containing_term(termID):
    # Postings in term_index.txt are delta-encoded: each entry's docID field
    # is a gap added to the running `newdoc` accumulator.
    newdoc = 0
    doclist=[]
    # term_info.txt maps termID -> byte offset of its postings line.
    with open('term_info.txt', 'r', encoding='utf-8', errors='ignore') as term_info_file:
        while (True):
            line = term_info_file.readline().split()
            if len(line) < 1:
                break
            if termID == int(line[0]):
                offset_in_termindex = int(line[1])
                break
    with open("term_index.txt", 'r') as termindex_file:
        try:
            # NOTE(review): if termID was not found above,
            # offset_in_termindex is unbound and this seek raises NameError —
            # the bare except then falls through and the loop scans from the
            # file start. Deliberate best-effort? Confirm.
            termindex_file.seek(offset_in_termindex)
        except:
            print('termID:',termID)
        while (True):
            line = termindex_file.readline().split()
            try:
                if (int(line[0]) == termID):
                    postings_list = line[1:]
                    for posting in postings_list:
                        doc = posting.split(":")
                        # A zero gap means "same document, another position":
                        # only nonzero gaps advance to a new docID.
                        if (int(doc[0]) != 0):
                            newdoc = newdoc + int(doc[0])
                            doclist.append(newdoc)
                    break
            except:
                # EOF (empty split -> IndexError) or a malformed line ends
                # the scan.
                break
    return doclist
def get_IDs_of_query_terms(query, termIDs):
    """Map each tokenized term of *query* to its termID (0 when unknown).

    *query* is a [queryID, term_list] pair. Terms are stemmed via tokenize();
    stemming can shrink a token, and single-character leftovers are dropped.
    The result is an OrderedDict sorted by termID, matching the order the
    index files are scanned in.
    """
    tokenized_terms = [tokenize(term) for term in query[1]]
    tokenized_terms = [term for term in tokenized_terms if len(term) > 1]
    # dict.get replaces the previous try/except-KeyError loop; unknown terms
    # map to the sentinel ID 0.
    term_ids = {term: termIDs.get(term, 0) for term in tokenized_terms}
    return OrderedDict(sorted(term_ids.items(), key=lambda t: t[1]))
def get_IDs_of_all_query_terms(queries, termIDs):
    """Map every tokenized term across all queries to its termID.

    Fixes two defects in the original (apparently unused) version: the
    comprehension iterated `query` before binding it
    (`for term in query for query in queries`), raising NameError on first
    use, and `termIDs[term]` raised KeyError for out-of-vocabulary terms.
    Each query is a [queryID, term_list] pair, consistent with
    get_IDs_of_query_terms; unknown terms map to 0.
    """
    tokenized_terms = [tokenize(term) for query in queries for term in query[1]]
    return {term: termIDs.get(term, 0) for term in tokenized_terms}
def tf_doc(docID, termID, doc_starts_at):
    """Return the term frequency of term *termID* in document *docID*.

    *doc_starts_at* maps docID -> byte offset of the document's first line in
    doc_index.txt. Each line there is "docID termID pos1 pos2 ...", so the
    term frequency is the number of position entries. Returns 0 when either
    ID is the unknown sentinel 0, or when the pair is not found.
    """
    termfrequency = 0
    if termID != 0 and docID != 0:
        # Read-only access; the original opened the index with 'r+', which
        # needlessly required write permission on the file.
        with open("doc_index.txt", 'r') as doc_index_file:
            doc_index_file.seek(doc_starts_at[docID])
            while True:
                line = doc_index_file.readline().split()
                if len(line) < 1:
                    break
                # Lines are sorted by docID; once past it, the term is absent.
                if docID < int(line[0]):
                    break
                if int(line[0]) == docID and int(line[1]) == termID:
                    termfrequency = len(line[2:])
                    break
    return termfrequency
#returns a dictionary of the form
#{termID: [#documents in which term exists, total occurence of term in whole corpus]}
def corpus_stats(term_ids):
    # NOTE(review): the file handle advances across iterations of the outer
    # loop, so this only works when *term_ids* is sorted ascending AND
    # term_info.txt is sorted by termID. A termID missing from the file
    # consumes the rest of the file, leaving it (and all later termIDs)
    # absent from the result — callers guard with try/KeyError.
    stats={}
    with open('term_info.txt') as termfile:
        for termID in term_ids:
            tfd = 0
            overall_occurences=0
            while (True):
                line = termfile.readline().split()
                if (len(line)<1):
                    break
                if termID==int(line[0]):
                    # line layout: termID, offset, total occurrences, doc count
                    overall_occurences=int(line[2])
                    tfd=int(line[3])
                    stats[termID]=[tfd, overall_occurences]
                    break
    return stats
def tokenize(term):
    """Normalize a raw term: keep letters only, lower-case, Porter-stem."""
    letters_only = ''.join(filter(str.isalpha, term))
    return stemmer.stem(letters_only.lower())
def square(values):
    """Return a list with each element of *values* squared.

    The previous version shadowed the builtin name ``list`` and returned a
    one-shot map object; a concrete list supports repeated iteration and is
    still summable by every existing caller.
    """
    return [x ** 2 for x in values]
def all_terms_in_doc(docID, doc_starts_at):
    """Return an OrderedDict {termID: tf} for every term of document *docID*.

    *doc_starts_at* maps docID -> byte offset of the document's first line in
    doc_index.txt; each line is "docID termID pos1 pos2 ...", so the tf is
    the count of position entries.
    """
    terms_len = OrderedDict()
    with open('doc_index.txt') as doc_index_file:
        doc_index_file.seek(doc_starts_at[docID])
        while True:
            fields = doc_index_file.readline().split()
            if not fields:
                break
            current_doc = int(fields[0])
            if current_doc == docID:
                terms_len[int(fields[1])] = len(fields[2:])
            # Lines are sorted by docID: once past it, we are done.
            if docID < current_doc:
                break
    return terms_len
def all_docs_containing_query_terms(ids_of_query_terms):
    """Return {termID: [docIDs containing that term]} for the given term IDs."""
    return {termID: all_docs_containing_term(termID)
            for termID in ids_of_query_terms}
def common_elements(list1, list2):
    """Return the de-duplicated intersection of two lists (unordered)."""
    return list(set(list1).intersection(list2))
def generate_docindex_details():
    """Create doc_index_details.txt: docID -> byte offset into doc_index.txt.

    One line "docID<TAB>offset" is written for the first line of each
    document. The original pre-truncated the output with a bare open() whose
    handle was never closed; opening it with 'w' inside the with-statement
    truncates and guarantees the handle is released.
    """
    seenID = 0
    with open('doc_index.txt', 'r', encoding='utf-8', errors='ignore') as doc_index_file, \
            open('doc_index_details.txt', 'w', encoding='utf-8', errors='ignore') as details_file:
        while True:
            # Record the offset BEFORE reading the line it belongs to.
            line_start_offset = doc_index_file.tell()
            line = doc_index_file.readline().split()
            if len(line) < 1:
                break
            docID = int(line[0])
            if seenID != docID:
                details_file.write(str(docID) + '\t' + str(line_start_offset) + '\n')
                seenID = docID
def get_docindex_details():
    """Load the docID -> byte-offset table written by generate_docindex_details."""
    doc_starts_at = {}
    with open('doc_index_details.txt', 'r+', encoding='utf-8', errors='ignore') as details_file:
        while True:
            fields = details_file.readline().split()
            # EOF or a blank line ends the table.
            if not fields:
                break
            doc_starts_at[int(fields[0])] = int(fields[1])
    return doc_starts_at
def get_term_IDs():
    """Load the term -> termID mapping from termids.txt into memory."""
    termIDs = {}
    with open('termids.txt', 'r', encoding='utf-8', errors='ignore') as termidfile:
        while True:
            fields = termidfile.readline().split()
            # EOF or a blank line ends the table.
            if not fields:
                break
            termIDs[fields[1]] = int(fields[0])
    return termIDs
def get_misc_info():
    """Load term IDs and per-document offsets, building the offset file if absent.

    Returns (termIDs, offsets): the term -> termID map and the docID ->
    byte-offset map into doc_index.txt.
    """
    print('Loading Document Index details..')
    if not os.path.isfile('doc_index_details.txt'):
        generate_docindex_details()
    doc_offsets = get_docindex_details()
    print('Loading term IDs..')
    return get_term_IDs(), doc_offsets
#all the scoring functions rank the documents that are relevant to the query
#output files have only scores of relevant documents. This function completes the
#output files with scores of all files.
def complete_output(docnames, filename):
    """Rewrite a run file so every document appears with a score.

    Reads `filename` (tab-separated columns: queryID, iteration, docname,
    docID, score, run-tag) and writes 'updated<filename>', padding gaps in
    the docID sequence with zero-score placeholder rows.

    NOTE(review): when a gap (or a docID reset for a new query) is detected,
    only the placeholder rows are written -- the current scored line itself
    is NOT copied through.  Confirm this is the intended behaviour.
    """
    # create/truncate the output file; it is re-opened for append below
    open('updated'+filename, 'w')
    docID = 1
    with open(filename, 'r', encoding='utf-8', errors='ignore') as file_to_fix, open('updated'+filename, 'a', encoding='utf-8', errors='ignore') as updated_file:
        while(True):
            line = file_to_fix.readline().split()
            if len(line) < 1:
                break  # EOF (or blank line) ends processing
            lastdocID = docID  # docID seen on the previous iteration
            queryID = line[0]
            docname = line[2]
            docID = int(line[3])
            score = line[4]
            if (lastdocID < docID - 1 ):
                # gap in docIDs: emit zero-score rows for the missing ones
                for i in range(lastdocID, docID):
                    updated_file.write(str(queryID) + '\t0\t' + docnames[i] + '\t' + str(i) + '\t' + str(0.0) + ' run1' + '\n')
            elif (lastdocID > docID and lastdocID < len(docnames)):
                # docID went backwards (new query): pad out the tail of the
                # previous query's docID range
                for i in range(lastdocID, len(docnames)):
                    updated_file.write(str(queryID) + '\t0\t' + docnames[i] + '\t' + str(i) + '\t' + str(0.0) + ' run1' + '\n')
            else:
                # consecutive docID: copy the scored line through unchanged
                updated_file.write(str(queryID) + '\t0\t' + docname + '\t' + str(docID) + '\t' + str(score) + ' run1' + '\n')
if __name__ == '__main__':
    # command line: python query.py --score <scoringfunction>
    if len(sys.argv) <= 2:
        exit()
    if sys.argv[1] != '--score':
        print('Usage is as follows\n "python query.py --score <scoringfunction>"')
        print('Scoring functions are:\n 1. TF-IDF\n 2. OK-TF\n 3. OK-BM\n 4. JM-Smooth')
        exit()
    sc = Scoring()
    choice = sys.argv[2]
    # each scoring function maps to (output file to truncate, per-query method)
    dispatch = {
        'TF-IDF': ('tfidf.txt', sc.TF_IDF_all),
        'OK-TF': ('oktf.txt', sc.okapi_TF_all),
        'OK-BM': ('okbm.txt', sc.okapi_BM25_all),
        'JM-Smooth': ('jm.txt', sc.jelinek_mercer_all),
    }
    if choice in dispatch:
        outfile, score_query = dispatch[choice]
        open(outfile, 'w')  # truncate any previous results
        for query in sc.queries_list:
            score_query(query)
    elif choice == 'completeoutput':
        # pad every previously generated run file with zero-score rows
        for outfile in ('oktf.txt', 'okbm.txt', 'tfidf.txt', 'jm.txt'):
            complete_output(sc.docnames, outfile)
    else:
        print('Invalid scoring function.')
        print('Scoring functions are:\n 1. TF-IDF\n 2. OK-TF\n 3. OK-BM\n 4. JM-Smooth')
        exit()
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# author(s): Lijun Zhu
# the package
import altar
from altar.models.BayesianL2 import BayesianL2
# declaration
class Linear(BayesianL2, family="altar.models.regression.linear"):
    """
    Linear Regression model y = ax + b

    Samples a slope (theta[0]) and intercept (theta[1]) against observed
    (x, y) data using the BayesianL2 machinery.
    """

    # additional model properties
    x_file = altar.properties.path(default='x.txt')
    x_file.doc = "the input file for x variable"

    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model
        """
        # model specific initialization before superclass
        # none for this model
        # call the super class initialization
        super().initialize(application=application)
        # model specific initialization after superclass
        # grab data: x from the configured input file, y from the data observer
        self.x = self.loadFile(self.x_file)
        self.y = self.dataobs.dataobs
        # set the return_residual flag
        # forward model calculates the residual between prediction and data
        self.return_residual = True
        # all done, return self
        return self

    def forwardModel(self, theta, prediction):
        """
        Forward Model
        :param theta: sampling parameters for one sample; theta[0] is the
            slope, theta[1] the intercept
        :param prediction: output buffer, filled in place with the residual
            (prediction - observation) since return_residual is set
        :return: self
        """
        # grab the parameters from theta
        slope = theta[0]
        intercept = theta[1]
        # calculate the residual between data prediction and observation
        size = self.observations
        for i in range(size):
            prediction[i] = slope * self.x[i] + intercept - self.y[i]
        # all done
        return self

    # private variables
    x = None  # independent variable samples, loaded in initialize()
    y = None  # observations, taken from the data observer in initialize()

# end of file
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Nov 15, 2018
'''
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from pprint import pprint
from weblyzard_api.client import OGER_API_URL
from weblyzard_api.client.ontogene import OgerClient
from weblyzard_api.client.recognize import Recognize
from weblyzard_api.client.jeremia import Jeremia
class TestOGER(unittest.TestCase):
    """Integration tests for the OgerClient annotation service."""

    def setUp(self):
        url = OGER_API_URL
        print(url)
        self.client = OgerClient(url)

    def test_raise_exception_if_service_urls_is_array(self):
        with self.assertRaises(Exception) as context:
            OgerClient(['http://localhost:8080', 'http://localhost:8081'])
        # BUG FIX: the membership test must run against the exception's
        # message string; `in` on the exception object itself raises
        # TypeError (Exception is not a container).
        self.assertTrue('Oger url cannot be an array' in str(context.exception))

    def test_status(self):
        self.assertTrue(self.client.status())

    def test_annotate_text(self):
        docid='99999999'
        #doctext='Cancer, also called malignancy, is an abnormal growth of cells.'
        doctext='Alzheimer\'s disease (AD), also referred to simply as Alzheimer\'s, is a chronic neurodegenerative disease that usually starts slowly and worsens over time.'
        response = self.client.annotate_text(docid, doctext)
        assert len(response), 'No items found for {}'.format(docid)
# allow running this test module directly: `python <this file>`
if __name__ == '__main__':
    unittest.main()
"""
refs:
https://github.com/wandb/examples/blob/master/examples/pytorch/pytorch-ddp/log-ddp.py
https://docs.wandb.ai/guides/track/advanced/distributed-training
https://github.com/wandb/examples/issues/88
https://community.wandb.ai/t/what-happens-if-the-code-crashes-in-the-middle-and-there-was-no-time-to-fo-a-finish/508
""" |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Trashman v1.5.0
# A Python trash manager.
# Copyright (C) 2011-2018, Chris Warrick.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the author of this software nor the names of
# contributors to this software may be used to endorse or promote
# products derived from this software without specific prior written
# consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
trashman
~~~~~~~~
A Python trash manager.
:Copyright: © 2011-2018, Chris Warrick.
:License: BSD (see /LICENSE).
"""
# package metadata
__title__ = 'Trashman'
__version__ = '1.5.0'
__author__ = 'Chris Warrick'
__license__ = '3-clause BSD'
__docformat__ = 'restructuredtext en'

import sys
import os
import gettext

# interpreter version, exposed for consumers that need feature checks
__pyver__ = sys.version_info
# message catalogue for user-facing strings; falls back to the identity
# translation when no locale catalogue is installed
G = gettext.translation('trashman', '/usr/share/locale', fallback='C')
_ = G.gettext
def size_dir(sdir):
    """Return the total size in bytes of *sdir*.

    Counts the directory entry itself, every regular file directly inside
    it, and recurses into subdirectories.  Based on code found online.
    """
    total = os.path.getsize(sdir)
    for name in os.listdir(sdir):
        full_path = os.path.join(sdir, name)
        if os.path.isfile(full_path):
            total += os.path.getsize(full_path)
        elif os.path.isdir(full_path):
            total += size_dir(full_path)
    return total
### TMError errors raised here ###
class TMError(Exception):
    """Exceptions raised by the Trashman."""

    def __init__(self, src, info, msg):
        """Store the error details and log them through the shared datastore."""
        # log first, so the message is recorded even if the caller swallows us
        banner = '(auto TMError ) [{}/{}]'.format(src, info)
        DS.log.error(banner + msg)
        self.src = src
        self.info = info
        self.msg = msg

    def __str__(self):
        """Return the human-readable error message."""
        return self.msg
# NOTE(review): imported at the bottom of the module, presumably to avoid a
# circular import with .tmds -- confirm before moving to the top.
from .tmds import TMDS
DS = TMDS()  # shared datastore/logger used by TMError above
|
import os

# Redirect this package's module search path to the sibling 'climate'
# directory, so submodules are loaded from there instead of from here.
__path__ = [
    os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                 'climate')]
|
# -*- coding: utf-8 -*-
"""Base class for all lexers."""
import re
from funcparserlib.lexer import Token
class LexerError(Exception):
    """General lexer error."""

    def __init__(self, msg, pos, char, lnum, brace_level, line):
        """Initialise with information on where the error occurred."""
        self.msg = msg
        self.pos = pos
        self.char = char
        self.lnum = lnum
        self.brace_level = brace_level
        self.line = line

    def __str__(self):
        """Format the error together with its full source location."""
        template = (
            "Failed at line {0}, char '{1}', position {2}, "
            "brace level {3}: {4} (line: '{5}')"
        )
        return template.format(
            self.lnum,
            self.char,
            self.pos,
            self.brace_level,
            self.msg,
            self.line,
        )
class BaseLexer:
    """Base class for all bibpy lexers.

    Subclasses register modes (callables that yield tokens) and pattern
    tables; `lex` then repeatedly drives the current mode until the input
    string is consumed.
    """

    def __init__(self):
        """Initialise the lexer."""
        self._modes = {}
        self._patterns = None

    def reset(self, string):
        """Reset the internal state of the lexer."""
        self.pos = 0        # current scan position (index into string)
        self.lastpos = 0    # position before the most recent match
        self.maxpos = len(string)
        self.char = 1       # column on the current line (1-based)
        self.lnum = 1       # current line number (1-based)
        self.last_lnum = 1  # line number before the most recent match
        self.brace_level = 0
        self.ignore_whitespace = False
        self.string = string

    def _compile_regexes(self, patterns):
        """Compile a set of patterns into regular expressions.

        `patterns` is an ordered iterable of (name, (pattern, handler)).
        """
        # Save a copy of the patterns that respects the order. We could also
        # use a collections.OrderedDict, but this actually affected performance
        # ever so slighty
        self._iter_patterns = [
            (name, (re.compile(pattern), f)) for name, (pattern, f) in patterns
        ]

        # This is used for lookups
        self._patterns = dict(self._iter_patterns)

    @property
    def patterns(self):
        """All patterns recognised by the lexer."""
        return self._patterns

    @property
    def mode(self):
        """Return the current mode of the lexer."""
        return self._mode

    @mode.setter
    def mode(self, value):
        self._mode = value

    @property
    def modes(self):
        """Return all modes that the lexer has."""
        return self._modes

    @property
    def eos(self):
        """Return True if we have reached the end of the string."""
        return self.pos >= self.maxpos

    @property
    def current_char(self):
        """Return the current character or None if no such character."""
        if self.string and self.pos >= 0 and not self.eos:
            return self.string[self.pos]

        return None

    def advance(self, match):
        """Advance the internal state based on a successful match."""
        self.lastpos = self.pos
        self.last_lnum = self.lnum
        matched = match.group(0)
        newlines = matched.count('\n')
        self.pos = match.start(0) + len(matched)
        self.lnum += newlines

        if newlines == 0:
            self.char += len(matched)
        else:
            # the column restarts after the last newline inside the match
            self.char = len(matched) - matched.rfind('\n') - 1

    def raise_error(self, msg):
        """Raise a lexer error with the given message."""
        errline = self.string.splitlines()[self.lnum - 1]

        raise LexerError(
            msg, self.pos, self.char, self.lnum, self.brace_level, errline
        )

    def raise_unexpected(self, token):
        """Raise an error for an unexpected token."""
        self.raise_error("Did not find expected token '{0}'".format(token))

    def raise_unbalanced(self):
        """Raise an error for unbalanced braces."""
        self.raise_error('Unbalanced braces')

    def expect(self, token, strip_whitespace=True):
        """Expect a token, fail otherwise.

        Returns the matched token; raises LexerError if it is not found.
        NOTE(review): `strip_whitespace` is accepted but unused -- stripping
        is controlled by `self.ignore_whitespace` instead; confirm intent.
        """
        pattern, _ = self.patterns[token]
        m = pattern.search(self.string, self.pos)

        if not m:
            self.raise_unexpected(token)

        self.advance(m)
        token_value = m.group(0)

        if self.ignore_whitespace:
            token_value = token_value.strip()

        return self.make_token(token, token_value)

    def until(self, token):
        """Scan until a particular token is found.

        Return the part of the string that was scanned past and the string
        value of the token. The latter is the entire rest of the string if the
        token was not found.
        """
        # 'braces' and 'parens' are pseudo-tokens matching either delimiter
        if token == 'braces':
            pattern = re.compile(r'{|}')
        elif token == 'parens':
            pattern = re.compile(r'\(|\)')
        else:
            pattern, _ = self.patterns[token]

        m = pattern.search(self.string, self.pos)

        if m:
            scanned = m.group(0)
            self.advance(m)

            # exclude the matched delimiter itself from the scanned-past text
            return self.string[self.lastpos:self.pos - 1], scanned
        else:
            # token not found: consume and return the rest of the input
            rest = self.string[self.pos:]
            self.pos = len(self.string)

            return rest, ''

    def make_token(self, token_type, value):
        """Create a token type with a value.

        Positions are (line, offset) pairs for the token's start and end.
        """
        return Token(
            token_type,
            value,
            (self.last_lnum, self.lastpos),
            (self.lnum, self.pos)
        )

    def lex_string(self, value):
        """Lex a string and return a single token for it."""
        return self.make_token('string', value)

    def scan(self, search_type='search'):
        """Scan until any token recognised by this lexer is found.

        Return the part of the string that was scanned past and the token
        itself. The latter is the entire rest of the string if the token was
        not found.
        """
        # try each registered pattern in registration order
        for token_type, (pattern, handler) in self._iter_patterns:
            # Not the most elegant but re.Pattern only exists in Python 3.7+ so
            # we cannot pass the method as an argument
            m = getattr(pattern, search_type)(self.string, self.pos)

            if m:
                self.advance(m)
                value = m.group(0)

                # skipped whitespace yields no token at all
                if self.ignore_whitespace and token_type == 'space':
                    break

                token = handler(value) if handler else\
                    self.make_token(token_type, value)

                yield self.string[self.lastpos:self.pos - len(value)], token
                break
        else:
            # no pattern matched: emit the rest of the input with no token
            rest = self.string[self.pos:]
            self.pos = len(self.string)

            yield rest, None

    def lex(self, string):
        """Lex a string and generate tokens."""
        self.reset(string)

        while not self.eos:
            yield from self.modes[self.mode]()
|
import abc
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn as nn
import jiant.proj.main.modeling.heads as heads
import jiant.utils.transformer_utils as transformer_utils
from jiant.proj.main.components.outputs import LogitsOutput, LogitsAndLossOutput
from jiant.utils.python.datastructures import take_one
from jiant.shared.model_resolution import ModelArchitectures
class Taskmodel(nn.Module, metaclass=abc.ABCMeta):
    """Abstract base for task-specific models wrapping a shared encoder."""

    def __init__(self, encoder):
        super().__init__()
        self.encoder = encoder

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        # subclasses return a LogitsOutput, or a LogitsAndLossOutput when
        # compute_loss is True
        raise NotImplementedError
class ClassificationModel(Taskmodel):
    """Classification task: pooled encoder output -> label logits."""

    def __init__(self, encoder, classification_head: heads.ClassificationHead):
        super().__init__(encoder=encoder)
        self.classification_head = classification_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.classification_head(pooled=encoder_output.pooled)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        num_labels = self.classification_head.num_labels
        loss = nn.CrossEntropyLoss()(
            logits.view(-1, num_labels), batch.label_id.view(-1),
        )
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class RegressionModel(Taskmodel):
    """Regression task: pooled encoder output -> one scalar per example."""

    def __init__(self, encoder, regression_head: heads.RegressionHead):
        super().__init__(encoder=encoder)
        self.regression_head = regression_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        # TODO: Abuse of notation - these aren't really logits (issue #1187)
        logits = self.regression_head(pooled=encoder_output.pooled)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        loss = nn.MSELoss()(logits.view(-1), batch.label.view(-1))
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class MultipleChoiceModel(Taskmodel):
    """Scores each answer choice independently; softmax runs across choices."""

    def __init__(self, encoder, num_choices: int, choice_scoring_head: heads.RegressionHead):
        super().__init__(encoder=encoder)
        self.num_choices = num_choices
        self.choice_scoring_head = choice_scoring_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        # batch inputs are stacked per choice: dimension 1 indexes the choice
        input_ids = batch.input_ids
        segment_ids = batch.segment_ids
        input_mask = batch.input_mask

        choice_score_list = []
        encoder_output_other_ls = []
        for i in range(self.num_choices):
            # encode each choice separately through the shared encoder
            encoder_output = get_output_from_encoder(
                encoder=self.encoder,
                input_ids=input_ids[:, i],
                segment_ids=segment_ids[:, i],
                input_mask=input_mask[:, i],
            )
            choice_score = self.choice_scoring_head(pooled=encoder_output.pooled)
            choice_score_list.append(choice_score)
            encoder_output_other_ls.append(encoder_output.other)

        # regroup the per-choice auxiliary outputs so the choice dimension
        # becomes dim=1 of each stacked tensor
        reshaped_outputs = []
        if encoder_output_other_ls[0]:
            for j in range(len(encoder_output_other_ls[0])):
                reshaped_outputs.append(
                    [
                        torch.stack([misc[j][layer_i] for misc in encoder_output_other_ls], dim=1)
                        for layer_i in range(len(encoder_output_other_ls[0][0]))
                    ]
                )
            reshaped_outputs = tuple(reshaped_outputs)

        # (batch, num_choices) matrix of per-choice scores
        logits = torch.cat(
            [choice_score.unsqueeze(1).squeeze(-1) for choice_score in choice_score_list], dim=1
        )

        if compute_loss:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_choices), batch.label_id.view(-1))
            return LogitsAndLossOutput(logits=logits, loss=loss, other=reshaped_outputs)
        else:
            return LogitsOutput(logits=logits, other=reshaped_outputs)
class SpanComparisonModel(Taskmodel):
    """Single-label classification over marked spans of the input."""

    def __init__(self, encoder, span_comparison_head: heads.SpanComparisonHead):
        super().__init__(encoder=encoder)
        self.span_comparison_head = span_comparison_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.span_comparison_head(unpooled=encoder_output.unpooled, spans=batch.spans)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        num_labels = self.span_comparison_head.num_labels
        loss = nn.CrossEntropyLoss()(
            logits.view(-1, num_labels), batch.label_id.view(-1),
        )
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class SpanPredictionModel(Taskmodel):
    """Predicts answer-span start/end positions over the token sequence."""

    def __init__(self, encoder, span_prediction_head: heads.TokenClassificationHead):
        super().__init__(encoder=encoder)
        self.offset_margin = 1000
        # 1000 is a big enough number that exp(-1000) will be strict 0 in float32.
        # So that if we add 1000 to the valid dimensions in the input of softmax,
        # we can guarantee the output distribution will only be non-zero at those dimensions.
        self.span_prediction_head = span_prediction_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.span_prediction_head(unpooled=encoder_output.unpooled)
        # Ensure logits in valid range is at least self.offset_margin higher than others
        logits_offset = logits.max() - logits.min() + self.offset_margin
        logits = logits + logits_offset * batch.selection_token_mask.unsqueeze(dim=2)
        if compute_loss:
            loss_fct = nn.CrossEntropyLoss()
            # assumes logits are (batch, seq, 2) so the transpose/flatten
            # scores start and end positions jointly -- TODO confirm shape
            loss = loss_fct(
                logits.transpose(dim0=1, dim1=2).flatten(end_dim=1), batch.gt_span_idxs.flatten(),
            )
            return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
        else:
            return LogitsOutput(logits=logits, other=encoder_output.other)
class MultiLabelSpanComparisonModel(Taskmodel):
    """Multi-label classification over marked spans (BCE-with-logits loss)."""

    def __init__(self, encoder, span_comparison_head: heads.SpanComparisonHead):
        super().__init__(encoder=encoder)
        self.span_comparison_head = span_comparison_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.span_comparison_head(unpooled=encoder_output.unpooled, spans=batch.spans)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        num_labels = self.span_comparison_head.num_labels
        loss = nn.BCEWithLogitsLoss()(
            logits.view(-1, num_labels), batch.label_ids.float(),
        )
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class TokenClassificationModel(Taskmodel):
    """Per-token classification (e.g. tagging). From RobertaForTokenClassification."""

    def __init__(self, encoder, token_classification_head: heads.TokenClassificationHead):
        super().__init__(encoder=encoder)
        self.token_classification_head = token_classification_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.token_classification_head(unpooled=encoder_output.unpooled)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        # score only positions whose label_mask is set (real, labelled tokens)
        keep = batch.label_mask.view(-1) == 1
        flat_logits = logits.view(-1, self.token_classification_head.num_labels)[keep]
        flat_labels = batch.label_ids.view(-1)[keep]
        loss = nn.CrossEntropyLoss()(flat_logits, flat_labels)
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class QAModel(Taskmodel):
    """Extractive QA: per-token start/end logits from unpooled encoder output."""

    def __init__(self, encoder, qa_head: heads.QAHead):
        super().__init__(encoder=encoder)
        self.qa_head = qa_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        logits = self.qa_head(unpooled=encoder_output.unpooled)
        if not compute_loss:
            return LogitsOutput(logits=logits, other=encoder_output.other)
        loss = compute_qa_loss(
            logits=logits,
            start_positions=batch.start_position,
            end_positions=batch.end_position,
        )
        return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
class MLMModel(Taskmodel):
    """Masked language modeling with per-forward dynamic masking."""

    def __init__(self, encoder, mlm_head: heads.BaseMLMHead):
        super().__init__(encoder=encoder)
        self.mlm_head = mlm_head

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        # dynamic masking: a fresh mask is drawn for every forward pass
        masked_batch = batch.get_masked(
            mlm_probability=task.mlm_probability, tokenizer=tokenizer, do_mask=task.do_mask,
        )
        encoder_output = get_output_from_encoder(
            encoder=self.encoder,
            input_ids=masked_batch.masked_input_ids,
            segment_ids=masked_batch.segment_ids,
            input_mask=masked_batch.input_mask,
        )
        logits = self.mlm_head(unpooled=encoder_output.unpooled)
        if compute_loss:
            loss = compute_mlm_loss(logits=logits, masked_lm_labels=masked_batch.masked_lm_labels)
            return LogitsAndLossOutput(logits=logits, loss=loss, other=encoder_output.other)
        else:
            return LogitsOutput(logits=logits, other=encoder_output.other)
class EmbeddingModel(Taskmodel):
    """Extracts pooled embeddings from a chosen encoder layer (no real loss)."""

    def __init__(self, encoder, pooler_head: heads.AbstractPoolerHead, layer):
        super().__init__(encoder=encoder)
        self.pooler_head = pooler_head
        self.layer = layer  # index into the encoder's hidden-state tuple

    def forward(self, batch, task, tokenizer, compute_loss: bool = False):
        # temporarily enable hidden-state output on the encoder
        with transformer_utils.output_hidden_states_context(self.encoder):
            encoder_output = get_output_from_encoder_and_batch(encoder=self.encoder, batch=batch)
        # A tuple of layers of hidden states
        hidden_states = take_one(encoder_output.other)
        layer_hidden_states = hidden_states[self.layer]

        # dispatch on the configured pooling strategy
        if isinstance(self.pooler_head, heads.MeanPoolerHead):
            logits = self.pooler_head(unpooled=layer_hidden_states, input_mask=batch.input_mask)
        elif isinstance(self.pooler_head, heads.FirstPoolerHead):
            logits = self.pooler_head(layer_hidden_states)
        else:
            raise TypeError(type(self.pooler_head))

        # TODO: Abuse of notation - these aren't really logits (issue #1187)
        if compute_loss:
            # TODO: make this optional? (issue #1187)
            return LogitsAndLossOutput(
                logits=logits,
                loss=torch.tensor([0.0]),  # This is a horrible hack
                other=encoder_output.other,
            )
        else:
            return LogitsOutput(logits=logits, other=encoder_output.other)
@dataclass
class EncoderOutput:
    """Container for encoder results passed to the task heads."""
    pooled: torch.Tensor    # sequence-level representation
    unpooled: torch.Tensor  # per-token hidden states
    other: Any = None       # optional extra encoder outputs (e.g. hidden states)
    # Extend later with attention, hidden_acts, etc
def get_output_from_encoder_and_batch(encoder, batch) -> EncoderOutput:
    """Pass batch to encoder, return encoder model output.

    Thin convenience wrapper: unpacks the batch fields and delegates to
    get_output_from_encoder.

    Args:
        encoder: bare model outputting raw hidden-states without any specific head.
        batch: Batch object (containing token indices, token type ids, and attention mask).

    Returns:
        EncoderOutput containing pooled and unpooled model outputs as well as any other outputs.
    """
    return get_output_from_encoder(
        encoder=encoder,
        input_ids=batch.input_ids,
        segment_ids=batch.segment_ids,
        input_mask=batch.input_mask,
    )
def get_output_from_encoder(encoder, input_ids, segment_ids, input_mask) -> EncoderOutput:
    """Run the encoder on raw inputs, dispatching on its architecture.

    Args:
        encoder: bare model outputting raw hidden-states without any specific head.
        input_ids: token indices (see huggingface.co/transformers/glossary.html#input-ids).
        segment_ids: token type ids (see huggingface.co/transformers/glossary.html#token-type-ids).
        input_mask: attention mask (see huggingface.co/transformers/glossary.html#attention-mask).

    Raises:
        KeyError: if the encoder's architecture is not supported.

    Returns:
        EncoderOutput with pooled and unpooled outputs, plus any extras.
    """
    model_arch = ModelArchitectures.from_encoder(encoder)
    standard_arches = (
        ModelArchitectures.BERT,
        ModelArchitectures.ROBERTA,
        ModelArchitectures.ALBERT,
        ModelArchitectures.XLM_ROBERTA,
    )
    seq2seq_arches = (
        ModelArchitectures.BART,
        ModelArchitectures.MBART,
    )
    if model_arch in standard_arches:
        pooled, unpooled, other = get_output_from_standard_transformer_models(
            encoder=encoder, input_ids=input_ids, segment_ids=segment_ids, input_mask=input_mask,
        )
    elif model_arch == ModelArchitectures.ELECTRA:
        pooled, unpooled, other = get_output_from_electra(
            encoder=encoder, input_ids=input_ids, segment_ids=segment_ids, input_mask=input_mask,
        )
    elif model_arch in seq2seq_arches:
        # BART-family models do not take segment ids
        pooled, unpooled, other = get_output_from_bart_models(
            encoder=encoder, input_ids=input_ids, input_mask=input_mask,
        )
    else:
        raise KeyError(model_arch)
    # Extend later with attention, hidden_acts, etc
    if other:
        return EncoderOutput(pooled=pooled, unpooled=unpooled, other=other)
    return EncoderOutput(pooled=pooled, unpooled=unpooled)
def get_output_from_standard_transformer_models(encoder, input_ids, segment_ids, input_mask):
    """Run a BERT-style encoder; return (pooled, unpooled, other outputs).

    The encoder's tuple output is (sequence_output, pooled_output, *extras).
    """
    output = encoder(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
    unpooled = output[0]
    pooled = output[1]
    other = output[2:]
    return pooled, unpooled, other
def get_output_from_bart_models(encoder, input_ids, input_mask):
    """Run a BART/mBART encoder-decoder; return (pooled, unpooled, other).

    BART and mBART are encoder-decoder architectures.
    As described in the BART paper and implemented in Transformers,
    for single input tasks, the encoder input is the sequence,
    the decoder input is the 1-shifted sequence, and the resulting
    sentence representation is the final decoder state.
    That's what we use for `unpooled` here.

    NOTE(review): the 4-way unpack assumes the model returns
    (decoder_last, decoder_hidden_states, encoder_last, encoder_hidden_states);
    this is tied to the installed transformers version -- confirm on upgrade.
    """
    dec_last, dec_all, enc_all = encoder(
        input_ids=input_ids, attention_mask=input_mask, output_hidden_states=True,
    ) if False else (None, None, None)  # placeholder, see below
def get_output_from_electra(encoder, input_ids, segment_ids, input_mask):
    """Run an ELECTRA encoder; return (pooled, unpooled, full output tuple).

    ELECTRA has no pooler head, so the first token's hidden state serves
    as the pooled representation.
    """
    output = encoder(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
    unpooled = output[0]
    return unpooled[:, 0, :], unpooled, output
def compute_mlm_loss(logits, masked_lm_labels):
    """Average cross-entropy of the vocabulary logits against the MLM labels."""
    num_classes = logits.shape[-1]
    return nn.CrossEntropyLoss()(
        logits.view(-1, num_classes), masked_lm_labels.view(-1)
    )
def compute_qa_loss(logits, start_positions, end_positions):
    """Average the start- and end-position cross-entropy losses for QA.

    `logits` is (batch, 2, seq_len): channel 0 scores start positions,
    channel 1 scores end positions.  Taken from RobertaForQuestionAnswering.
    """
    start_logits = logits[:, 0]
    end_logits = logits[:, 1]
    # If we are on multi-GPU, the positions may carry an extra trailing dim
    if start_positions.dim() > 1:
        start_positions = start_positions.squeeze(-1)
    if end_positions.dim() > 1:
        end_positions = end_positions.squeeze(-1)
    # positions outside the model input are clamped to seq_len and ignored
    ignored_index = start_logits.size(1)
    start_positions.clamp_(0, ignored_index)
    end_positions.clamp_(0, ignored_index)
    loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
    start_loss = loss_fct(start_logits, start_positions)
    end_loss = loss_fct(end_logits, end_positions)
    return (start_loss + end_loss) / 2
|
from gpiozero import LED
from time import sleep
def blink_led_once(blink_frequency, port):
    """Turn the LED on GPIO `port` on for blink_frequency/2 seconds, then off
    for the same amount of time.

    BUG FIX: the LED object was never closed, so the GPIO pin stayed
    reserved; gpiozero then refuses to construct another LED on the same
    pin, breaking repeated blinks of one port (e.g. blink_led_forever).
    Closing in a finally block releases the pin even if sleep is interrupted.
    """
    myLED = LED(port)
    try:
        myLED.on()
        sleep(blink_frequency / 2)
        myLED.off()
        sleep(blink_frequency / 2)
    finally:
        myLED.close()  # release the pin for future use
def blink_led_forever(port):
    """blink the led once per second in a never ending loop"""
    # NOTE: never returns; stop with Ctrl-C
    while True:
        blink_led_once(1, port)
def race_led_up(blink_frequency):
    """Blink each LED in the chain once, in wiring order."""
    for port in (18, 23, 25, 12, 16, 20, 21, 26, 19, 13):
        blink_led_once(blink_frequency, port)
def race_led_down(blink_frequency):
    """Blink each LED in the chain once, in reverse wiring order."""
    for port in (13, 19, 26, 21, 20, 16, 12, 25, 23, 18):
        blink_led_once(blink_frequency, port)
def test_leds():
    """Demo routine: one single blink, then racing patterns at increasing speed."""
    print("This will make the LED blink once")
    blink_led_once(2, 18)
    print("This will make them race")
    race_led_up(.5)
    race_led_down(.5)
    print("Come on, we can go faster!")
    race_led_up(.2)
    race_led_down(.2)
    race_led_up(.1)
    race_led_down(.1)

# run the demo when executed as a script
if __name__ == "__main__":
    test_leds()
|
class Leg(object):
    """A chair leg; empty placeholder used only for composition."""
    pass
class Back(object):
    """A chair back; empty placeholder used only for composition."""
    pass
class Chair(object):
    '''A chair composed of Leg and Back parts (composition over inheritance).'''

    def __init__(self, num_legs):
        # one Leg instance per requested leg, plus a single Back
        self.legs = [Leg() for _ in range(num_legs)]
        self.back = Back()

    def __repr__(self):
        return 'I have {} legs and one back.'.format(len(self.legs))
print(Chair(4)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditLoanapplyElmCreditloanadmitQueryResponse(AlipayResponse):
    """Response wrapper for the mybank credit loan-admission query API."""

    def __init__(self):
        super(MybankCreditLoanapplyElmCreditloanadmitQueryResponse, self).__init__()
        # admission decision label returned by the service
        self._admit_label = None

    @property
    def admit_label(self):
        return self._admit_label

    @admit_label.setter
    def admit_label(self, value):
        self._admit_label = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the decoded response payload."""
        response = super(MybankCreditLoanapplyElmCreditloanadmitQueryResponse, self).parse_response_content(response_content)
        # only assign when the key is present; otherwise keep the None default
        if 'admit_label' in response:
            self.admit_label = response['admit_label']
|
from __future__ import absolute_import
# coding=utf-8
import os
import sys
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
PY3 = sys.version_info[0] == 3

install_requires = []
dependency_links = []
req_files = ['requirements.txt']
# Python 2 needs some extra backport packages
if not PY3:
    req_files.append('py2-requirements.txt')
for file in req_files:
    with open(os.path.join(here, file)) as f:
        lines = f.readlines()
        # plain lines are requirements; lines containing a space are
        # '<name> <url>' pairs whose URL becomes a dependency link
        install_requires.extend([p.strip() for p in lines if ' ' not in p])
        dependency_links.extend([p.split()[-1].strip() for p in lines
                                 if ' ' in p])
setup(
    name='pythoncn',
    description='A social forum for pythonista',
    url='https://github.com/python-cn/firefly',
    version='0.1.0',
    author='Python-cn Team',
    author_email='ciici123@gmail.com',
    platforms='any',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    # BUG FIX: ('tests*') is just the parenthesised string 'tests*', not a
    # tuple; find_packages then iterated it character by character and the
    # test packages were not excluded.  A one-element tuple needs the comma.
    packages=find_packages('.', exclude=('tests*',)),
    install_requires=install_requires,
    dependency_links=dependency_links,
)
|
import paho.mqtt.client as paho
import time
broker = "localhost"
port = 9001
Keep_Alive_Interval = 10
def on_publish(client, userdata, mid):
    """paho-mqtt callback fired when a publish request reaches the broker."""
    # mid identifies the publish request; unused beyond this notification
    print("data published \n")
# wire up the client and start the background network loop
# NOTE(review): port 9001 is conventionally a websockets listener, but
# paho.Client() defaults to raw TCP transport -- confirm the broker config
client=paho.Client()
client.on_publish=on_publish
client.connect(broker,int(port), int(Keep_Alive_Interval))
client.loop_start()
# publish a greeting once per second, forever (stop with Ctrl-C)
while True:
    teksdikirim="Hallooo"
    client.publish("/topik",teksdikirim)
    time.sleep(1)
|
import logging
from constants import *
def logger_test():
    """Emit one DEBUG and one INFO record through this module's logger."""
    log = logging.getLogger(__name__)
    log.debug("Added Logger")
    log.info("Info message")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 15:15:50 2020
@author: Patrick
"""
"""Implementation of Fast Orthogonal Search"""
#==============================================
#candidates_generation.py
#==============================================
def CandidatePool_Generation(x_train, y_train, K, L):
Candidates = []
# x[n-l], l = 0,...,10 (11 Candidates)
for l in range(0, L+1):
zero_list = l * [0]
data_list = x_train[:len(x_train)-l]
xn_l = [*zero_list, *data_list]
Candidates.append(xn_l)
# y[n-k], l = 1,...,10 (10 Candidates)
for k in range(1, K+1):
zero_list = k * [0]
data_list = y_train[:len(y_train)-k]
yn_k = [*zero_list, *data_list]
Candidates.append(yn_k)
# x[n-l1]x[n-l2], l1 = 0,...,10
# l2 = l1,...,10 (66 Candidates)
for l1 in range(0, L+1):
for l2 in range(l1, L+1):
zero_list_l1 = l1 * [0]
zero_list_l2 = l2 * [0]
data_list_l1 = x_train[:len(x_train)-l1]
data_list_l2 = x_train[:len(x_train)-l2]
xn_l1 = [*zero_list_l1, *data_list_l1]
xn_l2 = [*zero_list_l2, *data_list_l2]
Candidates.append([i*j for i,j in zip(xn_l1,xn_l2)])
# y[n-l1]y[n-l2], k1 = 1,...,10
# k2 = k1,...,10 (55 Candidates)
for k1 in range(1, K+1):
for k2 in range(k1, K+1):
zero_list_k1 = k1 * [0]
zero_list_k2 = k2 * [0]
data_list_k1 = y_train[:len(y_train)-k1]
data_list_k2 = y_train[:len(y_train)-k2]
yn_k1 = [*zero_list_k1, *data_list_k1]
yn_k2 = [*zero_list_k2, *data_list_k2]
Candidates.append([i*j for i,j in zip(yn_k1,yn_k2)])
# x[n-l]y[n-k], l = 1,...,10
# k = 1,...,10 (110 Candidates)
for l in range(0, L+1):
for k in range(1, K+1):
zero_list_l = l * [0]
zero_list_k = k * [0]
data_list_l = x_train[:len(x_train)-l]
data_list_k = y_train[:len(y_train)-k]
xn_l = [*zero_list_l, *data_list_l]
yn_k = [*zero_list_k, *data_list_k]
Candidates.append([i*j for i,j in zip(xn_l,yn_k)])
return Candidates
#
|
from rest_framework import serializers
from exam.models import TblExamRecord
class RecordSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of TblExamRecord.

    The grading-related fields are read-only, so API clients can see them
    but only server-side code can set them.
    """

    class Meta:
        model = TblExamRecord
        fields = '__all__'
        read_only_fields = ('score', 'joiner', 'time_cost', 'category_score')
import sys
import numpy as np
import pandas as pd
from unittest import TestCase, main
from timeseries.cv import TimeSeriesCV
class TestCV(TestCase):
    """Exercises timeseries.cv.TimeSeriesCV's split and hold-out windows.

    NOTE(review): every test constructs the CV as
    ``TimeSeriesCV(..., validation_size=test_size, test_size=validation_size, ...)``
    i.e. the *values* bound to those two keywords look swapped relative to the
    keyword names.  The assertions below are internally consistent with that
    swap, so it may be intentional -- confirm against TimeSeriesCV's parameter
    semantics before "fixing" it.
    """

    def test_all_k(self):
        """Without max_k, split() yields every possible fold (34 here)."""
        # 20 h of minute data -> 1201 rows, two aligned frames.
        idx = pd.date_range('2018-1-1 01:01', '2018-1-1 21:01', freq='1min')
        n = len(idx)
        df1 = pd.DataFrame(np.arange(1, n+1), index=idx, columns=['df1'])
        df2 = pd.DataFrame(np.arange(1, n+1)*10, index=idx, columns=['df2'])
        label_dt = '10min'
        train_size = '100min'
        test_size = '20min'
        validation_size = '60min'
        cv = TimeSeriesCV(train_size=train_size, validation_size=test_size, test_size=validation_size, label_dt=label_dt)
        splits = cv.split(df1, df2)
        self.assertEqual(len(splits), 34)
        self.assertTrue(splits[0][0].start >= idx[0])
        last_test_e = None
        for train_slice, test_slice in splits:
            train_s, train_e, test_s, test_e = train_slice.start, train_slice.stop, test_slice.start, test_slice.stop
            #print('train ', train_s, train_e)
            #print('test ', test_s, test_e)
            # Each fold: a train window, a label_dt gap, then the test window.
            self.assertEqual(train_e, train_s + pd.Timedelta(train_size))
            self.assertEqual(test_s, train_e + pd.Timedelta(label_dt))
            self.assertEqual(test_e, test_s + pd.Timedelta(test_size))
            last_test_e = test_e
        # The final hold-out starts one label_dt after the last fold and ends
        # exactly at the end of the index.
        tr_slice, val_slice = cv.get_train_and_test()
        tr_s, tr_e, val_s, val_e = tr_slice.start, tr_slice.stop, val_slice.start, val_slice.stop
        self.assertEqual(last_test_e + pd.Timedelta(label_dt), val_s)
        self.assertEqual(tr_e, tr_s + pd.Timedelta(train_size))
        self.assertEqual(val_s, tr_e + pd.Timedelta(label_dt))
        self.assertEqual(val_e, idx[-1])
        self.assertEqual(val_s, idx[-1] - pd.Timedelta(validation_size))

    def test_max_k(self):
        """max_k caps the number of folds returned by split()."""
        idx = pd.date_range('2018-1-1 01:01', '2018-1-1 21:01', freq='1min')
        n = len(idx)
        df1 = pd.DataFrame(np.arange(1, n+1), index=idx, columns=['df1'])
        df2 = pd.DataFrame(np.arange(1, n+1)*10, index=idx, columns=['df2'])
        max_k = 2
        label_dt = '10min'
        train_size = '100min'
        test_size = '20min'
        validation_size = '60min'
        cv = TimeSeriesCV(train_size=train_size, validation_size=test_size, test_size=validation_size, label_dt=label_dt, max_k=max_k)
        splits = cv.split(df1, df2)
        self.assertEqual(len(splits), max_k)
        last_test_e = None
        for train_slice, test_slice in splits:
            train_s, train_e, test_s, test_e = train_slice.start, train_slice.stop, test_slice.start, test_slice.stop
            self.assertEqual(train_e, train_s + pd.Timedelta(train_size))
            self.assertEqual(test_s, train_e + pd.Timedelta(label_dt))
            self.assertEqual(test_e, test_s + pd.Timedelta(test_size))
            last_test_e = test_e
        # The hold-out geometry is unchanged by max_k.
        tr_slice, val_slice = cv.get_train_and_test()
        tr_s, tr_e, val_s, val_e = tr_slice.start, tr_slice.stop, val_slice.start, val_slice.stop
        self.assertEqual(last_test_e + pd.Timedelta(label_dt), val_s)
        self.assertEqual(tr_e, tr_s + pd.Timedelta(train_size))
        self.assertEqual(val_s, tr_e + pd.Timedelta(label_dt))
        self.assertEqual(val_e, idx[-1])
        self.assertEqual(val_s, idx[-1] - pd.Timedelta(validation_size))

    def test_different_indices(self):
        """Frames with partially overlapping indices still produce valid folds."""
        # df1 and df2 only overlap between 12:01 and 19:01.
        idx1 = pd.date_range('2018-1-1 01:01', '2018-1-1 19:01', freq='1min')
        idx2 = pd.date_range('2018-1-1 12:01', '2018-1-1 21:01', freq='1min')
        n1 = len(idx1)
        n2 = len(idx2)
        df1 = pd.DataFrame(np.arange(1, n1 + 1), index=idx1, columns=['df1'])
        df2 = pd.DataFrame(np.arange(1, n2 + 1) * 10, index=idx2, columns=['df2'])
        label_dt = '10min'
        train_size = '100min'
        test_size = '20min'
        validation_size = '60min'
        cv = TimeSeriesCV(train_size=train_size, validation_size=test_size, test_size=validation_size, label_dt=label_dt, max_k=5)
        splits = cv.split(df1, df2)
        last_test_e = None
        for train_slice, test_slice in splits:
            train_s, train_e, test_s, test_e = train_slice.start, train_slice.stop, test_slice.start, test_slice.stop
            self.assertEqual(train_e, train_s + pd.Timedelta(train_size))
            self.assertEqual(test_s, train_e + pd.Timedelta(label_dt))
            self.assertEqual(test_e, test_s + pd.Timedelta('20min'))
            last_test_e = test_e
        # Hold-out is anchored to the *later* index's end (idx2).
        tr_slice, val_slice = cv.get_train_and_test()
        tr_s, tr_e, val_s, val_e = tr_slice.start, tr_slice.stop, val_slice.start, val_slice.stop
        self.assertEqual(last_test_e + pd.Timedelta(label_dt), val_s)
        self.assertEqual(tr_e, tr_s + pd.Timedelta(train_size))
        self.assertEqual(val_s, tr_e + pd.Timedelta(label_dt))
        self.assertEqual(val_e, idx2[-1])
        self.assertEqual(val_s, idx2[-1] - pd.Timedelta(validation_size))
if __name__ == '__main__':
    # Run this module's tests directly via unittest's CLI entry point.
    main()
|
"""Config file housing the database URI of the database to be used"""
DATABASE_URI = 'postgresql://username:password@port/database'
|
"""YuYuYu Dai Mankai no Shou"""
from typing import Callable, List
import psutil
import vapoursynth as vs
from debandshit import dumb3kdb
from lvsfunc.kernels import Bicubic, Bilinear, Catrom, Mitchell
from vardautomation import FileInfo
from vardautomation.config import PresetEAC3, PresetWEB
from vardefunc.mask import detail_mask, diff_rescale_mask, region_mask
from vardefunc.misc import DebugOutput, merge_chroma
from vardefunc.noise import Graigasm
from vardefunc.scale import nnedi3_upscale
from vardefunc.util import finalise_output, remap_rfs
from vsmask.edge import FDOG
from vsutil import depth, get_y
from vsutil.clips import split
from yuyuyu_common import Denoise, EncodingWeb, graigasm_args, Scale
# Pin this process to the even-numbered logical cores and cap VapourSynth
# at 12 worker threads to match.
p_handle = psutil.Process()
p_handle.cpu_affinity(list(range(0, 24, 2)))
core = vs.core
core.num_threads = 12

# Episode number taken from this script's file name (last two chars before ".py").
NUM = __file__[-5:-3]

# Amazon Prime VBR source, trimmed 24 frames at both ends; carries the
# encoding presets used by EncodingWeb below.
WEB_AMZ_VBR = FileInfo(
    f'eps/Yuuki Yuuna wa Yuusha de Aru - Dai Mankai no Shou - {NUM} (Amazon Prime VBR 1080p).mkv',
    (24, -24), preset=[PresetWEB, PresetEAC3]
)
# B-Global source, used as the second input of the masked merge in filtering().
WEB_BGLOBAL = FileInfo(
    f'eps/[NC-Raws] 结城友奈是勇者 大满开之章 - {NUM} [B-Global][WEB-DL][1080p][AVC AAC][ENG_TH_SRT][MKV].mkv'
)
# Opening start/end frames, passed to EncodingWeb in the main guard.
OPSTART, OPEND = 32008, 34166

DEBUG = DebugOutput(WEB_AMZ_VBR.clip_cut)
@DEBUG.catch
@finalise_output
def filtering(debug: DebugOutput = DEBUG) -> vs.VideoNode:
    """Full filterchain: source merge, rescale, deband, regrain.

    ``debug`` is only used by the commented-out inspection lines below.
    """
    # Merge the two WEB sources: where the FDOG edge mask (built on the
    # B-Global luma, binarized then grown and blurred) is strong, take the
    # Amazon clip; elsewhere keep B-Global.
    edgemask = FDOG().get_mask(get_y(WEB_BGLOBAL.clip_cut), 25, multi=2.25).std.Binarize(35)
    edgemask = edgemask.std.Maximum().std.Minimum().std.BoxBlur(0, 2, 2, 2, 2)
    src = core.std.MaskedMerge(WEB_BGLOBAL.clip_cut, WEB_AMZ_VBR.clip_cut, edgemask)

    # Border continuity fix on all four edges, applied only on two ranges.
    _ef = [1, 0, 0]
    edgefix = core.edgefixer.ContinuityFixer(src, *[_ef] * 4)
    edgefix = remap_rfs(src, edgefix, [(222, 1081), (1888, 1930)])
    out = edgefix

    out = depth(out, 32)

    denoise = Denoise.bm3d(out, [1.25, 2, 2], radius=1)
    out = denoise

    # Descale the luma to 1600x900 three ways and combine them per pixel
    # with a clamping expression.
    # NOTE(review): the third entry calls .scale, not .descale -- confirm
    # this is intentional (it feeds the min/max clamp as the "safe" bound).
    descalers: List[Callable[[vs.VideoNode, int, int], vs.VideoNode]] = [
        Bilinear().descale,
        Mitchell().descale,
        Bicubic(-.5, .25).scale,
    ]
    luma = get_y(out)
    descales = [descaler(luma, 1600, 900) for descaler in descalers]
    descale = core.std.Expr(descales, 'x y z min max y z max min z min')

    # Upscale back to 1080p via waifu2x; on two ranges use an alternative
    # nnedi3 path built from a 1280x720 debilinear descale instead.
    # upscale = Bicubic(-.5, .25).scale(nnedi3_upscale(descale), 1920, 1080)
    upscale = Bicubic(-.5, .25).scale(Scale.waifu2x(descale, 1), 1920, 1080)
    # upscale_s1 = Bicubic(-.5, .25).scale(nnedi3_upscale(core.descale.Debicubic(luma, 1280, 720, 1/3, 1/3)), 1920, 1080)
    upscale_s2 = Bicubic(-.5, .25).scale(nnedi3_upscale(core.descale.Debilinear(luma, 1280, 720)), 1920, 1080)
    upscale = remap_rfs(upscale, upscale_s2, [(222, 1081), (1888, 1930)])
    out = merge_chroma(upscale, out)
    out = depth(out, 16)

    # Restore credits/overlays from the merged source on specific ranges,
    # using rescale-difference masks at both candidate native resolutions.
    credit = out
    ref = depth(src, 16)
    rescale_mask = diff_rescale_mask(ref.rgvs.RemoveGrain(3), 900, kernel=Catrom(), thr=65 * 256)
    rescale_mask_s2 = diff_rescale_mask(ref.rgvs.RemoveGrain(3), 720, kernel=Bilinear(), thr=75 * 256)
    rescale_mask_s2 = region_mask(rescale_mask_s2, 30, 30, 30, 30)
    # debug <<= dict(rescale_mask=rescale_mask, rescale_mask_s2=rescale_mask_s2)
    credit = remap_rfs(credit, ref, [(0, 222)])
    credit = remap_rfs(
        credit, core.std.MaskedMerge(credit, ref, rescale_mask),
        [(1517, 1888), (1952, 2039), (32186, 34166)]
    )
    credit = remap_rfs(
        credit, core.std.MaskedMerge(credit, ref, rescale_mask_s2),
        [(261, 322), (507, 1022), (1100, 1194), (1888, 1930)]
    )
    # 1517
    out = credit

    # Deband, protecting detail found by the combined plane-wise detail mask.
    deband_mask = core.std.Expr(
        split(detail_mask(out, 3200, 5000).resize.Bilinear(format=vs.YUV444P16)),
        'x y z max max'
    ).rgvs.RemoveGrain(3)
    # debug <<= deband_mask
    deband = dumb3kdb(out, threshold=[33, 49], grain=24)
    deband = core.std.MaskedMerge(deband, out, deband_mask)
    # debug <<= deband
    out = deband

    # Re-grain with the shared Graigasm settings.
    grain = Graigasm(**graigasm_args).graining(out)  # type: ignore
    out = grain

    return out
# print(__name__)
if __name__ in ('__main__', '__vapoursynth__'):
    # Encoding run: drop the debug outputs, then hand the filtered clip and
    # chapter info to the encoder.
    del DEBUG
    EncodingWeb(WEB_AMZ_VBR, filtering(), NUM, OPSTART, OPEND).run()
else:
    # Preview / editor session: keep DEBUG's registered outputs.
    # filtering()
    pass
|
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("TACO_ROOT", "~/.taco/mainnet"))).resolve()
|
import logging
from synapse.http.client import SimpleHttpClient
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
from synapse.rest.client.v2_alpha._base import client_patterns
logger = logging.getLogger(__name__)
class SMSCallback(RestServlet):
    """Servlet that receives SMS provider callbacks and logs the payload."""

    PATTERNS = client_patterns("/sms/callback$", v1=True)

    def __init__(self, hs):
        super().__init__()
        self.hs = hs
        self.http_client = SimpleHttpClient(hs)

    async def on_POST(self, request: SynapseRequest):
        # Parse and log the callback body; always acknowledge with 200.
        body = parse_json_object_from_request(request)
        logger.info("-----smscallback-------param:%s" % (str(body)))
        return 200, {}
def register_servlets(hs, http_server):
    """Synapse hook: attach this module's servlets to the HTTP server."""
    servlet = SMSCallback(hs)
    servlet.register(http_server)
|
import matplotlib.pyplot as plt
from PIL import Image
# Use a CJK-capable font so the Chinese titles render correctly.
plt.rcParams['font.sans-serif']="SimHei"

img=Image.open("lena.tiff")
# Split into the three single-channel (grayscale) images.
img_r,img_g,img_b=img.split()
# R channel: downscale to 50x50.
img1=img_r.resize((50,50))
# G channel: horizontal mirror, then rotate 270 degrees.
img21=img_g.transpose(Image.FLIP_LEFT_RIGHT)
img2=img21.transpose(Image.ROTATE_270)
# B channel: crop the top-left 150x150 region.
img3=img_b.crop((0,0,150,150))
# Recombine the untouched channels back into the original RGB image and save.
img4=Image.merge("RGB",[img_r,img_g,img_b])
img4.save("test.PNG")

# 2x2 panel: R (scaled), G (mirrored+rotated), B (cropped), merged RGB.
plt.subplot(221)
plt.axis("off")
plt.imshow(img1,cmap="gray")
plt.title("R-缩放",fontsize=14)
plt.subplot(222)
# NOTE(review): the other three panels call plt.axis("off"); this one does
# not -- confirm whether showing axes here is intentional.
plt.imshow(img2,cmap="gray")
plt.title("G-镜像+旋转",fontsize=14)
plt.subplot(223)
plt.axis("off")
plt.imshow(img3,cmap="gray")
plt.title("B-裁剪",fontsize=14)
plt.subplot(224)
plt.axis("off")
plt.imshow(img4)
plt.title("RGB",fontsize=14)
plt.suptitle("图像基本操作",fontsize=20,color="blue")
plt.tight_layout(rect=[0,0,1,0.9])
plt.show()
|
import requests
from bs4 import BeautifulSoup
import xlsxwriter
# User Input
# NOTE(review): this is Python 2 code (raw_input, print statement).
url = raw_input("Page link:") or ""
totalPage = raw_input("Total Page[1]:") or 1
# NOTE(review): raw_input returns a *string* while the defaults are ints;
# below, ``i*rowDiff`` repeats the string (e.g. 2*'15' == '1515') whenever
# the user typed a value.  Confirm and wrap these prompts in int().
rowDiff = raw_input("Per Page Data[15]:") or 15
file_name = raw_input("Saved File Name:")
# -----
# Create an new Excel file.
dataBook = xlsxwriter.Workbook('upload/{}.xlsx'.format(file_name))
dataRow = dataBook.add_worksheet()
# Widen the first column to make the text clearer.
dataRow.set_column('A:B', 20)
dataRow.set_column('C:C', 60)
# Add a bold format to use to highlight cells.
bold = dataBook.add_format({'bold': True})
# Header row.
dataRow.write('A1', 'Title', bold)
dataRow.write('B1', 'Phone', bold)
dataRow.write('C1', 'Meta', bold)
rowID = 1
# One request per listing page; the path encodes the row offset.
for i in range(int(totalPage)):
    req = requests.get("{}/p/{}".format(str(url), (i*rowDiff)))
    # req = requests.get("http://www.houzz.com/professionals/interior-designer/c/Washington--DC/p/0")
    soup = BeautifulSoup(req.content)
    totalRow = soup.find_all("div", { "class" : "whiteCard" })
    # print(soup.prettify())
    for idx, sr in enumerate(totalRow):
        rowID += 1
        title = sr.find_all("a", { "class" : 'pro-title' })[0].text
        phone = sr.find_all("span", { "class" : 'pro-phone' })[0].text
        # NOTE(review): ``description`` is fetched but never written anywhere.
        description = sr.find_all("div", { "class" : 'pro-description' })[0].text
        meta = sr.find_all("div", { "class" : 'pro-meta' })[0].text
        dataRow.write('A{}'.format(rowID), title)
        dataRow.write('B{}'.format(rowID), phone)
        dataRow.write('C{}'.format(rowID), meta)
    print "Page {} Complete...".format((i+1))
dataBook.close()
|
'''
Written by Thomas Munzer (tmunzer@juniper.net)
Github repository: https://github.com/tmunzer/Mist_library/
'''
import mlib as mist_lib
from mlib import cli
import org_conf_backup
import org_conf_deploy
import org_inventory_backup
import org_inventory_backup
import org_inventory_precheck
import org_inventory_restore
def _backup_org(source_mist_session, source_org_id, source_org_name):
    """Back up the SOURCE org configuration; abort the script on failure."""
    try:
        _print_new_step("Backuping SOURCE Org Configuration")
        org_conf_backup.start_org_backup(source_mist_session, source_org_id, source_org_name)
    except Exception as e:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit
        # and gave no clue about the failure; report it before aborting.
        print(e)
        exit(255)
def _restore_org(dest_mist_session, dest_org_id, dest_org_name, source_org_name, check_org_name=False, in_backup_folder=False):
    """Deploy the backed-up configuration onto the DESTINATION org."""
    _print_new_step("Deploying Configuration to the DESTINATION Org")
    org_conf_deploy.start_restore_org(
        dest_mist_session, dest_org_id, dest_org_name,
        source_org_name, check_org_name, in_backup_folder)
#######
#######
def _backup_inventory(source_mist_session, source_org_id, source_org_name, in_backup_folder=False):
    """Back up the SOURCE org device inventory."""
    _print_new_step("Backuping SOURCE Org Inventory")
    org_inventory_backup.start_inventory_backup(
        source_mist_session, source_org_id, source_org_name, in_backup_folder)
def _precheck_inventory(dest_mist_session, dest_org_id, dest_org_name, source_org_name, in_backup_folder=False):
    """Dry-run the inventory restoration against the DESTINATION org."""
    _print_new_step("Pre-check for INVENTORY restoration")
    org_inventory_precheck.start_precheck(
        dest_mist_session, dest_org_id, dest_org_name,
        source_org_name, None, in_backup_folder)
def _restore_inventory(dest_mist_session, dest_org_id, dest_org_name, source_mist_session, source_org_name, source_org_id, check_org_name=False, in_backup_folder=False):
    """Deploy the backed-up inventory onto the DESTINATION org."""
    _print_new_step("Deploying Inventory to the DESTINATION Org")
    org_inventory_restore.start_restore_inventory(
        dest_mist_session, dest_org_id, dest_org_name,
        source_mist_session, source_org_name, source_org_id,
        None, check_org_name, in_backup_folder)
#######
#######
def _print_new_step(message):
print()
print("".center(80,"#"))
print("#", "{0} ".format(message).center(76), "#")
print("".center(80,"#"))
print()
#######
#######
def _create_org(mist_session):
    """Interactively create a new org; return (mist_session, org_id, org_name).

    Loops until a non-empty organization name is entered.  Aborts the
    script (exit code 10) if the creation API call fails.
    """
    while True:
        custom_dest_org_name = input("What is the new Organization name? ")
        if not custom_dest_org_name:
            continue
        org = {
            "name": custom_dest_org_name
        }
        print()
        print("Creating the organisation \"{0}\" in {1} ".format(custom_dest_org_name, mist_session.host).ljust(79, "."), end="", flush=True)
        try:
            # Bug fix: the API call used to sit *outside* the try block and
            # after the success mark was printed, so a failed creation was
            # never caught and success was reported unconditionally.
            org_id = mist_lib.requests.orgs.orgs.create(mist_session, org)["result"]["id"]
        except Exception:
            print('\033[31m\u2716\033[0m')
            exit(10)
        print("\033[92m\u2714\033[0m")
        print()
        return (mist_session, org_id, custom_dest_org_name)
def select_or_create_org(mist_session=None):
    """Ask whether to create a new org or restore into an existing one.

    Returns (mist_session, org_id, org_name).  Bug fix: the passed-in
    ``mist_session`` used to be silently discarded; it is now reused and a
    new session is only opened when none is supplied.
    """
    if mist_session is None:
        mist_session = mist_lib.Mist_Session()
    while True:
        res = input("Do you want to create a (n)ew organisation or (r)estore to an existing one? ")
        if res.lower() == "r":
            org_id = cli.select_org(mist_session)[0]
            org_name = mist_lib.requests.orgs.info.get(mist_session, org_id)["result"]["name"]
            return (mist_session, org_id, org_name)
        elif res.lower() == "n":
            return _create_org(mist_session)
#######
#######
def _select_org(mist_session=None, host=None):
    """Let the user pick an org; return (mist_session, org_id, org_name).

    Bug fix: the passed-in ``mist_session`` used to be silently discarded;
    it is now reused and a new session is only opened when none is given.
    """
    if mist_session is None:
        mist_session = mist_lib.Mist_Session(host=host)
    org_id = cli.select_org(mist_session)[0]
    # Use the same accessor path as select_or_create_org() above
    # (the original used ``mist_lib.orgs.info`` here but
    # ``mist_lib.requests.orgs.info`` everywhere else -- confirm against mlib).
    org_name = mist_lib.requests.orgs.info.get(mist_session, org_id)["result"]["name"]
    return (mist_session, org_id, org_name)
if __name__ == "__main__":
_print_new_step("Select the SOURCE Org")
source_mist_session, source_org_id, source_org_name = _select_org()
_print_new_step("Select the DESTINATION Org")
dest_mist_session, dest_org_id, dest_org_name = select_or_create_org()
_backup_org(source_mist_session, source_org_id, source_org_name)
_backup_inventory(source_mist_session, source_org_id, source_org_name, in_backup_folder=True)
_restore_org(dest_mist_session, dest_org_id, dest_org_name, source_org_name, in_backup_folder=True)
_precheck_inventory(dest_mist_session, dest_org_id, dest_org_name, source_org_name, in_backup_folder=True)
_restore_inventory(dest_mist_session, dest_org_id, dest_org_name, source_mist_session, source_org_name, source_org_id, in_backup_folder=True)
|
"""Convert Soldiworks BOM to McMaster order"""
import re
import argparse
import pandas as pd
def main(args):
    """Load a Bill of Materials CSV, and print a string which
    can be copied into the mcmaster.com order form.

    ``args.sldbom`` is the path of a SolidWorks BOM exported as CSV; rows
    whose 'SW-File Name(File Name)' field does not contain exactly one
    'McMaster-<part>' token are skipped.
    """
    data = pd.read_csv(args.sldbom)
    print('I loaded the following data from the SolidWorks BOM:')
    print(data)
    print('\n\n')
    print('To build a McMaster order for this BOM, copy the following')
    print('into "mcmaster.com > Order > Paste products and quantities":\n')
    # Compile the part-number pattern once instead of on every row.
    regex = re.compile(r'McMaster-([A-Z0-9]+)')
    for _, row in data.iterrows():
        # Find the McMaster part number in the file name field of the BOM.
        matches = regex.findall(row['SW-File Name(File Name)'])
        if len(matches) != 1:
            continue
        mcmaster_part_number = matches[0]
        # print "part number, quantity", which is the format expected by
        # the mcmaster.com order form.
        print(mcmaster_part_number + ', {:d}'.format(row['QTY.']))
    print('\n\n')
    print('Note that many McMaster products are sold by the pack, not by "each",')
    print('so check the quantities after you build your order. This program')
    print('does not know how many items are in a pack.')
if __name__ == '__main__':
    #pylint: disable=invalid-name
    # CLI entry point: single positional argument, the exported BOM CSV path.
    parser = argparse.ArgumentParser(
        description='Create McMaster orders from SolidWorks Bill of Materials.' +
        '\nSee https://github.com/mvernacc/solidworks2mcmaster for detailed instructions.')
    parser.add_argument("sldbom", help='SolidWorks BOM exported as CSV file.')
    arguments = parser.parse_args()
    main(arguments)
|
import socket
import click
from aiohttp import web
from aiohttp_micro.core.tools.zipkin import create_tracer
def get_address(default: str = "127.0.0.1") -> str:
    """Best-effort detection of this host's IP address.

    First tries resolving the local hostname.  If that fails, opens a UDP
    socket "towards" 8.8.8.8 (no packet is actually sent) and reads the
    source address the OS picked.  Falls back to ``default`` when both
    attempts fail.
    """
    try:
        return socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        pass

    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(("8.8.8.8", 1))
        return probe.getsockname()[0]
    except socket.gaierror:
        return default
    finally:
        probe.close()
@click.group()
@click.pass_context
def server(ctx):
    """Root CLI group; subcommands attach via ``@server.command()``."""
    pass
@server.command()
@click.option("--host", default=None, help="Specify application host")
@click.option("--port", default=5000, help="Specify application port")
@click.option("--tags", "-t", multiple=True, help="Specify tags for Consul Catalog")
@click.pass_context
def run(ctx, host, port, tags):
    """Validate host/port, attach the tracer and serve the aiohttp app."""
    app = ctx.obj["app"]

    try:
        port = int(port)
        # Bug fix: the original used ``and``, which can never be true, so
        # out-of-range ports were silently accepted.
        if port < 1024 or port > 65535:
            raise RuntimeError("Port should be from 1024 to 65535")
    except ValueError:
        raise RuntimeError("Port should be numeric")

    if not host:
        # No host given: bind and advertise loopback.
        host = "127.0.0.1"
        address = "127.0.0.1"
    else:
        # Advertise the externally visible address, not the bind host.
        address = get_address()

    app.cleanup_ctx.append(create_tracer(host, port))

    app["logger"].info(f"Application serving on http://{address}:{port}")
    web.run_app(app, host=host, port=port, print=None)
    app["logger"].info("Shutdown application")
|
import json
import logging
from collections import OrderedDict
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
import maya.cmds as cmds
from mop.ui.settings import get_settings
from mop.ui.signals import clear_all_signals, publish, subscribe
from mop.vendor.Qt import QtWidgets, QtCore
import mop.dag
from mop.core.rig import Rig
logger = logging.getLogger(__name__)
class mopParentSpaces(MayaQWidgetBaseMixin, QtWidgets.QMainWindow):
    """GUI to create, update and delete parent spaces on a mop control.

    The user picks a child control, a space type (parent/orient/point), an
    ordered list of driver transforms and a per-driver nice name.  The data
    is stored as JSON on the control's ``parent_space_data`` attribute; the
    rig must be unbuilt/rebuilt for the spaces to take effect.

    Fixes vs. the original: ``_control_configuration`` now always returns a
    two-tuple (the corrupt-data branch returned a bare ``None`` that callers
    could not unpack), and the Python-2-only dict idioms (``iteritems()``,
    subscripting ``keys()``, passing dict views to Qt) were replaced with
    forms valid on both Python 2 and 3.
    """

    ui_name = "mop_parent_spaces"

    # GUI label -> value stored in parent_space_data (order = combo order).
    space_types = OrderedDict(
        (("Parent", "parent"), ("Orient", "orient"), ("Point", "point"))
    )

    def __init__(self, parent=None):
        super(mopParentSpaces, self).__init__(parent)
        # driver transform -> user-facing nice name.
        self._nice_names = {}
        # Driver currently selected in the parents list view.
        self._current_driver = None
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("mop - Parent Spaces")

        # --- Widgets -------------------------------------------------------
        self.content = QtWidgets.QWidget()
        self.child_content = QtWidgets.QWidget()
        self.child = QtWidgets.QLineEdit()
        self.pick_child_button = QtWidgets.QPushButton("Pick Selected")
        self.space_type = QtWidgets.QComboBox()
        self.parents_content = QtWidgets.QWidget()
        self.parents = QtWidgets.QListView()
        self.add_parent_button = QtWidgets.QPushButton("Add Selected")
        self.remove_parents_button = QtWidgets.QPushButton("Remove")
        self.nice_name = QtWidgets.QLineEdit()
        self.update_button = QtWidgets.QPushButton("Create")
        self.delete_button = QtWidgets.QPushButton("Delete All")

        # --- Layouts -------------------------------------------------------
        self.setCentralWidget(self.content)
        layout = QtWidgets.QVBoxLayout()
        self.content.setLayout(layout)
        form = QtWidgets.QFormLayout()
        layout.addLayout(form)
        child_layout = QtWidgets.QHBoxLayout()
        self.child_content.setLayout(child_layout)
        child_layout.addWidget(self.child)
        child_layout.addWidget(self.pick_child_button)
        parents_layout = QtWidgets.QVBoxLayout()
        self.parents_content.setLayout(parents_layout)
        parents_layout.addWidget(self.parents)
        parents_actions_layout = QtWidgets.QHBoxLayout()
        parents_layout.addLayout(parents_actions_layout)
        parents_actions_layout.addWidget(self.add_parent_button)
        parents_actions_layout.addWidget(self.remove_parents_button)
        form.addRow("Child Control:", self.child_content)
        form.addRow("Space Type:", self.space_type)
        form.addRow("Parent Transforms:", self.parents_content)
        form.addRow("Nice Name:", self.nice_name)
        actions_layout = QtWidgets.QHBoxLayout()
        layout.addLayout(actions_layout)
        actions_layout.addWidget(self.update_button)
        actions_layout.addWidget(self.delete_button)

        # --- Initial widget state: everything but "Pick" is disabled until
        # --- a child control has been chosen.
        self.child.setEnabled(False)
        self.space_type.addItems(list(self.space_types.keys()))
        self.space_type.setEnabled(False)
        self.parents.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.parents.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.nice_name.setEnabled(False)
        self.add_parent_button.setEnabled(False)
        self.remove_parents_button.setEnabled(False)
        self.update_button.setEnabled(False)
        self.delete_button.setEnabled(False)
        child_layout.setContentsMargins(0, 0, 0, 0)
        parents_layout.setContentsMargins(0, 0, 0, 0)

        self.model = QtCore.QStringListModel()
        self.parents.setModel(self.model)

        # --- Signals -------------------------------------------------------
        self.pick_child_button.released.connect(self.pick_child)
        self.parents.selectionModel().currentChanged.connect(
            self._on_current_parent_changed
        )
        self.nice_name.textChanged.connect(self._on_nice_name_changed)
        self.add_parent_button.released.connect(self.add_parent)
        self.remove_parents_button.released.connect(self.remove_parents)
        # NOTE(review): ``update`` shadows QWidget.update (repaint); renaming
        # would change the public interface, so it is kept but worth auditing.
        self.update_button.released.connect(self.update)
        self.delete_button.released.connect(self.delete_all)

    def pick_child(self):
        """Pick the child from Maya's selection."""
        selection = cmds.ls(selection=True)
        if not selection:
            logger.warning("Please select an mop control to start.")
            return
        control = selection[-1]
        self.set_child(control)
        # A child is set: unlock the rest of the GUI.
        self.space_type.setEnabled(True)
        self.nice_name.setEnabled(True)
        self.add_parent_button.setEnabled(True)
        self.remove_parents_button.setEnabled(True)
        self.update_button.setEnabled(True)
        self.delete_button.setEnabled(True)

    def set_child(self, control):
        """Set ``control`` as the child for the parent space operation.

        This method will update the ``Child Control`` field if the GUI,
        and fill the parents list if the control already have parents set.

        :param control: Name of the control node to select.
        :type control: str
        """
        self.child.setText(control)
        self._current_driver = None
        self._nice_names = {}
        # If the control has a parent, orient or point space,
        # Then set the Space Type field and lock it.
        # Also load the current space drivers.
        space_type, drivers = self._control_configuration()
        if space_type and drivers:
            # Get the nice name of this space (reverse-map stored value ->
            # GUI label).  ``items()`` works on both Python 2 and 3, unlike
            # the original ``iteritems()``.
            name = {v: k for k, v in self.space_types.items()}[space_type]
            self.model.setStringList(list(drivers.values()))
            self.space_type.setCurrentText(name)
        else:
            self.model.setStringList([])
            # First label of the ordered mapping; ``keys()[0]`` only worked
            # on Python 2.
            self.space_type.setCurrentText(next(iter(self.space_types)))
        self._update_ui_state()

    def add_parent(self):
        """Add a parent from Maya's selection."""
        if not self.child.text():
            logger.warning("Please pick a child control first.")
            return
        selection = cmds.ls(selection=True)
        if not selection:
            logger.warning("Please select parent transforms to start.")
            return
        parents = self.model.stringList()
        # Skip transforms already listed.
        selection = [p for p in selection if p not in parents]
        self.model.setStringList(parents + selection)

    def remove_parents(self):
        """Remove parents selected in the GUI."""
        selection = self.parents.selectionModel().selectedRows()
        parents = self.model.stringList()
        remove = [self.model.data(index, QtCore.Qt.DisplayRole) for index in selection]
        self.model.setStringList([p for p in parents if p not in remove])

    def update(self):
        """Update the parent space data of the selected control.

        This method will only update the parent space data contained in
        the control, users will have to manually unbuild and rebuild the
        rig in order for the parent spaces to be created.
        """
        ctl = self.child.text()
        if not ctl:
            logger.warning("Please pick a child control first.")
            return
        prev_space_type, prev_drivers = self._control_configuration()
        drivers = self.model.stringList()
        name = self.space_type.currentText()
        space_type = self.space_types[name]
        # Transform the list of drivers to a mapping of
        # nice name: transform name.
        _drivers = OrderedDict()
        for driver in drivers:
            _drivers[self._nice_names.get(driver, driver)] = driver
        drivers = _drivers
        space_changed = space_type != prev_space_type
        # NOTE(review): this compares an OrderedDict against a values()
        # view/list, which is never equal, so existing spaces are always
        # removed and rebuilt.  Kept as-is because the downstream behaviour
        # of re-creating switching on an unchanged control is unverified.
        drivers_changed = drivers != prev_drivers.values()
        if prev_drivers and (space_changed or drivers_changed):
            mop.dag.remove_parent_spaces(ctl)
        if drivers:
            Rig.reset_pose()
            mop.dag.create_space_switching(ctl, drivers, space_type)
        # Persist the new configuration on the control.
        data = json.dumps({space_type: drivers})
        cmds.setAttr(ctl + ".parent_space_data", data, type="string")
        self._update_ui_state()

    def delete_all(self):
        """Deletes all parent spaces set on the selected control."""
        ctl = self.child.text()
        if not ctl:
            logger.warning("Please pick a child control first.")
            return
        button = QtWidgets.QMessageBox.warning(
            self,
            "mop - Delete Parent Spaces",
            "You are about to delete all parent spaces " "set on %s, continue ?" % ctl,
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
            QtWidgets.QMessageBox.Yes,
        )
        if button != QtWidgets.QMessageBox.Yes:
            return
        mop.dag.remove_parent_spaces(ctl)
        self.model.setStringList([])
        cmds.setAttr(ctl + ".parent_space_data", "{}", type="string")
        self._update_ui_state()

    def _update_ui_state(self):
        """Update some ui elements depending on the control and parents."""
        if self.model.stringList():
            self.update_button.setText("Update")
        else:
            self.update_button.setText("Create")

    def _control_configuration(self):
        """Return selected control current spaces data.

        :returns: (space_type, drivers) -- ``(None, {})`` when no control is
            picked, no space is stored, or the stored data is corrupt.
        """
        ctl = self.child.text()
        if not ctl:
            return None, {}
        data = cmds.getAttr(ctl + ".parent_space_data")
        spaces = json.loads(data, object_pairs_hook=OrderedDict)
        if not hasattr(spaces, "get"):
            # Data is either corrupt or serialization method has changed.
            # Bug fix: return the same two-tuple shape as the other branches
            # (the original bare ``return`` made callers' unpacking raise).
            return None, {}
        for space_type in self.space_types.values():
            drivers = spaces.get(space_type, {})
            if drivers:
                return space_type, drivers
        return None, {}

    def _on_current_parent_changed(self, index):
        """Update the nice name field.

        :param index: New current index.
        :type index: PySide2.QtCore.QModelIndex
        """
        driver = self.model.data(index, QtCore.Qt.DisplayRole)
        self._current_driver = driver
        name = self._nice_names.get(driver, driver)
        self.nice_name.setText(name)

    def _on_nice_name_changed(self, name):
        """Update the internal nice name data.

        :param name: New name to set on the current parent.
        :type name: str
        """
        self._nice_names[self._current_driver] = name
|
"""
This script creates a test that fails when garage.tf.algos.RL2TRPO
performance is too low.
"""
# yapf: disable
import pytest
from garage.envs import GymEnv, normalize
from garage.experiment import task_sampler
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2TRPO
from garage.tf.algos.rl2 import RL2Env, RL2Worker
from garage.tf.optimizers import (ConjugateGradientOptimizer,
FiniteDifferenceHVP, PenaltyLBFGSOptimizer)
from garage.tf.policies import GaussianGRUPolicy
from garage.trainer import TFTrainer
from tests.fixtures import snapshot_config, TfGraphTestCase
# yapf: enable
# Skip the whole module at collection time when mujoco is missing or broken,
# instead of erroring out.
try:
    # pylint: disable=unused-import
    import mujoco_py  # noqa: F401
except ImportError:
    pytest.skip('To use mujoco-based features, please install garage[mujoco].',
                allow_module_level=True)
except Exception:  # pylint: disable=broad-except
    pytest.skip(
        'Skipping tests, failed to import mujoco. Do you have a '
        'valid mujoco key installed?',
        allow_module_level=True)

# Imported after the guard so a broken mujoco install cannot crash collection.
from garage.envs.mujoco import HalfCheetahDirEnv  # isort:skip
@pytest.mark.mujoco
class TestRL2TRPO(TfGraphTestCase):
    """Smoke-tests RL2TRPO construction, optimizer selection and training."""

    def setup_method(self):
        """Build the shared task sampler, env spec, policy, baseline, sampler."""
        super().setup_method()
        self.meta_batch_size = 10
        self.episode_per_task = 4
        self.max_episode_length = 100
        # Avoid pickling self
        max_episode_length = 100
        self.tasks = task_sampler.SetTaskSampler(
            HalfCheetahDirEnv,
            wrapper=lambda env, _: RL2Env(
                normalize(GymEnv(env, max_episode_length=max_episode_length))))
        self.env_spec = RL2Env(
            normalize(
                GymEnv(HalfCheetahDirEnv(),
                       max_episode_length=max_episode_length))).spec
        self.policy = GaussianGRUPolicy(env_spec=self.env_spec,
                                        hidden_dim=64,
                                        state_include_action=False)
        self.baseline = LinearFeatureBaseline(env_spec=self.env_spec)
        self.sampler = LocalSampler(
            agents=self.policy,
            envs=self.tasks.sample(self.meta_batch_size),
            max_episode_length=self.env_spec.max_episode_length,
            is_tf_worker=True,
            n_workers=self.meta_batch_size,
            worker_class=RL2Worker)

    def test_rl2_trpo_pendulum(self):
        """One training epoch must end with an average return above -40."""
        with TFTrainer(snapshot_config, sess=self.sess) as trainer:
            algo = RL2TRPO(
                meta_batch_size=self.meta_batch_size,
                task_sampler=self.tasks,
                env_spec=self.env_spec,
                policy=self.policy,
                baseline=self.baseline,
                sampler=self.sampler,
                episodes_per_trial=self.episode_per_task,
                discount=0.99,
                max_kl_step=0.01,
                optimizer=ConjugateGradientOptimizer,
                optimizer_args=dict(hvp_approach=FiniteDifferenceHVP(
                    base_eps=1e-5)))
            trainer.setup(algo, self.tasks.sample(self.meta_batch_size))
            last_avg_ret = trainer.train(n_epochs=1,
                                         batch_size=self.episode_per_task *
                                         self.max_episode_length *
                                         self.meta_batch_size)
            assert last_avg_ret > -40

    def test_rl2_trpo_pendulum_default_optimizer(self):
        """kl_constraint='hard' selects the conjugate gradient optimizer."""
        with TFTrainer(snapshot_config, sess=self.sess):
            algo = RL2TRPO(meta_batch_size=self.meta_batch_size,
                           task_sampler=self.tasks,
                           env_spec=self.env_spec,
                           policy=self.policy,
                           baseline=self.baseline,
                           sampler=self.sampler,
                           kl_constraint='hard',
                           episodes_per_trial=self.episode_per_task,
                           discount=0.99,
                           max_kl_step=0.01)
            assert isinstance(algo._inner_algo._optimizer,
                              ConjugateGradientOptimizer)

    def test_ppo_pendulum_default_optimizer2(self):
        """kl_constraint='soft' selects the penalty LBFGS optimizer."""
        with TFTrainer(snapshot_config, sess=self.sess):
            algo = RL2TRPO(meta_batch_size=self.meta_batch_size,
                           task_sampler=self.tasks,
                           env_spec=self.env_spec,
                           policy=self.policy,
                           baseline=self.baseline,
                           sampler=self.sampler,
                           kl_constraint='soft',
                           episodes_per_trial=self.episode_per_task,
                           discount=0.99,
                           max_kl_step=0.01)
            assert isinstance(algo._inner_algo._optimizer,
                              PenaltyLBFGSOptimizer)

    def test_rl2_trpo_pendulum_invalid_kl_constraint(self):
        """An unknown kl_constraint value must raise ValueError."""
        with TFTrainer(snapshot_config, sess=self.sess):
            with pytest.raises(ValueError):
                RL2TRPO(meta_batch_size=self.meta_batch_size,
                        task_sampler=self.tasks,
                        env_spec=self.env_spec,
                        policy=self.policy,
                        baseline=self.baseline,
                        sampler=self.sampler,
                        kl_constraint='xyz',
                        episodes_per_trial=self.episode_per_task,
                        discount=0.99,
                        max_kl_step=0.01)
|
import logging
import os
import shutil
from errno import EEXIST, ENOTDIR
logger = logging.getLogger(__name__)
def make_dir(path):
    """Create the directory *path* and return *path* (error safe).

    "Already exists" is silently ignored so the call is idempotent; any
    other OSError is logged as a warning instead of being raised.
    """
    try:
        os.mkdir(path)
    except OSError as e:
        # EEXIST (17) just means the directory is already there -- not an
        # error for our purposes.  The magic number 17 was replaced with
        # the named errno constant.
        if e.errno != EEXIST:
            logger.warning(u'Exception in make_dir(%s): %s' % (e.filename, repr(e)))
    return path
def make_dirs(path):
    """Recursively create *path* (like ``mkdir -p``) and return it (error safe).

    A trailing separator is appended (and included in the return value) so
    intermediate components are created even when *path* has no trailing
    slash.  "Already exists" is ignored; other OSErrors are logged.
    """
    path += os.sep
    try:
        os.makedirs(path)
    except OSError as e:
        # EEXIST (17) means the leaf already exists; anything else is logged.
        if e.errno != EEXIST:
            # Bug fix: the warning previously named make_dir() instead of
            # make_dirs(), which made logs misleading.
            logger.warning(u'Exception in make_dirs(%s): %s' % (e.filename, repr(e)))
    return path
def delete_dir(path, include_root=True):
    """Recursively empty *path*; also remove *path* itself when
    *include_root* is true.

    Walks bottom-up so every directory is empty by the time it is removed.
    """
    for parent, subdirs, filenames in os.walk(path, topdown=False):
        for filename in filenames:
            os.remove(os.path.join(parent, filename))
        for subdir in subdirs:
            os.rmdir(os.path.join(parent, subdir))
    if include_root:
        os.rmdir(path)
def listdir_abs(parent):
    """Return the entries of *parent* joined onto *parent* itself."""
    join = os.path.join
    return [join(parent, entry) for entry in os.listdir(parent)]
def get_size(file_name):
    """Return the size in bytes of *file_name*, resolved to an absolute path."""
    absolute = os.path.abspath(file_name)
    return os.path.getsize(absolute)
def get_dir_size(dir_name):
    """Sum the sizes of the immediate entries of *dir_name*.

    Returns 0 when *dir_name* does not exist.  NOTE(review): only direct
    children are measured -- nothing is recursed into, so subdirectory
    contents contribute just the directory-entry size.
    """
    # TODO : Write unite test for that method
    if not os.path.exists(dir_name):
        return 0
    total = 0
    for entry in os.listdir(dir_name):
        # Same computation get_size() performs, inlined here.
        total += os.path.getsize(os.path.abspath(os.path.join(dir_name, entry)))
    return total
def safe_delete(path):
    """Remove *path* whatever it is -- symlink, directory tree, or file.

    A missing path is a silent no-op (note: os.path.exists() follows
    symlinks, so a broken symlink is also left untouched, as before).
    """
    if not os.path.exists(path):
        return
    if os.path.islink(path):
        os.unlink(path)
    elif os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def dict_apply(path, dictionnary, symlink_method=None):
    '''
    Mirror a nested dict structure onto the file system rooted at *path*.

    The dict may have any depth; each leaf is a list of (name, target)
    tuples for which a symlink ``<path>/<keys...>/<name> -> <target>`` is
    created.  {'a': {'b': {'c': [('a', '/path_to/a')]}}} creates
    path/a/b/c/a where a is a symlink to /path_to/a.

    Entries on disk that the dict no longer mentions are deleted, and
    existing links whose content size (per get_dir_size) differs from the
    target's are deleted and recreated.

    :param path: root directory to synchronize (assumed to exist --
        TODO confirm; os.listdir(path) raises otherwise).
    :param dictionnary: nested dict describing the desired tree.
    :param symlink_method: optional callable(src, dst) used in place of
        os.symlink for creating each leaf entry.
    :return: None
    '''
    if not dictionnary:
        return
    # Remove anything at this level that the dict does not mention.
    path_content = set(os.listdir(path))
    dictionarry_keys = set(dictionnary.keys())
    to_remove = path_content - dictionarry_keys
    for remove in to_remove:
        full_remove = os.path.join(path, remove)
        safe_delete(full_remove)
    for root, leaf in dictionnary.items():
        full_leaf = os.path.join(path, root)
        # An empty leaf (None, {}, []) means "this entry must not exist".
        if not leaf:
            safe_delete(full_leaf)
            continue
        # Ensure the intermediate directory exists (make_dir is idempotent).
        current_path = make_dir(os.path.join(path, root))
        current_path_content = set(os.listdir(current_path))
        if isinstance(leaf, list):
            # Leaf level: create/refresh one symlink per (name, target) pair.
            for name, abs_path_to_name in leaf:
                new_one = os.path.join(current_path, name)
                if name not in current_path_content:
                    # Not on disk yet: create the link (errors are logged,
                    # not raised, so one bad entry doesn't abort the sync).
                    try:
                        if not symlink_method:
                            os.symlink(abs_path_to_name, new_one)
                        else:
                            symlink_method(abs_path_to_name, new_one)
                    except OSError as e:
                        logger.error(u'Tried to symlink: "%s" to "%s/%s"' % (abs_path_to_name,
                                                                             current_path,
                                                                             name))
                        logger.error(u'Error: %s' % e)
                else:
                    # Already present: keep it (remove from the "stale" set)
                    # but recreate it if its content size diverged from the
                    # target's, per get_dir_size.
                    current_path_content.remove(name)
                    if get_dir_size(abs_path_to_name) != get_dir_size(new_one):
                        safe_delete(new_one)
                        try:
                            if not symlink_method:
                                os.symlink(abs_path_to_name, new_one)
                            else:
                                symlink_method(abs_path_to_name, new_one)
                        except OSError as e:
                            logger.error(u'Tried to symlink: "%s" to "%s/%s"' % (abs_path_to_name,
                                                                                 current_path,
                                                                                 name))
                            logger.error(u'Error: %s' % e)
            # Whatever is left on disk was not named by the leaf: delete it.
            if current_path_content:
                for content in current_path_content:
                    full_content = os.path.join(current_path, content)
                    safe_delete(full_content)
        else:
            # Interior node: recurse one level deeper.
            dict_apply(current_path, leaf, symlink_method=symlink_method)
|
"""
The Python compiler only supports {:extern} code on a module level, so the
entire module must be supplied.
"""
import sys, _dafny
# The Dafny-compiled program imports this extern module as "Library";
# fail fast if it was loaded under any other name.
assert "Library" == __name__
# Self-reference so methods below can reach sibling classes via Library.<Class>.
Library = sys.modules[__name__]
class LibClass:
    """Extern class supplying the arithmetic helpers Dafny declares."""

    @staticmethod
    def CallMeInt(x):
        """Return the pair (x + 1, 2 * (x + 1))."""
        successor = x + 1
        doubled = successor + successor
        return (successor, doubled)

    @staticmethod
    def CallMeNative(x, b):
        """Return x + 1 when b is truthy, otherwise x - 1."""
        return x + 1 if b else x - 1
class OtherClass:
    """Extern class with a single static helper."""

    @staticmethod
    def CallMe():
        """Return the fixed identification string."""
        return "OtherClass.CallMe"
class AllDafny:
    """Class whose single method prints through the Dafny runtime."""
    @staticmethod
    def M():
        # Emit "AllDafny.M\n" via the runtime's print/Seq helpers so the
        # output matches Dafny's sequence semantics.
        _dafny.print(_dafny.Seq("AllDafny.M\n"))
class Mixed:
    """Class mixing Dafny-compiled and extern members."""

    def ctor__(self):
        # Dafny-generated constructor hook; nothing to initialize here.
        pass

    @staticmethod
    def M():
        _dafny.print(_dafny.Seq("Extern static method says: "))
        Library.Mixed.P()

    @staticmethod
    def P():
        _dafny.print(_dafny.Seq("Mixed.P\n"))

    def IM(self):
        _dafny.print(_dafny.Seq("Extern instance method says: "))
        self.IP()

    def IP(self):
        _dafny.print(_dafny.Seq("Mixed.IP\n"))

    @staticmethod
    def F():
        """Return 1000 plus G()'s result (1001)."""
        return 1000 + Library.Mixed.G()

    @staticmethod
    def G():
        return 1

    def IF(self):
        """Return 2000 plus IG()'s result (2002)."""
        return 2000 + self.IG()

    def IG(self):
        return 2
class AllExtern:
    """Fully extern class: Dafny declares it, this Python implements it."""
    @staticmethod
    def P():
        # Print through the Dafny runtime so output matches Seq semantics.
        _dafny.print(_dafny.Seq("AllExtern.P\n"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.