hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfbc87c00bf2a37ff48fcd479715aa6d88ac62f | 1,827 | py | Python | doc/source/conf.py | franckalbinet/spectrai | 3458bc64672077ebeee98fa53c6716a23231ba7e | [
"BSD-3-Clause"
] | 1 | 2020-09-13T10:05:41.000Z | 2020-09-13T10:05:41.000Z | doc/source/conf.py | franckalbinet/spectrai | 3458bc64672077ebeee98fa53c6716a23231ba7e | [
"BSD-3-Clause"
] | 4 | 2020-11-13T18:56:11.000Z | 2022-02-10T01:52:45.000Z | doc/source/conf.py | franckalbinet/spectrai | 3458bc64672077ebeee98fa53c6716a23231ba7e | [
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'spectrai'
# NOTE: 'copyright' shadows the builtin; this is the conventional Sphinx name.
copyright = '2020, Franck Albinet'
author = 'Franck Albinet'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 35.134615 | 79 | 0.662288 |
acfbc93e4951a56e6705f442bf8ca9d6e9c8ac06 | 5,694 | py | Python | test/unit/infra/media/test_downloader.py | Tastyep/Pi-OpenCast | 1ed07130c26e6dd70184446af3c9143094d0c9a0 | [
"MIT"
] | 31 | 2019-10-05T14:23:10.000Z | 2022-02-27T19:38:55.000Z | test/unit/infra/media/test_downloader.py | Tastyep/Pi-OpenCast | 1ed07130c26e6dd70184446af3c9143094d0c9a0 | [
"MIT"
] | 199 | 2020-08-30T16:33:03.000Z | 2022-03-28T04:10:06.000Z | test/unit/infra/media/test_downloader.py | Tastyep/Pi-OpenCast | 1ed07130c26e6dd70184446af3c9143094d0c9a0 | [
"MIT"
] | 1 | 2019-10-05T16:00:49.000Z | 2019-10-05T16:00:49.000Z | from test.util import TestCase
from unittest.mock import Mock, patch
from OpenCast.domain.service.identity import IdentityService
from OpenCast.infra.media.downloader import (
Downloader,
DownloadError,
DownloadSuccess,
Logger,
)
class LoggerTest(TestCase):
    """Unit tests for Logger.log_download_progress.

    Covers one case per youtube-dl progress-hook status ('downloading',
    'error', 'finished'), each with a complete-payload and a missing-fields
    variant, asserting against a mocked logging implementation.
    """
    def setUp(self):
        # Wrap a mock logging backend so calls can be asserted on directly.
        self.impl = Mock()
        self.logger = Logger(self.impl)
    def test_downloading_log_missing_status(self):
        # A hook payload without a 'status' key must be ignored entirely.
        data = {}
        self.logger.log_download_progress(data)
        self.impl.debug.assert_not_called()
        self.impl.error.assert_not_called()
    def test_downloading_log(self):
        data = {
            "status": "downloading",
            "filename": "/tmp/media.mp4",
            "downloaded_bytes": 50,
            "total_bytes": 100,
            "speed": 10,
        }
        self.logger.log_download_progress(data)
        self.impl.debug.assert_called_with(
            "Downloading", filename="/tmp/media.mp4", ratio="50.00%", speed="10 bytes/s"
        )
    def test_downloading_log_missing_data(self):
        # Missing fields fall back to placeholder values instead of raising.
        data = {
            "status": "downloading",
        }
        self.logger.log_download_progress(data)
        self.impl.debug.assert_called_with(
            "Downloading", filename="unknown", ratio="N/A", speed="0 bytes/s"
        )
    def test_error_log(self):
        data = {
            "status": "error",
            "filename": "/tmp/media.mp4",
        }
        self.logger.log_download_progress(data)
        self.impl.error.assert_called_with(
            "Downloading error", filename="/tmp/media.mp4", error=data
        )
    def test_error_log_missing_data(self):
        data = {
            "status": "error",
        }
        self.logger.log_download_progress(data)
        self.impl.error.assert_called_with(
            "Downloading error", filename="unknown", error=data
        )
    def test_finished_log(self):
        data = {
            "status": "finished",
            "filename": "/tmp/media.mp4",
            "total_bytes": 100,
        }
        self.logger.log_download_progress(data)
        self.impl.debug.assert_called_with(
            "Downloading success", filename="/tmp/media.mp4", size="100 bytes"
        )
    def test_finished_log_missing_data(self):
        data = {
            "status": "finished",
        }
        self.logger.log_download_progress(data)
        self.impl.debug.assert_called_with(
            "Downloading success", filename="unknown", size="0 bytes"
        )
class DownloaderTest(TestCase):
    """Unit tests for Downloader with YoutubeDL patched out.

    The executor mock runs submitted handlers synchronously, so events
    dispatched by the downloader can be asserted immediately after each call.
    """
    def setUp(self):
        patcher = patch("OpenCast.infra.media.downloader.YoutubeDL")
        self.addCleanup(patcher.stop)
        ydl_cls_mock = patcher.start()
        self.ydl = ydl_cls_mock.return_value
        # Run submitted jobs inline instead of on a worker thread.
        def execute_handler(handler, *args):
            handler(*args)
        self.executor = Mock()
        self.executor.submit = Mock(side_effect=execute_handler)
        self.dispatcher = Mock()
        self.downloader = Downloader(self.executor, self.dispatcher)
    @patch("OpenCast.infra.media.downloader.Path")
    def test_download_video(self, path_cls):
        # Download succeeds and the file exists afterwards -> DownloadSuccess.
        path_mock = path_cls.return_value
        path_mock.exists.return_value = True
        op_id = IdentityService.random()
        self.downloader.download_video(op_id, "url", "/tmp/media.mp4")
        self.dispatcher.dispatch.assert_called_with(DownloadSuccess(op_id))
    def test_download_video_error(self):
        # youtube-dl raises -> DownloadError carries the exception message.
        self.ydl.download.side_effect = RuntimeError("error")
        op_id = IdentityService.random()
        self.downloader.download_video(op_id, "url", "/tmp/media.mp4")
        self.dispatcher.dispatch.assert_called_with(DownloadError(op_id, "error"))
    @patch("OpenCast.infra.media.downloader.Path")
    def test_download_video_missing(self, path_cls):
        # youtube-dl reports success but the file is absent -> DownloadError.
        path_mock = path_cls.return_value
        path_mock.exists.return_value = False
        op_id = IdentityService.random()
        self.downloader.download_video(op_id, "url", "/tmp/media.mp4")
        self.dispatcher.dispatch.assert_called_with(
            DownloadError(op_id, "video path points to non existent file")
        )
    def test_download_subtitle(self):
        subtitle = "/tmp/media.en.vtt"
        self.assertEqual(
            subtitle,
            self.downloader.download_subtitle(
                "url", dest="/tmp/media", lang="eng", exts=["vtt"]
            ),
        )
    def test_download_subtitle_not_found(self):
        # Every candidate extension fails to download -> None is returned.
        self.ydl.download.side_effect = RuntimeError()
        self.assertEqual(
            None,
            self.downloader.download_subtitle(
                "url", dest="/tmp/media", lang="eng", exts=["vtt", "srt"]
            ),
        )
    def test_download_subtitle_second_choice(self):
        # First extension ('vtt') fails once; the fallback ('srt') is used.
        step = 0
        def raise_once(*args, **kwargs):
            nonlocal step
            step += 1
            if step == 1:
                raise RuntimeError()
        subtitle = "/tmp/media.en.srt"
        self.ydl.download.side_effect = raise_once
        self.assertEqual(
            subtitle,
            self.downloader.download_subtitle(
                "url", dest="/tmp/media", lang="eng", exts=["vtt", "srt"]
            ),
        )
    def test_download_metadata(self):
        metadata = {"url": "url", "title": "title"}
        self.ydl.extract_info.return_value = metadata
        self.assertEqual(
            metadata, self.downloader.download_metadata("url", process_ie_data=True)
        )
    def test_download_metadata_error(self):
        # extract_info raising is swallowed and surfaced as None.
        self.ydl.extract_info.side_effect = RuntimeError()
        self.assertEqual(
            None, self.downloader.download_metadata("url", process_ie_data=True)
        )
| 30.449198 | 88 | 0.60713 |
acfbc9e4b933275d55bc6f64d77133e1aaebf622 | 569 | py | Python | build/lib/boids/code/command_line.py | jscott6/boids | 0d02ba6ecfbc10ce56811c5fe45a55114faf5a33 | [
"MIT"
] | null | null | null | build/lib/boids/code/command_line.py | jscott6/boids | 0d02ba6ecfbc10ce56811c5fe45a55114faf5a33 | [
"MIT"
] | null | null | null | build/lib/boids/code/command_line.py | jscott6/boids | 0d02ba6ecfbc10ce56811c5fe45a55114faf5a33 | [
"MIT"
] | null | null | null |
from .controller import Controller
from argparse import ArgumentParser
from matplotlib import pyplot as plt
import yaml
from os.path import dirname, join
def main():
    """Entry point for the boids CLI: load config, parse args, run the simulation.

    Reads config.yaml from the package directory, lets --size/-s override the
    configured flock size, then starts the animation and blocks on plt.show().
    """
    # Fixes two defects of the original: the config file handle was never
    # closed, and yaml.load() without an explicit Loader is deprecated and
    # unsafe on untrusted input; safe_load is equivalent for plain config data.
    config_path = join(dirname(__file__), 'config.yaml')
    with open(config_path) as config_file:
        config = yaml.safe_load(config_file)
    parser = ArgumentParser()
    parser.add_argument('--size', '-s', type=int, default=config['size'])
    arguments = parser.parse_args()
    contl = Controller(size=arguments.size,
                       init_data=config['init_data'],
                       params=config['params'])
    # Keep a reference to the animation so it is not garbage-collected.
    anim = contl.go()
    plt.show()
| 27.095238 | 77 | 0.639719 |
acfbca86a6e8e4c225ed115ee01bafef8fffe7b5 | 3,541 | py | Python | app/api/api_v1/endpoints/auth.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 7 | 2019-09-18T19:45:46.000Z | 2020-05-18T20:07:07.000Z | app/api/api_v1/endpoints/auth.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 252 | 2019-09-18T20:25:03.000Z | 2022-03-25T11:23:50.000Z | app/api/api_v1/endpoints/auth.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 8 | 2019-09-18T11:02:45.000Z | 2021-05-18T17:08:51.000Z | from datetime import datetime, timedelta
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from app.api.utils.db import get_db
from app.core import config
from app.core.jwt import create_access_token
from app.crud.auth import (
get_owner_by_email,
get_user_by_email,
owner_authenticate,
owner_email_verified,
user_authenticate,
user_email_verified,
)
from app.models.auth import EmailVerifyResponse, Login, VerifyEmail
from app.models.token import Token
router = APIRouter()
@router.post("/auth/owner/login", response_model=Token)
def owner_login_access_token(data: Login, db: Session = Depends(get_db)):
    """
    OAuth2 compatible token login for owners, get an access token for future requests
    """
    owner = owner_authenticate(db, email=data.email, password=data.password)
    if not owner:
        # Deliberately generic message: does not reveal whether the email
        # exists, which prevents account enumeration.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect email or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)
    # The JWT payload carries "owner_id" (vs "user_id" for the user login
    # endpoint) so the two principal types are distinguishable downstream.
    return {
        "access_token": create_access_token(
            data={"owner_id": owner.id}, expires_delta=access_token_expires
        )
    }
@router.post("/auth/user/login", response_model=Token)
def user_login_access_token(data: Login, db: Session = Depends(get_db)):
    """
    OAuth2 compatible token login, get an access token for future requests
    """
    user = user_authenticate(db, email=data.email, password=data.password)
    if not user:
        # Generic 401 regardless of whether the email or the password was
        # wrong, to avoid leaking which accounts exist.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect email or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)
    # Payload key "user_id" mirrors the owner endpoint's "owner_id".
    return {
        "access_token": create_access_token(
            data={"user_id": user.id}, expires_delta=access_token_expires
        )
    }
@router.post("/auth/owner/verify-email", response_model=EmailVerifyResponse)
async def verify_owner_email_otp(*, data: VerifyEmail, db: Session = Depends(get_db)):
    """Verify an owner's email via OTP.

    Succeeds only when the owner exists, the OTP matches, and it is still
    within OTP_EXPIRY_MINUTES of creation; any failure falls through to a
    single generic 401 so the caller cannot tell which check failed.
    """
    owner = get_owner_by_email(db, email=data.email)
    if owner and (owner.otp == data.otp):
        expiry_time = owner.otp_created_at + timedelta(
            minutes=config.OTP_EXPIRY_MINUTES
        )
        # NOTE(review): comparison uses naive utcnow(); assumes otp_created_at
        # is stored as naive UTC — confirm against the model definition.
        if expiry_time >= datetime.utcnow():
            owner = owner_email_verified(db, owner=owner)
            if owner.email_verified:
                return EmailVerifyResponse(
                    verified=True, message="Your email has been verified."
                )
    raise HTTPException(
        status_code=401, detail="Cannot verify your otp or it may have been expired."
    )
@router.post("/auth/user/verify-email", response_model=EmailVerifyResponse)
async def verify_user_email_otp(*, data: VerifyEmail, db: Session = Depends(get_db)):
user = get_user_by_email(db, email=data.email)
if user and (user.otp == data.otp):
expiry_time = user.otp_created_at + timedelta(minutes=config.OTP_EXPIRY_MINUTES)
if expiry_time >= datetime.utcnow():
user = user_email_verified(db, user=user)
if user.email_verified:
return EmailVerifyResponse(
verified=True, message="Your email has been verified."
)
raise HTTPException(
status_code=401, detail="Cannot verify your otp or it may have been expired."
)
| 37.273684 | 88 | 0.684552 |
acfbcb0dfd6f44ad05c4ae2e784336d87d4dd814 | 5,268 | py | Python | NLU/RPI/RPI3/TF2/Classes/Engine.py | AIIAL/HIAS-GeniSysAI-NLU-Agent | 07379e00707ecc77da69e74fc481d7088216fdfd | [
"MIT"
] | 2 | 2020-08-25T01:20:21.000Z | 2020-09-04T01:36:25.000Z | NLU/RPI/RPI3/TF2/Classes/Engine.py | AdamMiltonBarker/GeniSysAI-1 | 5771c88522b430dad2d946245cfe1ce1fcdf087d | [
"MIT"
] | 3 | 2021-08-05T12:59:46.000Z | 2021-08-05T13:33:23.000Z | NLU/RPI/RPI3/TF2/Classes/Engine.py | AdamMiltonBarker/GeniSysAI-1 | 5771c88522b430dad2d946245cfe1ce1fcdf087d | [
"MIT"
] | 2 | 2020-08-24T00:32:26.000Z | 2020-09-02T02:57:29.000Z | ######################################################################################################
#
# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss
# Repository: GeniSysAI
# Project: Natural Language Understanding Engine
#
# Author: Adam Milton-Barker (AdamMiltonBarker.com)
#
# Title: Engine Class
# Description: Core functions for the NLU Engine.
# License: MIT License
# Last Modified: 2020-10-01
#
######################################################################################################
import sys
import os
import random
import json
from Classes.Helpers import Helpers
from Classes.Context import Context
from Classes.Data import Data
from Classes.Mitie import Entities
from Classes.Extensions import Extensions
from Classes.Mitie import Entities
from Classes.Model import Model
from Classes.TTS import TTS
class Engine():
	""" Engine Class

	Core functions for the NLU Engine: loads training data and the trained
	DNN, restores the MITIE NER model, and provides the helpers used to
	classify intents, resolve entities and format responses.
	"""

	def __init__(self, isAudio):
		""" Initializes the class.

		isAudio: when True, also initializes the text-to-speech feature.
		"""
		self.Helpers = Helpers("Engine")
		self.ner = None
		self.user = {}
		# Order matters: model() reads self.x/self.y which data() populates.
		self.data()
		self.entities()
		self.model()
		self.session()
		self.thresholds()
		if isAudio:
			self.speech()
		self.Helpers.logger.info("Engine class initialized.")

	def data(self):
		""" Initializes the data.

		Loads the raw training data plus the trained vocabulary, classes,
		feature matrices and intent map produced by training.
		"""
		self.Data = Data()
		self.trainingData = self.Data.loadTrainingData()
		self.trainedData = self.Data.loadTrainedData()
		self.trainedWords = self.trainedData["words"]
		self.trainedClasses = self.trainedData["classes"]
		self.x = self.trainedData["x"]
		self.y = self.trainedData["y"]
		self.intentMap = self.trainedData["intentMap"][0]

	def doExtension(self, extension, entities, exEntities, extensionResponses):
		""" Executes an extension.

		extension is a dotted "Folder.Class.method" string; the module is
		imported dynamically, the class instantiated, and the named method
		called with the candidate responses (and entities when exEntities
		is truthy). Returns whatever the extension method returns.
		"""
		classParts = extension.split(".")
		classFolder = classParts[0]
		className = classParts[1]
		theEntities = None
		if exEntities != False:
			theEntities = entities
		module = __import__(
			classParts[0]+"."+classParts[1], globals(), locals(), [className])
		extensionClass = getattr(module, className)()
		response = getattr(extensionClass, classParts[2])(
			extensionResponses, theEntities)
		return response

	def entities(self):
		""" Initializes the entities (restores the trained NER model). """
		self.entityController = Entities()
		self.ner = self.entityController.restoreNER()

	def entitiesCheck(self, entityHolder, theIntent, clearEntities):
		""" Checks entities.

		Picks a fallback response when the intent expects entities but none
		were found, a plain response when entities were cleared, and an
		entity-substituted response otherwise. Returns (response, entities,
		extension, extensionResponses, exEntities).
		"""
		if not len(entityHolder) and len(theIntent["entities"]):
			response, entities = self.entityController.replaceResponseEntities(
				random.choice(theIntent["fallbacks"]), entityHolder)
			extension, extensionResponses, exEntities = self.Extensions.setExtension(
				theIntent)
		elif clearEntities:
			entities = []
			response = random.choice(theIntent["responses"])
			extension, extensionResponses, exEntities = self.Extensions.setExtension(
				theIntent)
		else:
			response, entities = self.entityController.replaceResponseEntities(
				random.choice(theIntent["responses"]), entityHolder)
			extension, extensionResponses, exEntities = self.Extensions.setExtension(
				theIntent)
		return response, entities, extension, extensionResponses, exEntities

	def fallbackCheck(self, fallback, theIntent, entityHolder):
		""" Checks if fallback.

		When a fallback is requested and the intent defines fallbacks, uses
		one (with no extension); otherwise uses a normal response and the
		intent's extension. Same 5-tuple return shape as entitiesCheck.
		"""
		if fallback and fallback in theIntent and len(theIntent["fallbacks"]):
			response, entities = self.entityController.replaceResponseEntities(
				random.choice(theIntent["fallbacks"]), entityHolder)
			extension, extensionResponses, exEntities = None, [], None
		else:
			response, entities = self.entityController.replaceResponseEntities(
				random.choice(theIntent["responses"]), entityHolder)
			extension, extensionResponses, exEntities = self.Extensions.setExtension(
				theIntent)
		return response, entities, extension, extensionResponses, exEntities

	def model(self):
		""" Initializes the model (builds the DNN from the trained x/y). """
		self.Model = Model()
		self.Context = Context()
		self.Extensions = Extensions()
		self.tmodel = self.Model.buildDNN(self.x, self.y)

	def session(self):
		""" Initializes a NLU session.

		Initiates an empty guest user session; GeniSys will ask the user to
		verify their GeniSys user by speaking or typing if it does not know
		who it is speaking to.
		"""
		self.userID = 0
		if not self.userID in self.user:
			self.user[self.userID] = {}
			self.user[self.userID]["history"] = {}

	def respond(self, status, sentence, intent, confidence,
				response, cIn, cOut, cCurrent, extension, entities):
		""" Forms the response dict returned to API/CLI callers. """
		return {
			"Response": status,
			"ResponseData": [{
				"Received": sentence,
				"Intent": intent,
				"Confidence": confidence,
				"Response": response,
				"Context": [{
					"In": cIn,
					"Out": cOut,
					"Current": cCurrent
				}],
				"Extension": extension,
				"Entities": entities
			}]
		}

	def speech(self):
		""" Initializes the TTS feature. """
		self.TTS = TTS()

	def thresholds(self):
		""" Sets thresholds.

		Sets the threshold for the NLU engine; this can be changed using
		arguments to commandline programs or parameters for API calls.
		"""
		self.threshold = self.Helpers.confs["NLU"]["Threshold"]
| 28.171123 | 102 | 0.685649 |
acfbcb2adb8836d7bf5ad1a020c9cd67d75c60ce | 23,827 | py | Python | preprocess_for_lambdamart_no_flags.py | loganlebanoff/correct_summarization | cec0d5401ddb5f7c33aca14f31da68b2f8092c53 | [
"BSD-3-Clause"
] | 2 | 2019-07-20T14:57:39.000Z | 2020-06-01T11:14:40.000Z | preprocess_for_lambdamart_no_flags.py | loganlebanoff/correct_summarization | cec0d5401ddb5f7c33aca14f31da68b2f8092c53 | [
"BSD-3-Clause"
] | null | null | null | preprocess_for_lambdamart_no_flags.py | loganlebanoff/correct_summarization | cec0d5401ddb5f7c33aca14f31da68b2f8092c53 | [
"BSD-3-Clause"
] | null | null | null | import scipy
import time
import itertools
import convert_data
import numpy as np
import data
from tqdm import tqdm
import util
from absl import flags
from absl import app
import sys
import os
import hashlib
import struct
import subprocess
import collections
import glob
from tensorflow.core.example import example_pb2
from scipy import sparse
from scoop import futures
from collections import defaultdict
import pickle
# from multiprocessing.dummy import Pool as ThreadPool
# pool = ThreadPool(12)
# Guard against absl re-registering flags when this module is imported more
# than once (e.g. by worker processes): each DEFINE_* below is skipped if the
# flag already exists, and sys.argv is only parsed on the first import.
if 'singles_and_pairs' in flags.FLAGS:
    flags_already_done = True
else:
    flags_already_done = False
FLAGS = flags.FLAGS
if 'singles_and_pairs' not in flags.FLAGS:
    flags.DEFINE_string('singles_and_pairs', 'singles', 'Whether to run with only single sentences or with both singles and pairs. Must be in {singles, both}.')
if 'dataset_name' not in flags.FLAGS:
    flags.DEFINE_string('dataset_name', 'cnn_dm', 'Whether to run with only single sentences or with both singles and pairs. Must be in {singles, both}.')
if 'dataset_split' not in flags.FLAGS:
    flags.DEFINE_string('dataset_split', 'train_val', 'Which dataset split to use. Must be one of {train, val, test}')
if 'use_pair_criteria' not in flags.FLAGS:
    flags.DEFINE_boolean('use_pair_criteria', False, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'pca' not in flags.FLAGS:
    flags.DEFINE_boolean('pca', False, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'tfidf_limit' not in flags.FLAGS:
    flags.DEFINE_integer('tfidf_limit', -1, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'num_instances' not in flags.FLAGS:
    flags.DEFINE_integer('num_instances', -1, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'sent_position_criteria' not in flags.FLAGS:
    flags.DEFINE_boolean('sent_position_criteria', True, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'special_xsum_balance' not in flags.FLAGS:
    flags.DEFINE_boolean('special_xsum_balance', True, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if 'lr' not in flags.FLAGS:
    flags.DEFINE_boolean('lr', False, 'Which mode to run in. Must be in {write_to_file, generate_summaries}.')
if not flags_already_done:
    # Parse command-line flags eagerly so FLAGS is usable at import time below.
    FLAGS(sys.argv)
# ---- Module-level experiment configuration ----
exp_name = 'reference'
# NOTE(review): the trailing comma makes this the tuple (-1,), not the int -1.
# The code below reads FLAGS.num_instances instead; confirm this module-level
# name is genuinely unused before relying on it.
num_instances = -1,
random_seed = 123
max_sent_len_feat = 20  # cap applied to the sentence-length feature
balance = True
importance = True
real_values = True  # emit raw feature values rather than one-hot histogram bins
# singles_and_pairs = 'singles'
include_sents_dist = True
include_tfidf_vec = True
min_matched_tokens = 1  # minimum shared non-stopword tokens for the overlap criterion
data_dir = os.path.expanduser('~') + '/data/tf_data/with_coref_and_ssi'
log_dir = 'logs/'
out_dir = 'data/to_lambdamart'
tfidf_vec_path = 'data/tfidf/' + 'all' + '_tfidf_vec_5.pkl'
pca_vec_path = 'data/tfidf/' + 'all' + '_pca.pkl'
temp_dir = 'data/temp'
max_enc_steps = 100000
min_dec_steps = 100
max_dec_steps = 120
dm_single_close_quote = '\u2019' # unicode
dm_double_close_quote = '\u201d'
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', dm_single_close_quote, dm_double_close_quote, ")"] # acceptable ways to end a sentence
# Field names/types used to unpack each tf.Example in this dataset.
names_to_types = [('raw_article_sents', 'string_list'), ('similar_source_indices', 'delimited_list_of_lists'), ('summary_text', 'string'), ('corefs', 'json'), ('doc_indices', 'delimited_list')]
# Import-time side effects: load the fitted TF-IDF vectorizer (and, when
# FLAGS.pca is set, the PCA/LSA model) from disk once per process.
print('Loading TFIDF vectorizer')
with open(tfidf_vec_path, 'rb') as f:
    tfidf_vectorizer = pickle.load(f)
if FLAGS.pca:
    print('Loading LSA model')
    with open(pca_vec_path, 'rb') as f:
        pca = pickle.load(f)
else:
    pca = None
def convert_to_one_hot(value, bins, range):
    """Bucket *value* into *bins* equal-width bins spanning *range* and return
    the histogram counts as a plain Python list (a one-hot indicator vector
    when *value* is a single scalar).
    """
    # np.histogram returns (counts, bin_edges); only the counts are needed.
    return np.histogram(value, bins=bins, range=range)[0].tolist()
def does_start_with_quotation_mark(sent_tokens):
    """Return True iff the sentence's first token is a PTB-style opening quote
    (a single or double backtick); False for an empty token list."""
    if not sent_tokens:
        return False
    return sent_tokens[0] in ("`", "``")
# Cap on sentence position used when normalizing/histogramming position features.
max_num_sents = 30
def get_single_sent_features(sent_idx, sent_term_matrix, article_sent_tokens, mmr, rel_sent_idx):
    """Build the feature vector for one candidate sentence.

    Args:
        sent_idx: absolute index of the sentence in the article.
        sent_term_matrix: per-sentence TF-IDF (or PCA-reduced) matrix.
        article_sent_tokens: tokenized sentences of the article.
        mmr: per-sentence importance scores, indexed by absolute index.
        rel_sent_idx: sentence index relative to its own source document.

    Returns:
        A flat list of features: real-valued (position, length, quote flag,
        mmr, optionally the tf-idf row) when the module-level `real_values`
        is True, otherwise one-hot/histogram encodings of the same signals.
    """
    abs_sent_idx = rel_sent_idx + 1.0
    norm_sent_idx = (rel_sent_idx + 1.0) / max_num_sents # POSSIBLY A BUG, NEED TO DO MIN(REL_SENT_IDX, MAX_NUM_SENTS)
    # doc_similarity = util.cosine_similarity(sent_term_matrix[sent_idx], doc_vector)[0][0]
    sent_len = len(article_sent_tokens[sent_idx])
    sent_len = min(max_sent_len_feat, sent_len)
    # +1 shifts the flag into {1, 2} so 0 can mean "feature absent" downstream.
    starts_with_quote = int(does_start_with_quotation_mark(article_sent_tokens[sent_idx])) + 1
    my_mmr = mmr[sent_idx]
    # PCA output is dense; raw TF-IDF is a scipy sparse matrix.
    if scipy.sparse.issparse(sent_term_matrix):
        tfidf_vec = sent_term_matrix[sent_idx].toarray()[0].tolist()
    else:
        tfidf_vec = sent_term_matrix[sent_idx].tolist()
    if real_values:
        features = [abs_sent_idx, norm_sent_idx, sent_len, starts_with_quote, my_mmr]
        if include_tfidf_vec:
            features.extend(tfidf_vec)
        return features
    else:
        sent_idx, _ = np.histogram(min(sent_idx, max_num_sents), bins=10, range=(0,max_num_sents))
        # doc_similarity, _ = np.histogram(doc_similarity, bins=5, range=(0,1))
        sent_len, _ = np.histogram(sent_len, bins=10, range=(1,max_sent_len_feat))
        my_mmr = convert_to_one_hot(my_mmr, 5, (0,1))
        return sent_idx.tolist() + sent_len.tolist() + [starts_with_quote] + my_mmr
def get_pair_sent_features(similar_source_indices, sent_term_matrix, article_sent_tokens, mmr, my_rel_sent_indices):
    """Build the feature vector for a candidate sentence pair.

    Concatenates the single-sentence features of both sentences, then appends
    pair-level features: mean mmr, cosine similarity of the two tf-idf rows,
    and (optionally) the distance between their relative positions.
    """
    features = []
    # features.append(1) # is_sent_pair
    sent_idx1, sent_idx2 = similar_source_indices[0], similar_source_indices[1]
    rel_sent_idx1, rel_sent_idx2 = my_rel_sent_indices[0], my_rel_sent_indices[1]
    sent1_features = get_single_sent_features(sent_idx1,
                         sent_term_matrix, article_sent_tokens, mmr, rel_sent_idx1)
    features.extend(sent1_features) # sent_idx, doc_similarity, sent_len
    sent2_features = get_single_sent_features(sent_idx2,
                         sent_term_matrix, article_sent_tokens, mmr, rel_sent_idx2)
    features.extend(sent2_features) # sent_idx, doc_similarity, sent_len
    average_mmr = (mmr[sent_idx1] + mmr[sent_idx2])/2
    sent1_row = sent_term_matrix[sent_idx1]
    sent2_row = sent_term_matrix[sent_idx2]
    if FLAGS.pca:
        # Dense PCA rows are 1-D; cosine_similarity expects 2-D inputs.
        sent1_row = sent1_row.reshape(1, -1)
        sent2_row = sent2_row.reshape(1, -1)
    sents_similarity = util.cosine_similarity(sent1_row, sent2_row)[0][0]
    sents_dist = abs(rel_sent_idx1 - rel_sent_idx2)
    if real_values:
        features.extend([average_mmr, sents_similarity])
        if include_sents_dist:
            features.append(sents_dist)
    else:
        features.extend(convert_to_one_hot(average_mmr, 5, (0,1)))
        features.extend(convert_to_one_hot(sents_similarity, 5, (0,1))) # sents_similarity
        if include_sents_dist:
            features.extend(convert_to_one_hot(min(sents_dist, max_num_sents), 10, (0,max_num_sents))) # sents_dist
    return features
def get_features(similar_source_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len,
                 pair_feat_len, mmr, singles_and_pairs):
    """Dispatch to single- or pair-sentence feature extraction.

    Returns the feature list, or None when the candidate is filtered out
    (a single in 'pairs' mode, a pair in 'singles' mode, or an empty
    candidate). In 'both' mode a leading marker (2=single, 1=pair) is
    prepended and the unused half of the vector is zero-padded so singles
    and pairs share one fixed-length feature space.
    """
    features = []
    if len(similar_source_indices) == 1:
        if singles_and_pairs == 'pairs':
            return None
        sent_idx = similar_source_indices[0]
        rel_sent_idx = rel_sent_indices[sent_idx]
        features = get_single_sent_features(sent_idx, sent_term_matrix, article_sent_tokens, mmr, rel_sent_idx)
        if singles_and_pairs == 'both':
            features = [2] + features
            features.extend([0]*pair_feat_len)
    elif len(similar_source_indices) == 2:
        if singles_and_pairs == 'singles':
            return None
        if singles_and_pairs == 'both':
            features = [1] + features
            features.extend([0]*single_feat_len)
        my_rel_sent_indices = [rel_sent_indices[similar_source_indices[0]], rel_sent_indices[similar_source_indices[1]]]
        features.extend(get_pair_sent_features(similar_source_indices, sent_term_matrix, article_sent_tokens, mmr, my_rel_sent_indices))
    elif len(similar_source_indices) == 0:
        return None
    else:
        print(similar_source_indices)
        raise Exception("Shouldn't be here")
    return features
def format_to_lambdamart(inst, single_feat_len):
    """Serialize a Lambdamart_Instance into one RankLib/SVMlight-style line.

    Format: "<relevance> qid:<qid> <idx>:<value> ... #source_indices:<i j ...>,inst_id:<id>".
    Zero-valued features are omitted (sparse encoding), except the last
    feature, which is always written so the total feature count is
    recoverable by the reader.

    Args:
        inst: object with features, relevance, qid, source_indices, inst_id.
        single_feat_len: unused; kept for interface compatibility with callers.

    Returns:
        The formatted line (no trailing newline).

    Raises:
        Exception: if inst.features is None or empty.
    """
    # Removed from the original: a leftover debug breakpoint
    # (`if query_id == 0: a=0`), the unused `is_single_sent` local, and
    # commented-out alternative loops.
    features, relevance, query_id, source_indices, inst_id = inst.features, inst.relevance, inst.qid, inst.source_indices, inst.inst_id
    if features is None or len(features) == 0:
        raise Exception('features has no elements')
    out_str = str(relevance) + ' qid:' + str(query_id)
    for feat_idx, feat in enumerate(features):
        # Sparse output: skip zeros, but always emit the final feature.
        if feat != 0 or feat_idx == len(features) - 1:
            out_str += ' %d:%f' % (feat_idx + 1, feat)
    # Trailing comment section carries bookkeeping the ranker ignores.
    out_str += ' #source_indices:'
    out_str += ' '.join(str(source_idx) for source_idx in source_indices)
    out_str += ',inst_id:' + str(inst_id)
    return out_str
class Lambdamart_Instance:
    """One candidate summary-sentence selection for LambdaMART ranking.

    Attributes:
        features: feature vector for this candidate (list of numbers).
        relevance: graded relevance label for training.
        qid: query (article) id grouping candidates for the ranker.
        source_indices: article sentence index/indices forming the candidate.
        inst_id: per-qid sequential id; -1 until assign_inst_ids() sets it.
    """
    def __init__(self, features, relevance, qid, source_indices):
        self.features = features
        self.relevance = relevance
        self.qid = qid
        self.source_indices = source_indices
        self.inst_id = -1

    def __repr__(self):
        # Debug-friendly summary; features are omitted (often very long).
        return 'Lambdamart_Instance(qid=%r, relevance=%r, source_indices=%r, inst_id=%r)' % (
            self.qid, self.relevance, self.source_indices, self.inst_id)
def assign_inst_ids(instances):
    """Assign each instance a sequential per-query id.

    Instances sharing a qid are numbered 0, 1, 2, ... in iteration order,
    overwriting their inst_id attribute in place.
    """
    next_id_for_qid = {}
    for inst in instances:
        assigned = next_id_for_qid.get(inst.qid, 0)
        inst.inst_id = assigned
        next_id_for_qid[inst.qid] = assigned + 1
def sentences_have_overlap(article_sent_tokens, s1, s2, min_matched_tokens):
    """True iff sentences s1 and s2 share at least min_matched_tokens
    non-stopword unigrams."""
    matches, _ = util.matching_unigrams(
        article_sent_tokens[s1], article_sent_tokens[s2], should_remove_stop_words=True)
    return len(matches) >= min_matched_tokens
def filter_by_overlap(article_sent_tokens, possible_pairs):
    """Keep only the candidate pairs whose two sentences share enough
    non-stopword tokens (module-level min_matched_tokens)."""
    return [(s1, s2) for s1, s2 in possible_pairs
            if sentences_have_overlap(article_sent_tokens, s1, s2, min_matched_tokens)]
def get_coref_pairs(corefs):
    """Collect every pair of sentence indices whose sentences are linked by a
    coreference cluster.

    Each cluster contributes all combinations of the (0-based) sentence
    indices of its mentions; mentions carry 1-based 'sentNum'. Returns a set
    of (lower, higher) index tuples.
    """
    pairs_seen = set()
    for cluster in corefs:
        mention_sents = sorted({mention['sentNum'] - 1 for mention in cluster})
        pairs_seen.update(itertools.combinations(mention_sents, 2))
    return pairs_seen
def filter_by_entites(article_sent_tokens, possible_pairs, corefs):
    """Keep only the candidate pairs whose sentences are linked by a
    coreference chain. article_sent_tokens is unused but kept for a
    signature parallel to filter_by_overlap. Returns a set of pairs."""
    candidate_set = set(possible_pairs)
    return get_coref_pairs(corefs) & candidate_set
def filter_pairs_by_criteria(raw_article_sents, possible_pairs, corefs):
    """Keep candidate pairs that satisfy either criterion: token overlap or a
    shared coreference chain. Returns a de-duplicated list of pairs."""
    tokenized = [sent.split(' ') for sent in raw_article_sents]
    kept = set(filter_by_overlap(tokenized, possible_pairs))
    kept |= set(filter_by_entites(tokenized, possible_pairs, corefs))
    return list(kept)
def convert_article_to_lambdamart_features(ex):
    """Convert one article example into LambdaMART/LR training instances.

    Every positive source-sentence combination (single or pair) from the
    example's similar_source_indices becomes a relevance-1 instance;
    relevance-0 negatives are drawn either balanced against the positives
    (per the module-level ``balance`` / FLAGS settings) or exhaustively.

    Args:
        ex: Tuple (example, example_idx, single_feat_len, pair_feat_len,
            singles_and_pairs, out_path), as yielded by
            example_generator_extended.

    Returns:
        The sorted list of Lambdamart_Instance objects when FLAGS.lr is set;
        otherwise writes LambdaMART-formatted lines to
        '<out_path>/<example_idx:06d>.txt' and returns None.
    """
    example, example_idx, single_feat_len, pair_feat_len, singles_and_pairs, out_path = ex
    print(example_idx)
    raw_article_sents, similar_source_indices_list, summary_text, corefs, doc_indices = util.unpack_tf_example(example, names_to_types)
    article_sent_tokens = [util.process_sent(sent) for sent in raw_article_sents]
    # Fall back to single-document indices when doc_indices is missing or
    # inconsistent with the tokenized article length.
    num_tokens_total = len(util.flatten_list_of_lists(article_sent_tokens))
    if doc_indices is None:
        doc_indices = [0] * num_tokens_total
    doc_indices = [int(doc_idx) for doc_idx in doc_indices]
    if len(doc_indices) != num_tokens_total:
        doc_indices = [0] * num_tokens_total
    rel_sent_indices, _, _ = util.get_rel_sent_indices(doc_indices, article_sent_tokens)
    # Pairs are only allowed outside 'singles' mode.
    sentence_limit = 1 if FLAGS.singles_and_pairs == 'singles' else 2
    similar_source_indices_list = util.enforce_sentence_limit(similar_source_indices_list, sentence_limit)
    article_text = ' '.join(raw_article_sents)
    sent_term_matrix = util.get_doc_substituted_tfidf_matrix(tfidf_vectorizer, raw_article_sents, article_text, pca)
    out_str = ''
    instances = []
    if importance:
        importances = util.special_squash(util.get_tfidf_importances(tfidf_vectorizer, raw_article_sents, pca))
    # NOTE(review): `importances` is referenced below even when the
    # module-level `importance` flag is falsy; this relies on the
    # configuration always enabling it (or get_features tolerating a missing
    # value) -- verify.
    # Candidate combinations: all unordered sentence pairs (optionally
    # filtered) plus all singles.
    possible_pairs = list(itertools.combinations(range(len(raw_article_sents)), 2))
    if FLAGS.use_pair_criteria:
        possible_pairs = filter_pairs_by_criteria(raw_article_sents, possible_pairs, corefs)
    if FLAGS.sent_position_criteria:
        possible_pairs = filter_pairs_by_sent_position(possible_pairs, rel_sent_indices)
    possible_singles = [(i,) for i in range(len(raw_article_sents))]
    possible_combinations = possible_pairs + possible_singles
    positives = list(similar_source_indices_list)
    negatives = [ssi for ssi in possible_combinations if not (ssi in positives or ssi[::-1] in positives)]
    negative_pairs = [x for x in possible_pairs if not (x in similar_source_indices_list or x[::-1] in similar_source_indices_list)]
    negative_singles = [x for x in possible_singles if not (x in similar_source_indices_list or x[::-1] in similar_source_indices_list)]
    # Shuffled index stacks used to sample balanced negatives without
    # replacement (popped from the end).
    random_negative_pairs = np.random.permutation(len(negative_pairs)).tolist()
    random_negative_singles = np.random.permutation(len(negative_singles)).tolist()
    qid = example_idx
    for similar_source_indices in positives:
        # True sentence single/pair gets relevance 1.
        relevance = 1
        features = get_features(similar_source_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len, pair_feat_len, importances, singles_and_pairs)
        if features is None:
            continue
        instances.append(Lambdamart_Instance(features, relevance, qid, similar_source_indices))
        if FLAGS.dataset_name == 'xsum' and FLAGS.special_xsum_balance:
            # XSum: sample a fixed number of negatives per positive.
            neg_relevance = 0
            num_negative = 4
            if FLAGS.singles_and_pairs == 'singles':
                num_neg_singles = num_negative
                num_neg_pairs = 0
            else:
                # Integer division: these counts feed range() below (plain
                # '/' would produce floats under Python 3 and break range()).
                num_neg_singles = num_negative // 2
                num_neg_pairs = num_negative // 2
            for _ in range(num_neg_singles):
                if len(random_negative_singles) == 0:
                    continue
                negative_indices = negative_singles[random_negative_singles.pop()]
                neg_features = get_features(negative_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len, pair_feat_len, importances, singles_and_pairs)
                if neg_features is None:
                    continue
                instances.append(Lambdamart_Instance(neg_features, neg_relevance, qid, negative_indices))
            for _ in range(num_neg_pairs):
                if len(random_negative_pairs) == 0:
                    continue
                negative_indices = negative_pairs[random_negative_pairs.pop()]
                neg_features = get_features(negative_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len, pair_feat_len, importances, singles_and_pairs)
                if neg_features is None:
                    continue
                instances.append(Lambdamart_Instance(neg_features, neg_relevance, qid, negative_indices))
        elif balance:
            # Balanced mode: sample one negative of matching arity for each
            # positive single/pair.
            is_pair = len(similar_source_indices) == 2
            if is_pair:
                if len(random_negative_pairs) == 0:
                    continue
                negative_indices = negative_pairs[random_negative_pairs.pop()]
            else:
                if len(random_negative_singles) == 0:
                    continue
                negative_indices = negative_singles[random_negative_singles.pop()]
            neg_relevance = 0
            neg_features = get_features(negative_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len, pair_feat_len, importances, singles_and_pairs)
            if neg_features is None:
                continue
            instances.append(Lambdamart_Instance(neg_features, neg_relevance, qid, negative_indices))
    if not balance:
        # Unbalanced mode: every non-positive combination is a negative.
        for negative_indices in negatives:
            neg_relevance = 0
            # Bug fix: this call previously omitted rel_sent_indices,
            # shifting every later positional argument by one relative to
            # the other three get_features call sites above.
            neg_features = get_features(negative_indices, sent_term_matrix, article_sent_tokens, rel_sent_indices, single_feat_len, pair_feat_len, importances, singles_and_pairs)
            if neg_features is None:
                continue
            instances.append(Lambdamart_Instance(neg_features, neg_relevance, qid, negative_indices))
    sorted_instances = sorted(instances, key=lambda x: (x.qid, x.source_indices))
    assign_inst_ids(sorted_instances)
    if FLAGS.lr:
        return sorted_instances
    for instance in sorted_instances:
        out_str += format_to_lambdamart(instance, single_feat_len) + '\n'
    # NOTE(review): the file is opened in binary mode while out_str is text;
    # under Python 3 this write needs out_str.encode() -- confirm the target
    # interpreter version before changing the mode.
    with open(os.path.join(out_path, '%06d.txt' % example_idx), 'wb') as f:
        f.write(out_str)
def example_generator_extended(example_generator, total, single_feat_len, pair_feat_len, singles_and_pairs, out_path):
    """Wrap an example generator, attaching per-example conversion metadata.

    Yields (example, example_idx, single_feat_len, pair_feat_len,
    singles_and_pairs, out_path) tuples, stopping early once the
    module-level num_instances cap (when not -1) is reached.
    """
    for example_idx, example in enumerate(tqdm(example_generator, total=total)):
        if num_instances != -1 and example_idx >= num_instances:
            break
        yield (example, example_idx, single_feat_len, pair_feat_len, singles_and_pairs, out_path)
# ####Delete all flags before declare#####
#
# def del_all_flags(FLAGS):
# flags_dict = _flags()
# keys_list = [keys for keys in flags_dict]
# for keys in keys_list:
# __delattr__(keys)
# del_all_flags(FLAGS)
def main(unused_argv):
    """Build LambdaMART / logistic-regression training data for each split."""
    print('Running statistics on %s' % exp_name)
    if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)
    # Derive input/output dataset directory names from the singles/pairs mode.
    if FLAGS.singles_and_pairs == 'both':
        in_dataset = FLAGS.dataset_name
        out_dataset = FLAGS.dataset_name + '_both'
    else:
        in_dataset = FLAGS.dataset_name + '_singles'
        out_dataset = FLAGS.dataset_name + '_singles'
    if FLAGS.lr:
        # Logistic-regression mode overrides the output dataset name.
        out_dataset = FLAGS.dataset_name + '_lr'
    start_time = time.time()
    np.random.seed(random_seed)
    source_dir = os.path.join(data_dir, in_dataset)
    # Probe the feature extractors with a tiny two-sentence example to learn
    # the single/pair feature vector widths used downstream.
    ex_sents = ['single .', 'sentence .']
    article_text = ' '.join(ex_sents)
    sent_term_matrix = util.get_doc_substituted_tfidf_matrix(tfidf_vectorizer, ex_sents, article_text, pca)
    if FLAGS.singles_and_pairs == 'pairs':
        single_feat_len = 0
    else:
        single_feat_len = len(get_single_sent_features(0, sent_term_matrix, [['single','.'],['sentence','.']], [0,0], 0))
    if FLAGS.singles_and_pairs == 'singles':
        pair_feat_len = 0
    else:
        pair_feat_len = len(get_pair_sent_features([0,1], sent_term_matrix, [['single','.'],['sentence','.']], [0,0], [0, 0]))
    util.print_vars(single_feat_len, pair_feat_len)
    util.create_dirs(temp_dir)
    # Resolve which dataset splits to process.
    if FLAGS.dataset_split == 'all':
        dataset_splits = ['test', 'val', 'train']
    elif FLAGS.dataset_split == 'train_val':
        dataset_splits = ['val', 'train']
    else:
        dataset_splits = [FLAGS.dataset_split]
    for split in dataset_splits:
        source_files = sorted(glob.glob(source_dir + '/' + split + '*'))
        out_path = os.path.join(out_dir, out_dataset, split)
        if FLAGS.pca:
            out_path += '_pca'
        util.create_dirs(os.path.join(out_path))
        # Sharded datasets (cnn/newsroom/xsum) store ~1000 examples per file;
        # `total` is only used for the progress bar.
        total = len(source_files)*1000 if ('cnn' in in_dataset or 'newsroom' in in_dataset or 'xsum' in in_dataset) else len(source_files)
        example_generator = data.example_generator(source_dir + '/' + split + '*', True, False, should_check_valid=False)
        # for example in tqdm(example_generator, total=total):
        ex_gen = example_generator_extended(example_generator, total, single_feat_len, pair_feat_len, FLAGS.singles_and_pairs, out_path)
        print('Creating list')
        ex_list = [ex for ex in ex_gen]
        if FLAGS.num_instances != -1:
            ex_list = ex_list[:FLAGS.num_instances]
        print('Converting...')
        # all_features = pool.map(convert_article_to_lambdamart_features, ex_list)
        # all_features = ray.get([convert_article_to_lambdamart_features.remote(ex) for ex in ex_list])
        if FLAGS.lr:
            # LR mode: collect the instances in memory and save an (N, F+1)
            # array with the relevance label appended as the last column.
            all_instances = list(futures.map(convert_article_to_lambdamart_features, ex_list))
            all_instances = util.flatten_list_of_lists(all_instances)
            x = [inst.features for inst in all_instances]
            x = np.array(x)
            y = [inst.relevance for inst in all_instances]
            y = np.expand_dims(np.array(y), 1)
            x_y = np.concatenate((x, y), 1)
            # NOTE(review): `writer` is not assigned anywhere above this line
            # inside main (it is only opened further below); unless it exists
            # as a module-level global this branch raises NameError -- verify.
            np.save(writer, x_y)
        else:
            # LambdaMART mode: the workers write one text file per example.
            list(futures.map(convert_article_to_lambdamart_features, ex_list))
            # writer.write(''.join(all_features))
        # all_features = []
        # for example in tqdm(ex_gen, total=total):
        # all_features.append(convert_article_to_lambdamart_features(example))
        # all_features = util.flatten_list_of_lists(all_features)
        # num1 = sum(x == 1 for x in all_features)
        # num2 = sum(x == 2 for x in all_features)
        # print 'Single sent: %d instances. Pair sent: %d instances.' % (num1, num2)
        # for example in tqdm(ex_gen, total=total):
        # features = convert_article_to_lambdamart_features(example)
        # writer.write(features)
        # Concatenate all per-example files into a single <split>.txt file.
        final_out_path = out_path + '.txt'
        file_names = sorted(glob.glob(os.path.join(out_path, '*')))
        writer = open(final_out_path, 'wb')
        for file_name in tqdm(file_names):
            with open(file_name) as f:
                text = f.read()
            # NOTE(review): `text` comes from a text-mode read but `writer`
            # is binary-mode; under Python 3 this requires text.encode() --
            # verify the target interpreter version.
            writer.write(text)
        writer.close()
    util.print_execution_time(start_time)
if __name__ == '__main__':
    # Parse command-line flags and dispatch to main().
    app.run(main)
| 44.453358 | 194 | 0.669073 |
acfbcb6a29c85c7b7ab2fdc4778e6e59b90cfaa0 | 13,138 | py | Python | DQMOffline/Trigger/python/MuonMonitor_cff.py | snessis/cmssw | f84137335f59a16d4cf9979547f0d77180f4e7e1 | [
"Apache-2.0"
] | null | null | null | DQMOffline/Trigger/python/MuonMonitor_cff.py | snessis/cmssw | f84137335f59a16d4cf9979547f0d77180f4e7e1 | [
"Apache-2.0"
] | null | null | null | DQMOffline/Trigger/python/MuonMonitor_cff.py | snessis/cmssw | f84137335f59a16d4cf9979547f0d77180f4e7e1 | [
"Apache-2.0"
] | 1 | 2021-11-23T09:25:45.000Z | 2021-11-23T09:25:45.000Z | import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.MuonMonitor_cfi import hltMuonmonitoring
TrkMu12_DoubleTrkMu5NoFiltersNoVtx_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/TrkMu12_DoubleTrkMu5NoFiltersNoVtx/'
)
TrkMu12_DoubleTrkMu5NoFiltersNoVtx_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_TrkMu12_DoubleTrkMu5NoFiltersNoVtx_v*")
TrkMu12_DoubleTrkMu5NoFiltersNoVtx_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFJet40_v*","HLT_PFJet60_v*","HLT_PFJet80_v*")
TrkMu16_DoubleTrkMu6NoFiltersNoVtx_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/TrkMu16_DoubleTrkMu6NoFiltersNoVtx/'
)
TrkMu16_DoubleTrkMu6NoFiltersNoVtx_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_TrkMu16_DoubleTrkMu6NoFiltersNoVtx_v*")
TrkMu16_DoubleTrkMu6NoFiltersNoVtx_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_TrkMu12_DoubleTrkMu5NoFiltersNoVtx_v*")
TrkMu17_DoubleTrkMu8NoFiltersNoVtx_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/TrkMu17_DoubleTrkMu8NoFiltersNoVtx/'
)
TrkMu17_DoubleTrkMu8NoFiltersNoVtx_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_TrkMu17_DoubleTrkMu8NoFiltersNoVtx_v*")
TrkMu17_DoubleTrkMu8NoFiltersNoVtx_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_TrkMu12_DoubleTrkMu5NoFiltersNoVtx_v*")
DoubleMu43NoFiltersNoVtx_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/DoubleMu43NoFiltersNoVtx/',
nmuons = 2
)
DoubleMu43NoFiltersNoVtx_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleMu43NoFiltersNoVtx_v*")
DoubleMu43NoFiltersNoVtx_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
DoubleMu48NoFiltersNoVtx_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/DoubleMu48NoFiltersNoVtx/',
nmuons = 2
)
DoubleMu48NoFiltersNoVtx_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleMu48NoFiltersNoVtx_v*")
DoubleMu48NoFiltersNoVtx_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
DoubleMu33NoFiltersNoVtxDisplaced_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/DoubleMu33NoFiltersNoVtxDisplaced/',
nmuons = 2
)
DoubleMu33NoFiltersNoVtxDisplaced_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleMu33NoFiltersNoVtxDisplaced_v*")
DoubleMu33NoFiltersNoVtxDisplaced_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
DoubleMu40NoFiltersNoVtxDisplaced_monitoring = hltMuonmonitoring.clone(
FolderName = 'HLT/EXO/DoubleMu40NoFiltersNoVtxDisplaced/',
nmuons = 2
)
DoubleMu40NoFiltersNoVtxDisplaced_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleMu40NoFiltersNoVtxDisplaced_v*")
DoubleMu40NoFiltersNoVtxDisplaced_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
#--------------------------------------------------
# DoubleL2Mu23NoVtx paths (with and without the cosmic-seeded variant),
# again against the PFMET/PFMHT reference denominators.
DoubleL2Mu23NoVtx_2Cha_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/DoubleL2Mu23NoVtx_2Cha/',
    nmuons = 2
)
DoubleL2Mu23NoVtx_2Cha_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleL2Mu23NoVtx_2Cha_v*")
DoubleL2Mu23NoVtx_2Cha_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
DoubleL2Mu23NoVtx_2Cha_CosmicSeed_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/DoubleL2Mu23NoVtx_2Cha_CosmicSeed/',
    nmuons = 2
)
DoubleL2Mu23NoVtx_2Cha_CosmicSeed_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleL2Mu23NoVtx_2Cha_CosmicSeed_v*")
DoubleL2Mu23NoVtx_2Cha_CosmicSeed_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
#--------------------------------------------------
# Muon + photon cross-trigger monitors. Each trigger gets three monitors:
#   - the plain monitor (no extra offline cut),
#   - a *_MuLeg variant with eleSelection 'pt > NN', which tightens the
#     electron/photon leg offline so the muon leg is probed,
#   - a *_EleLeg variant with muonSelection 'pt > NN', which conversely
#     tightens the muon leg so the electron/photon leg is probed.
# All use the same PFMET/PFMHT reference denominators.
Mu43NoFiltersNoVtx_Photon43_CaloIdL_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtx_Photon43_CaloIdL/',
    nmuons = 1,
    nelectrons = 1
)
Mu43NoFiltersNoVtx_Photon43_CaloIdL_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtx_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtx_Photon43_CaloIdL_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu43NoFiltersNoVtx_Photon43_CaloIdL_MuLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtx_Photon43_CaloIdL_MuLeg/',
    nmuons = 1,
    nelectrons = 1,
    eleSelection = 'pt > 43'
)
Mu43NoFiltersNoVtx_Photon43_CaloIdL_MuLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtx_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtx_Photon43_CaloIdL_MuLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu43NoFiltersNoVtx_Photon43_CaloIdL_EleLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtx_Photon43_CaloIdL_EleLeg/',
    nmuons = 1,
    nelectrons = 1,
    muonSelection = 'pt > 43'
)
Mu43NoFiltersNoVtx_Photon43_CaloIdL_EleLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtx_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtx_Photon43_CaloIdL_EleLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu48NoFiltersNoVtx_Photon48_CaloIdL/',
    nmuons = 1,
    nelectrons = 1
)
Mu48NoFiltersNoVtx_Photon48_CaloIdL_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu48NoFiltersNoVtx_Photon48_CaloIdL_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_MuLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu48NoFiltersNoVtx_Photon48_CaloIdL_MuLeg/',
    nmuons = 1,
    nelectrons = 1,
    eleSelection = 'pt > 48'
)
Mu48NoFiltersNoVtx_Photon48_CaloIdL_MuLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu48NoFiltersNoVtx_Photon48_CaloIdL_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_MuLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_EleLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu48NoFiltersNoVtx_Photon48_CaloIdL_EleLeg/',
    nmuons = 1,
    nelectrons = 1,
    muonSelection = 'pt > 48'
)
Mu48NoFiltersNoVtx_Photon48_CaloIdL_EleLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu48NoFiltersNoVtx_Photon48_CaloIdL_v*")
Mu48NoFiltersNoVtx_Photon48_CaloIdL_EleLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
# Displaced muon + photon variants (same three-monitor structure).
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL/',
    nmuons = 1,
    nelectrons = 1
)
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_v*")
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_MuLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_MuLeg/',
    nmuons = 1,
    nelectrons = 1,
    eleSelection = 'pt > 38'
)
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_MuLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_v*")
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_MuLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_EleLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_EleLeg/',
    nmuons = 1,
    nelectrons = 1,
    muonSelection = 'pt > 38'
)
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_EleLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_v*")
Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_EleLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL/',
    nmuons = 1,
    nelectrons = 1
)
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_MuLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_MuLeg/',
    nmuons = 1,
    nelectrons = 1,
    eleSelection = 'pt > 43'
)
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_MuLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_MuLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_EleLeg_monitoring = hltMuonmonitoring.clone(
    FolderName = 'HLT/EXO/Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_EleLeg/',
    nmuons = 1,
    nelectrons = 1,
    muonSelection = 'pt > 43'
)
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_EleLeg_monitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_v*")
Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_EleLeg_monitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFMET120_PFMHT120_IDTight_v*","HLT_PFMETTypeOne120_PFMHT120_IDTight_v*","HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v*")
# Sequence collecting all EXO muon trigger monitors defined above so they
# can be scheduled together in the DQM offline configuration.
exoHLTMuonmonitoring = cms.Sequence(
    TrkMu12_DoubleTrkMu5NoFiltersNoVtx_monitoring
    + TrkMu16_DoubleTrkMu6NoFiltersNoVtx_monitoring
    + TrkMu17_DoubleTrkMu8NoFiltersNoVtx_monitoring
    + DoubleMu43NoFiltersNoVtx_monitoring
    + DoubleMu48NoFiltersNoVtx_monitoring
    + DoubleMu33NoFiltersNoVtxDisplaced_monitoring
    + DoubleMu40NoFiltersNoVtxDisplaced_monitoring
    + Mu43NoFiltersNoVtx_Photon43_CaloIdL_monitoring
    + Mu48NoFiltersNoVtx_Photon48_CaloIdL_monitoring
    + Mu43NoFiltersNoVtx_Photon43_CaloIdL_MuLeg_monitoring
    + Mu48NoFiltersNoVtx_Photon48_CaloIdL_MuLeg_monitoring
    + Mu43NoFiltersNoVtx_Photon43_CaloIdL_EleLeg_monitoring
    + Mu48NoFiltersNoVtx_Photon48_CaloIdL_EleLeg_monitoring
    + Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_monitoring
    + Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_monitoring
    + Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_MuLeg_monitoring
    + Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_MuLeg_monitoring
    + Mu38NoFiltersNoVtxDisplaced_Photon38_CaloIdL_EleLeg_monitoring
    + Mu43NoFiltersNoVtxDisplaced_Photon43_CaloIdL_EleLeg_monitoring
    + DoubleL2Mu23NoVtx_2Cha_monitoring
    + DoubleL2Mu23NoVtx_2Cha_CosmicSeed_monitoring
)
| 63.468599 | 252 | 0.863602 |
acfbcbf2cae50ae7e2f13aee4acbe16bd973c4f7 | 39,236 | py | Python | tensorflow/python/distribute/parameter_server_strategy_v2.py | TOT0RoKR/tensorflow | 12c2babf7dccc00c13d6e297c0f792f89f7408aa | [
"Apache-2.0"
] | 2 | 2021-07-03T07:55:19.000Z | 2021-07-03T08:05:15.000Z | tensorflow/python/distribute/parameter_server_strategy_v2.py | TOT0RoKR/tensorflow | 12c2babf7dccc00c13d6e297c0f792f89f7408aa | [
"Apache-2.0"
] | 2 | 2021-12-16T09:59:49.000Z | 2022-02-10T06:41:28.000Z | tensorflow/python/distribute/parameter_server_strategy_v2.py | TOT0RoKR/tensorflow | 12c2babf7dccc00c13d6e297c0f792f89f7408aa | [
"Apache-2.0"
] | 1 | 2020-06-18T07:58:29.000Z | 2020-06-18T07:58:29.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parameter server strategy V2 class.
This is currently under development and the API is subject to change.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_run
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import ps_values
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import values
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Task types recognized for parameter server training clusters; the class
# docstring below describes the roles of "chief", "worker", and "ps".
ALLOWED_TASK_TYPES = ("chief", "worker", "ps")
@tf_export("distribute.experimental.ParameterServerStrategy", v1=[])
class ParameterServerStrategyV2(distribute_lib.Strategy):
"""An multi-worker tf.distribute strategy with parameter servers.
Parameter server training is a common data-parallel method to scale up a
machine learning model on multiple machines. A parameter server training
cluster consists of workers and parameter servers. Variables are created on
parameter servers and they are read and updated by workers in each step.
By default, workers read and update these variables independently without
synchronizing with each other. Under this configuration, it is known as
asynchronous training.
In TensorFlow 2, we recommend an architecture based on central coordination
for parameter server training. Each worker and parameter server runs a
`tf.distribute.Server`, and on top of that, a coordinator task is responsible
for creating resources on workers and parameter servers, dispatching
functions, and coordinating the training. The coordinator uses a
`tf.distribute.experimental.coordinator.ClusterCoordinator` to coordinate the
cluster, and a `tf.distribute.experimental.ParameterServerStrategy` to define
variables on parameter servers and computation on workers.
For the training to work, the coordinator dispatches `tf.function`s to be
executed on remote workers. Upon receiving requests from the coordinator, a
worker executes the `tf.function` by reading the variables from parameter
servers, executing the ops, and updating the variables on the parameter
servers. Each of the worker only processes the requests from the coordinator,
and communicates with parameter servers, without direct interactions with
other workers in the cluster.
As a result, failures of some workers do not prevent the cluster from
continuing the work, and this allows the cluster to train with instances that
can be occasionally unavailable (e.g. preemptible or spot instances). The
coordinator and parameter servers though, must be available at all times for
the cluster to make progress.
Note that the coordinator is not one of the training workers. Instead, it
  creates resources such as variables and datasets, dispatches `tf.function`s,
saves checkpoints and so on. In addition to workers, parameter servers and
the coordinator, an optional evaluator can be run on the side that
periodically reads the checkpoints saved by the coordinator and runs
evaluations against each checkpoint.
`ParameterServerStrategy` is supported with two training APIs: [Custom
Training Loop (CTL)]
(https://www.tensorflow.org/tutorials/distribute/custom_training)
and [Keras Training API, also known as `Model.fit`]
(https://www.tensorflow.org/tutorials/distribute/keras). CTL is recommended
when users prefer to define the details of their training loop, and
`Model.fit` is recommended when users prefer a high-level abstraction and
handling of training.
When using a CTL, `ParameterServerStrategy` has to work in conjunction with a
`tf.distribute.experimental.coordinator.ClusterCoordinator` object.
When using `Model.fit`, currently only the
`tf.keras.utils.experimental.DatasetCreator` input type is supported.
__Example code for coordinator__
This section provides code snippets that are intended to be run on (the only)
one task that is designated as the coordinator. Note that `cluster_resolver`,
`variable_partitioner`, and `dataset_fn` arguments are explained in the
following "Cluster setup", "Variable partitioning", and "Dataset preparation"
sections.
With a CTL,
```python
# Prepare a strategy to use with the cluster and variable partitioning info.
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...,
variable_partitioner=...)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy=strategy)
# Prepare a distribute dataset that will place datasets on the workers.
distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn=...)
with strategy.scope():
model = ...
optimizer, metrics = ... # Keras optimizer/metrics are great choices
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2)
# `load_checkpoint` infers initial epoch from `optimizer.iterations`.
initial_epoch = load_checkpoint(checkpoint_manager) or 0
@tf.function
def worker_fn(iterator):
def replica_fn(inputs):
batch_data, labels = inputs
# calculate gradient, applying gradient, metrics update etc.
strategy.run(replica_fn, args=(next(iterator),))
for epoch in range(initial_epoch, num_epoch):
distributed_iterator = iter(distributed_dataset) # Reset iterator state.
for step in range(steps_per_epoch):
# Asynchronously schedule the `worker_fn` to be executed on an arbitrary
# worker. This call returns immediately.
coordinator.schedule(worker_fn, args=(distributed_iterator,))
# `join` blocks until all scheduled `worker_fn`s finish execution. Once it
# returns, we can read the metrics and save checkpoints as needed.
coordinator.join()
logging.info('Metric result: %r', metrics.result())
train_accuracy.reset_states()
checkpoint_manager.save()
```
With `Model.fit`,
```python
# Prepare a strategy to use with the cluster and variable partitioning info.
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...,
variable_partitioner=...)
# A dataset function takes a `input_context` and returns a `Dataset`
def dataset_fn(input_context):
dataset = tf.data.Dataset.from_tensors(...)
return dataset.repeat().shard(...).batch(...).prefetch(...)
# With `Model.fit`, a `DatasetCreator` needs to be used.
input = tf.keras.utils.experimental.DatasetCreator(dataset_fn=...)
with strategy.scope():
model = ... # Make sure the `Model` is created within scope.
model.compile(optimizer="rmsprop", loss="mse", steps_per_execution=..., ...)
# Optional callbacks to checkpoint the model, back up the progress, etc.
callbacks = [tf.keras.callbacks.ModelCheckpoint(...), ...]
# `steps_per_epoch` is required with `ParameterServerStrategy`.
model.fit(input, epochs=..., steps_per_epoch=..., callbacks=callbacks)
```
__Example code for worker and parameter servers__
In addition to the coordinator, there should be tasks designated as
"worker" or "ps". They should run the following code to start a TensorFlow
server, waiting for coordinator's requests:
```python
# Provide a `tf.distribute.cluster_resolver.ClusterResolver` that serves
# the cluster information. See below "Cluster setup" section.
cluster_resolver = ...
server = tf.distribute.Server(
cluster_resolver.cluster_spec(),
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol="grpc")
# Blocking the process that starts a server from exiting.
server.join()
```
__Cluster setup__
In order for the tasks in the cluster to know other tasks' addresses,
a `tf.distribute.cluster_resolver.ClusterResolver` is required to be used
in coordinator, worker, and ps. The
`tf.distribute.cluster_resolver.ClusterResolver` is responsible for providing
the cluster information, as well as the task type and id of the current task.
See `tf.distribute.cluster_resolver.ClusterResolver` for more information.
If `TF_CONFIG` environment variable is set, a
`tf.distribute.cluster_resolver.TFConfigClusterResolver` should be used as
well.
Since there are assumptions in
`tf.distribute.experimental.ParameterServerStrategy` around the naming of the
task types, "chief", "ps", and "worker" should be used in the
`tf.distribute.cluster_resolver.ClusterResolver` to refer to the coordinator,
parameter servers, and workers, respectively.
The following example demonstrates setting `TF_CONFIG` for the task designated
as a parameter server (task type "ps") and index 1 (the second task), in a
cluster with 1 chief, 2 parameter servers, and 3 workers. Note that it needs
to be set before the use of
`tf.distribute.cluster_resolver.TFConfigClusterResolver`.
Example code for cluster setup:
```python
os.environ['TF_CONFIG'] = '''
{
"cluster": {
"chief": ["chief.example.com:2222"],
"ps": ["ps0.example.com:2222", "ps1.example.com:2222"],
"worker": ["worker0.example.com:2222", "worker1.example.com:2222",
"worker2.example.com:2222"]
},
"task": {
"type": "ps",
"index": 1
}
}
'''
```
If you prefer to run the same binary for all tasks, you will need to let the
binary branch into different roles at the beginning of the program:
```python
# If coordinator, create a strategy and start the training program.
if cluster_resolver.task_type == 'chief':
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
...
# If worker/ps, create a server
elif cluster_resolver.task_type in ("worker", "ps"):
server = tf.distribute.Server(...)
...
```
Alternatively, you can also start a bunch of TensorFlow servers in advance and
connect to them later. The coordinator can be in the same cluster or on any
machine that has connectivity to workers and parameter servers. This is
covered in our guide and tutorial.
__Variable creation with `strategy.scope()`__
`tf.distribute.experimental.ParameterServerStrategy` follows the
`tf.distribute` API contract where variable creation is expected to be inside
the context manager returned by `strategy.scope()`, in order to be correctly
placed on parameter servers in a round-robin manner:
```python
# In this example, we're assuming having 3 ps.
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy=strategy)
# Variables should be created inside scope to be placed on parameter servers.
# If created outside scope such as `v1` here, it would be placed on the
# coordinator.
v1 = tf.Variable(initial_value=0.0)
with strategy.scope():
v2 = tf.Variable(initial_value=1.0)
v3 = tf.Variable(initial_value=2.0)
v4 = tf.Variable(initial_value=3.0)
v5 = tf.Variable(initial_value=4.0)
# v2 through v5 are created in scope and are distributed on parameter servers.
# Default placement is round-robin but the order should not be relied on.
assert v2.device == "/job:ps/replica:0/task:0/device:CPU:0"
assert v3.device == "/job:ps/replica:0/task:1/device:CPU:0"
assert v4.device == "/job:ps/replica:0/task:2/device:CPU:0"
assert v5.device == "/job:ps/replica:0/task:0/device:CPU:0"
```
See `distribute.Strategy.scope` for more information.
__Variable partitioning__
Having dedicated servers to store variables means being able to divide up, or
"shard" the variables across the ps. Partitioning large variable among ps is a
commonly used technique to boost training throughput and mitigate memory
constraints. It enables parallel computations and updates on different shards
of a variable, and often yields better load balancing across parameter
servers. Without sharding, models with large variables (e.g., embeddings) that
can't fit into one machine's memory would otherwise be unable to train.
With `tf.distribute.experimental.ParameterServerStrategy`, if a
`variable_partitioner` is provided to `__init__` and certain conditions are
satisfied, the resulting variables created in scope are sharded across the
parameter servers, in a round-robin fashion. The variable reference returned
from `tf.Variable` becomes a type that serves as the container of the sharded
variables. One can access `variables` attribute of this container for the
actual variable components. If building model with `tf.Module` or Keras,
the variable components are collected in the `variables` alike attributes.
It is recommended to use size-based partitioners like
`tf.distribute.experimental.partitioners.MinSizePartitioner` to avoid
partitioning small variables, which could have negative impact on model
training speed.
```python
# Partition the embedding layer into 2 shards.
variable_partitioner = (
tf.distribute.experimental.partitioners.MinSizePartitioner(
min_shard_bytes=(256 << 10),
max_shards = 2))
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...,
variable_partitioner = variable_partitioner)
with strategy.scope():
embedding = tf.keras.layers.Embedding(input_dim=1024, output_dim=1024)
assert len(embedding.variables) == 2
assert isinstance(embedding.variables[0], tf.Variable)
assert isinstance(embedding.variables[1], tf.Variable)
assert embedding.variables[0].shape == (512, 1024)
assert embedding.variables[1].shape == (512, 1024)
```
The sharded variable container can be converted to a `Tensor` via
`tf.convert_to_tensor`. This means the container can be directly used in most
Python Ops where such `Tensor` conversion automatically happens. For example,
in the above code snippet, `x * self.w` would implicitly apply the said tensor
conversion. Note that such conversion can be expensive, as the variable
components need to be transferred from multiple parameter servers to where
the value is used.
`tf.nn.embedding_lookup` on the other hand doesn't apply the tensor
conversion, and performs parallel lookups on the variable components instead.
This is crucial to scale up embedding lookups when the embedding table
variable is large.
When a partitioned variable is saved to a `SavedModel`, it will be saved as if
it is one single variable. This improves serving efficiency by eliminating
a number of Ops that handle the partition aspects.
Known limitations of variable partitioning:
* Number of partitions must not change across Checkpoint saving/loading.
* After saving partitioned variables to a SavedModel, the SavedModel can't be
loaded via `tf.saved_model.load`.
* Partition variable doesn't directly work with `tf.GradientTape`, please use
the `variables` attributes to get the actual variable components and use
them in gradient APIs instead.
__Dataset preparation__
With `tf.distribute.experimental.ParameterServerStrategy`, a dataset is
created in each of the workers to be used for training. This is done by
creating a `dataset_fn` that takes no argument and returns a
`tf.data.Dataset`, and passing the `dataset_fn` into
`tf.distribute.experimental.coordinator.
ClusterCoordinator.create_per_worker_dataset`. We recommend the dataset to be
shuffled and repeated to have the examples run through the training as evenly
as possible.
```python
def dataset_fn():
filenames = ...
dataset = tf.data.Dataset.from_tensor_slices(filenames)
# Dataset is recommended to be shuffled, and repeated.
return dataset.shuffle(buffer_size=...).repeat().batch(batch_size=...)
coordinator =
tf.distribute.experimental.coordinator.ClusterCoordinator(strategy=...)
distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)
```
__Limitations__
* `tf.distribute.experimental.ParameterServerStrategy` in TF2 is experimental,
and the API is subject to further changes.
* When using `Model.fit`, `tf.distribute.experimental.ParameterServerStrategy`
must be used with a `tf.keras.utils.experimental.DatasetCreator`, and
`steps_per_epoch` must be specified.
"""
# pyformat: disable
def __init__(self, cluster_resolver, variable_partitioner=None):
  """Initializes the TF2 parameter server strategy.

  This initializes the `tf.distribute.experimental.ParameterServerStrategy`
  object to be ready for use with
  `tf.distribute.experimental.coordinator.ClusterCoordinator`.

  Args:
    cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`
      object.
    variable_partitioner:
      a `distribute.experimental.partitioners.Partitioner` that specifies
      how to partition variables. If `None`, variables will not be
      partitioned.

      * Predefined partitioners in `tf.distribute.experimental.partitioners`
      can be used for this argument. A commonly used partitioner is
      `MinSizePartitioner(min_shard_bytes = 256 << 10, max_shards = num_ps)`,
      which allocates at least 256K per shard, and each ps gets at most one
      shard.

      * `variable_partitioner` will be called for each variable created under
      strategy `scope` to instruct how the variable should be partitioned.
      Variables that have only one partition along the partitioning axis
      (i.e., no need for partition) will be created as a normal `tf.Variable`.

      * Only the first / outermost axis partitioning is supported.

      * Div partition strategy is used to partition variables. Assuming we
      assign consecutive integer ids along the first axis of a variable, then
      ids are assigned to shards in a contiguous manner, while attempting to
      keep each shard size identical. If the ids do not evenly divide the
      number of shards, each of the first several shards will be assigned one
      more id. For instance, a variable whose first dimension is 13 has 13
      ids, and they are split across 5 shards as:
      `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.

      * Variables created under `strategy.extended.colocate_vars_with` will
      not be partitioned.
  """
  # pyformat: enable
  self._cluster_resolver = cluster_resolver
  # Fail fast on a malformed cluster spec (requires at least one "ps" and
  # one "worker" task; only chief/ps/worker task types are allowed).
  self._verify_args_and_config(cluster_resolver)
  # Populated later when a ClusterCoordinator adopts this strategy.
  self._cluster_coordinator = None
  logging.info(
      "`tf.distribute.experimental.ParameterServerStrategy` is initialized "
      "with cluster_spec: %s", cluster_resolver.cluster_spec())

  # TODO(b/167894802): Make coordinator, worker, and ps names customizable.
  self._connect_to_cluster(coordinator_name="chief")
  self._extended = ParameterServerStrategyV2Extended(self, cluster_resolver,
                                                     variable_partitioner)
  super(ParameterServerStrategyV2, self).__init__(self._extended)
  # Telemetry gauge recording that the V2 strategy is in use.
  distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
      "ParameterServerStrategy")
  # Checked by strategy internals to warn when used without a coordinator.
  self._should_use_with_coordinator = True
  # Used while constructing distributed iterators.
  self._canonicalize_devices = False
def _connect_to_cluster(self, coordinator_name):
  """Connect the coordinator process to every worker and ps task.

  Installs device filters so workers and ps tasks can only see each other
  and the coordinator, limits eager-client RPC concurrency, and records
  cluster-size metrics.
  """
  if coordinator_name in ["worker", "ps"]:
    raise ValueError("coordinator name should not be 'worker' or 'ps'.")

  cluster_spec = self._cluster_resolver.cluster_spec()
  cluster_dict = cluster_spec.as_dict()
  self._num_workers = len(cluster_dict.get("worker", ()))
  self._num_ps = len(cluster_dict.get("ps", ()))

  coordinator_job = "/job:%s" % coordinator_name
  device_filters = server_lib.ClusterDeviceFilters()
  # Workers may only see devices on ps tasks and on the coordinator.
  for worker_index in range(self._num_workers):
    device_filters.set_device_filters(
        "worker", worker_index, ["/job:ps", coordinator_job])
  # Likewise, each ps task may only see workers and the coordinator.
  for ps_index in range(self._num_ps):
    device_filters.set_device_filters(
        "ps", ps_index, ["/job:worker", coordinator_job])

  # Allow at most one outstanding RPC for each worker at a certain time.
  # This is to simplify worker failure handling in the runtime.
  os.environ["TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"] = "False"

  logging.info("%s is now connecting to cluster with cluster_spec: %r",
               self.__class__.__name__, cluster_spec)
  remote.connect_to_cluster(
      cluster_spec,
      job_name=coordinator_name,
      protocol=self._cluster_resolver.rpc_layer,
      cluster_device_filters=device_filters)

  distribute_lib.distribution_strategy_replica_gauge.get_cell(
      "ps_strategy_num_workers").set(self._num_workers)
  distribute_lib.distribution_strategy_replica_gauge.get_cell(
      "ps_strategy_num_ps").set(self._num_ps)
def _verify_args_and_config(self, cluster_resolver):
  """Validate the resolver's cluster spec before connecting to it."""
  cluster_spec = cluster_resolver.cluster_spec()
  if not cluster_spec:
    raise ValueError("Cluster spec must be non-empty in "
                     "`tf.distribute.cluster_resolver.ClusterResolver`.")

  # Rejects any task type other than chief/ps/worker and bad task ids.
  multi_worker_util._validate_cluster_spec(  # pylint: disable=protected-access
      cluster_spec,
      cluster_resolver.task_type,
      cluster_resolver.task_id)

  for required_task_type in ("ps", "worker"):
    if multi_worker_util.task_count(cluster_spec, required_task_type) < 1:
      raise ValueError("There must be at least one %s." % required_task_type)
class ParameterServerStrategyV2Extended(
    parameter_server_strategy.ParameterServerStrategyExtended):
  """Extended class for ParameterServerStrategyV2.

  Please see `tf.distribute.StrategyExtended` doc for more information.
  """

  def __init__(self, container_strategy, cluster_resolver,
               variable_partitioner):
    """Initialization of ParameterServerStrategyV2Extended."""
    super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)
    # Cluster sizes read from the resolver's cluster spec.
    self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get("ps", []))
    self._num_workers = len(cluster_resolver.cluster_spec().as_dict().get(
        "worker", []))
    # Monotonic counter driving round-robin variable placement on ps tasks.
    self._variable_count = 0

    self._variable_partitioner = variable_partitioner
    # The following two attrs are to verify that `ParameterServerStrategy`
    # methods are properly used with a `ClusterCoordinator`.
    self._used_with_coordinator = False
    self._being_scheduled = False
    self._set_num_gpus()
    # Don't canonicalize the devices here since this code is executed on Chief,
    # but we want the reduce evaluation to be done on each worker. Placer will
    # automatically choose the right device based on current context.
    # TODO(ishark): Use select_cross_device_ops instead.
    self._cross_device_ops = cross_device_ops_lib.ReductionToOneDevice(
        reduce_to_device="/device:CPU:0")
    self._cross_device_ops._canonicalize_devices = False  # pylint: disable=protected-access
    self._allow_run_without_coordinator = False
def _set_num_gpus(self):
  """Count GPUs on worker tasks and require the count to be uniform."""
  per_worker_gpus = {}
  for device in config.list_logical_devices("GPU"):
    spec = tf_device.DeviceSpec.from_string(device.name)
    if spec.device_type == "GPU" and spec.job == "worker":
      # TODO(b/167894802): update if worker name is customizable
      worker_key = spec.replace(device_type=None, device_index=None)
      per_worker_gpus[worker_key] = per_worker_gpus.get(worker_key, 0) + 1

  num_gpus = 0
  for count in per_worker_gpus.values():
    # Every worker must expose the same number of GPUs.
    if num_gpus > 0 and count != num_gpus:
      raise ValueError("Mismatched number of GPUs per worker")
    num_gpus = count

  self._num_gpus_per_worker = num_gpus
  logging.info(f"Number of GPUs on workers: {self._num_gpus_per_worker}")
@property
def _num_replicas_in_sync(self):
  # One replica per worker GPU; a CPU-only worker counts as a single replica.
  return self._num_gpus_per_worker or 1
def _create_var_creator(self, next_creator, **kwargs):
  """Return a variable creator matched to the per-worker replica count.

  With more than one replica per worker, created variables are wrapped in
  `ps_values.AggregatingVariable` (around a `ps_values.CachingVariable`) so
  cross-replica updates honor the requested aggregation mode. With a single
  replica, only the caching wrapper is applied.

  Args:
    next_creator: the next variable creator in the creator-scope chain.
    **kwargs: keyword arguments destined for `next_creator`; the
      `aggregation` entry is consumed here.

  Returns:
    A callable with the variable-creator signature.

  Raises:
    ValueError: in the multi-replica case, if `aggregation` is not one of
      the supported `VariableAggregation` modes.
  """
  aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)

  def var_creator(**kwargs):
    """Create an AggregatingVariable."""
    # Create and wrap the variable.
    v = next_creator(**kwargs)
    wrapped_v = ps_values.CachingVariable(v)
    wrapped = ps_values.AggregatingVariable(self._container_strategy(),
                                            wrapped_v, aggregation)
    return wrapped

  if self._num_replicas_in_sync > 1:
    if aggregation not in (
        vs.VariableAggregation.NONE,
        vs.VariableAggregation.SUM,
        vs.VariableAggregation.MEAN,
        vs.VariableAggregation.ONLY_FIRST_REPLICA
    ):
      # Bug fix: the original built this message via `"..." + aggregation`,
      # which raises TypeError (str + enum) instead of the intended
      # ValueError. Use %-formatting so the enum is stringified.
      raise ValueError("Invalid variable aggregation mode: %r for "
                       "variable: %s" % (aggregation, kwargs["name"]))
    return var_creator
  else:

    def variable_creator_single_replica(**kwargs):
      v = next_creator(**kwargs)
      return ps_values.CachingVariable(v)

    return variable_creator_single_replica
def _create_variable(self, next_creator, **kwargs):
  """Implements StrategyExtendedV2._create_variable.

  Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
  created if satisfying all the following criteria:
    1. `self._variable_partitioner` results in more than one partition on the
       first axis.
    2. variable's rank is greater than 0.
    3. variable is not colocated with another variable.
  Otherwise a `Variable` will be created.

  Args:
    next_creator: See `variable_scope.variable_creator_scope`; the next
      creator in the chain.
    **kwargs: Passed through to the next creator.

  Returns:
    A `Variable` or `ShardedVariable`.
  """
  var_creator = self._create_var_creator(next_creator, **kwargs)
  if "colocate_with" in kwargs:  # Never partition colocated_with variables.
    colocate_with = kwargs["colocate_with"]
    # Clear the variable scope to avoid possible conflicts between device
    # scope and colocation scope.
    with ops.device(None):
      with ops.colocate_with(colocate_with):
        var = var_creator(**kwargs)
        logging.debug(
            "Creating variable (name:%s, shape:%r) that colocates with %s",
            var.name, var.shape, kwargs["colocate_with"].name)
        return var

  # No partitioner configured: place the whole variable on one ps task.
  if self._variable_partitioner is None:
    return self._create_variable_round_robin(var_creator, **kwargs)

  name = kwargs.get("name", None)
  initial_value = kwargs.get("initial_value", None)
  if initial_value is None:
    raise ValueError(
        "It looks like you are using `ParameterServerStrategy` with a "
        "`variable_partitioner`, and trying to create a variable without "
        "specifying `initial_value`. This is not allowed. Please specify the "
        "`initial_value`. This can also happen if you are trying to load a "
        "saved_model within a `ParameterServerStrategy` scope. Loading a "
        "saved_model with `variable_partitioner` is not supported.")

  # Two cases where initial_value can be a callable:
  #   1. initial_value is passed as a callable, e.g, an `initializer` class.
  #   2. restoring from checkpoint, initial_value is a
  #     "CheckpointInitialValueCallable".
  init_from_fn = callable(initial_value)

  dtype = kwargs.get("dtype", None)
  shape = kwargs.get("shape", None)
  # Without an explicit shape/dtype we cannot partition lazily; force the
  # initializer to produce the concrete full value now.
  if init_from_fn and (shape is None or dtype is None):
    init_from_fn = False
    initial_value = initial_value()
  if not init_from_fn:
    # The initial_value is created on coordinator, it will need to be sent to
    # ps for variable initialization, which can be inefficient and can
    # potentially hit the 2GB limit on protobuf serialization.
    initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
    dtype = initial_value.dtype
    shape = initial_value.shape
  else:
    shape = tensor_shape.as_shape(shape)

  if shape.rank == 0:  # Skip partitioning rank-0 variable.
    return self._create_variable_round_robin(var_creator, **kwargs)

  num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
  # The partitioner must split only the first (outermost) axis.
  if not num_partitions or num_partitions[0] == 0 or any(
      v != 1 for v in num_partitions[1:]):
    raise ValueError(
        "variable_partitioner must return a list/tuple whose elements are 1"
        " besides the first element (non-zero), got: %r" % num_partitions)

  if num_partitions[0] == 1:  # no partition
    return self._create_variable_round_robin(var_creator, **kwargs)

  # Use "div" partition strategy to partition the variable.
  num_partitions = min(num_partitions[0], shape[0])
  base = shape[0] // num_partitions
  extra = shape[0] % num_partitions
  # An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]
  # offsets: [0, 3, 6, 8, 10]
  offsets = []
  for i in range(num_partitions):
    if i == 0:
      offsets.append(0)
    else:
      prev_shard_size = base + (1 if i - 1 < extra else 0)
      offsets.append(offsets[i - 1] + prev_shard_size)
  offsets.append(shape[0])

  def init_shard_fn(shard_index):
    # Produce the initial value for one shard, slicing the full value when
    # the initializer cannot produce a partition directly.
    if not init_from_fn:
      logging.log_if(
          logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and
          shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
      return initial_value[offsets[shard_index]:offsets[shard_index + 1]]
    partition_shape = (offsets[shard_index + 1] -
                       offsets[shard_index],) + shape[1:]
    partition_offset = (offsets[shard_index],) + (0,) * len(shape[1:])
    arg_spec = tf_inspect.getfullargspec(initial_value)
    if ("shard_info" not in arg_spec.args and
        "shard_info" not in arg_spec.kwonlyargs):
      # `shard_info` is not accepted: try the partition kwargs protocol.
      try:
        value = initial_value(
            partition_shape=partition_shape,
            partition_offset=partition_offset)
      except (TypeError, ValueError):
        # TypeError: Initializer doesn't accept kwargs
        # ValueError: Initializer doesn't accept partition kwargs
        # In both cases we go ahead creating the full value and then slice.
        value = initial_value()

      if value.shape == partition_shape:
        # Initializer supports partition: value is the partition value.
        return value
      else:
        # Initializer doesn't support partition: value is the full value
        # and needs to be sliced to get the partition value.
        logging.log_if(
            logging.WARN, _INEFFICIENT_INIT_WARNING % name,
            shard_index == 0 and
            shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
        return value[offsets[shard_index]:offsets[shard_index + 1]]
    else:
      # For compatibility with `CheckpointInitialValueCallable`.
      return initial_value(
          shard_info=trackable.ShardInfo(
              shape=tensor_shape.as_shape(partition_shape),
              offset=partition_offset))

  var_list = []
  for i in range(num_partitions):
    kwargs["shape"] = (offsets[i + 1] - offsets[i],) + shape[1:]
    # NOTE(review): this lambda closes over the loop variable `i`
    # (late binding); presumably safe because the variable constructor
    # invokes `initial_value` within the same iteration — confirm before
    # refactoring to lazy invocation.
    kwargs["initial_value"] = lambda: init_shard_fn(i)
    if name is not None:
      kwargs["name"] = "{}/part_{}".format(name, i)
    var_list.append(self._create_variable_round_robin(var_creator, **kwargs))

  result = sharded_variable.ShardedVariable(var_list)
  return result
def _create_variable_round_robin(self, next_creator, **kwargs):
  """Create one variable, pinned to the next ps task in round-robin order."""
  ps_task_index = self._variable_count % self._num_ps
  # Clear the colocation scope to avoid possible conflicts between device
  # scope and colocation scope.
  with ops.colocate_with(None, ignore_existing=True):
    # Explicitly set CPU:0 device for PS in case create variable is called
    # inside replica_fn and worker has with GPU:0 scope.
    with ops.device("/job:ps/task:%d/device:CPU:0" % ps_task_index):
      created_var = next_creator(**kwargs)
      logging.debug(
          "Creating variable (name:%s, shape:%r) on "
          "/job:ps/task:%d/device:CPU:0",
          created_var.name, created_var.shape, ps_task_index)
  self._variable_count += 1
  return created_var
def _assert_used_with_cluster_coordinator(self):
if (not self._used_with_coordinator and
not self._allow_run_without_coordinator):
raise NotImplementedError(
"`tf.distribute.experimental.ParameterServerStrategy` must be used "
"with `tf.distribute.experimental.coordinator.ClusterCoordinator` in "
"a custom training loop. If you are using `Model.fit`, please supply "
"a dataset function directly to a "
"`tf.keras.utils.experimental.DatasetCreator` instead.")
def _assert_being_scheduled_by_cluster_coordinator(self):
if not self._being_scheduled and not self._allow_run_without_coordinator:
logging.warning(
"It is detected that a function used with "
"`tf.distribute.experimental.ParameterServerStrategy` "
"is executed locally on the coordinator. This is inefficient but may "
"be valid for one-off tasks such as inferring output signature. "
"To properly distribute functions to run on workers, `run` or "
"`reduce` should be used within a function passed to `"
"tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`."
)
# options is not used right now. But we may want to support options while
# creating InputWorkers in future, similar to MirroredStrategy.
def _input_workers_with_options(self, options=None):
  # One InputWorkers entry: datasets are built on the local CPU and feed
  # this worker's compute devices.
  input_workers_devices = (
      ("/device:CPU:0", self.worker_devices),)
  return input_lib.InputWorkers(
      input_workers_devices, canonicalize_devices=False)
def _experimental_distribute_dataset(self, dataset, options):
  """Distributes `dataset`; must run inside a per-worker dataset function."""
  self._assert_used_with_cluster_coordinator()
  # Must execute inside a `tf.function` being traced by
  # `create_per_worker_dataset`, not eagerly on the coordinator.
  if not ops.get_default_graph().building_function:
    raise ValueError(
        "The `experimental_distribute_dataset` method must be called inside "
        "a `tf.function` passed to `create_per_worker_dataset` of "
        "`tf.distribute.experimental.coordinator.ClusterCoordinator`")

  input_workers_devices = self._input_workers_with_options()

  return input_lib.get_distributed_dataset(
      dataset,
      input_workers_devices,
      self._container_strategy(),
      num_replicas_in_sync=self._num_replicas_in_sync,
      options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
  """Builds a distributed dataset from `dataset_fn`; per-worker only."""
  self._assert_used_with_cluster_coordinator()
  # Must execute inside a `tf.function` being traced by
  # `create_per_worker_dataset`, not eagerly on the coordinator.
  if not ops.get_default_graph().building_function:
    raise ValueError(
        "The `distribute_datasets_from_function` method must be called "
        "inside a `tf.function` passed to `create_per_worker_dataset` of "
        "`tf.distribute.experimental.coordinator.ClusterCoordinator`")

  # There is no synchronization beyond a worker and thus, the number of
  # input pipelines in sync is only 1 per worker.
  input_pipeline_id_in_sync = 0
  num_input_pipelines_in_sync = 1

  input_context = distribute_lib.InputContext(
      num_input_pipelines=num_input_pipelines_in_sync,
      input_pipeline_id=input_pipeline_id_in_sync,
      num_replicas_in_sync=self._num_replicas_in_sync)

  return input_lib.get_distributed_datasets_from_function(
      dataset_fn,
      self._input_workers_with_options(options),
      [input_context],
      self._container_strategy(),
      options=options)
@property
def worker_devices(self):
  """Compute devices on a worker: all its GPUs, or CPU:0 when it has none."""
  gpu_count = self._num_gpus_per_worker
  if gpu_count <= 0:
    return ("/device:CPU:0",)
  return tuple("/device:GPU:%d" % (index,) for index in range(gpu_count))
def _call_for_each_replica(self, fn, args, kwargs):
  # Warns when invoked outside a coordinator-scheduled function, then runs
  # `fn` once per local replica via the mirrored-run machinery.
  self._assert_being_scheduled_by_cluster_coordinator()
  return mirrored_run.call_for_each_replica(self._container_strategy(), fn,
                                            args, kwargs)
def _reduce(self, reduce_op, value):
  self._assert_being_scheduled_by_cluster_coordinator()
  # Reduce onto the current device (falling back to the default device or
  # CPU:0) and unwrap the per-replica result to a single local value.
  dst = device_util.current() or self._default_device or "/device:CPU:0"
  destinations = device_util.canonicalize_without_job_and_task(dst)
  result = self._local_results(
      self.reduce_to(reduce_op, value, destinations))[0]
  return result
def _reduce_to(self, reduce_op, value, destinations, options):
  self._assert_being_scheduled_by_cluster_coordinator()

  def get_values(x):
    # Only distributed values need a cross-device reduction; anything else
    # (e.g. a plain tensor) passes through unchanged.
    if isinstance(x, values.DistributedValues):
      return self._cross_device_ops.reduce(
          reduce_op, x, destinations=destinations)  # pylint: disable=protected-access
    return x

  return nest.map_structure(get_values, value)
# The warning that will be logged if the way we initialize sharded variables
# is memory-inefficient.
_INEFFICIENT_INIT_WARNING = (
    "Large variable %s is partitioned but not initialized in a "
    "memory-efficient way. On each shard, the full value is first being "
    "created and then sliced into smaller values. To reduce the memory "
    "footprint, explicitly specify `dtype` and `shape` when creating "
    "variables, and use `tf.initializers` to initialize the variable. "
    "Note that some initializers (e.g., orthogonal) don't support "
    "memory-efficient initialization and there is not much you can do here.")

# Element-count threshold above which the inefficient-init warning fires.
_LARGE_VARIABLE_NUM_ELEMENTS = 1e9
| 43.888143 | 92 | 0.726068 |
acfbcc1fe54db1df26014b4a11b3d13eaf6932cf | 873 | py | Python | Project_2-Data_Visualization/exercises/rw_visual.py | Vandeilsonln/Python-Crash-Course | 39b4f421504618f947672304a8e97edf7bc7f13d | [
"MIT"
] | null | null | null | Project_2-Data_Visualization/exercises/rw_visual.py | Vandeilsonln/Python-Crash-Course | 39b4f421504618f947672304a8e97edf7bc7f13d | [
"MIT"
] | null | null | null | Project_2-Data_Visualization/exercises/rw_visual.py | Vandeilsonln/Python-Crash-Course | 39b4f421504618f947672304a8e97edf7bc7f13d | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from random_walk import RandomWalk
# Keep making new walks, as long as the program is active.
while True:
    # Make a random walk, and plot the points.
    rw = RandomWalk(10000)
    rw.fill_walk()

    # Set the size of the plotting window
    plt.figure(figsize=(10, 6))

    # Color points by draw order so the walk's direction stays visible.
    point_numbers = list(range(0, rw.num_points))
    plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Wistia, edgecolor='none', s=2)

    # Emphasize the first and last points
    plt.scatter(0, 0, c='green', edgecolors='none', s=20)
    plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=20)

    # remove the axis
    plt.axes().get_xaxis().set_visible(False)
    plt.axes().get_yaxis().set_visible(False)

    plt.show()

    keep_running = input('Make another walk? (y/n): ')
    if keep_running == 'n':
        break
| 30.103448 | 101 | 0.666667 |
acfbcc9a50bc063ea3fb031a4e8b35d40a12f944 | 675 | py | Python | fiasco/util/decorators.py | eblur/fiasco | 4ab14756f285e3551bd52cf47a1c9b49eda8b798 | [
"BSD-3-Clause"
] | 1 | 2018-06-02T09:50:25.000Z | 2018-06-02T09:50:25.000Z | fiasco/util/decorators.py | eblur/fiasco | 4ab14756f285e3551bd52cf47a1c9b49eda8b798 | [
"BSD-3-Clause"
] | null | null | null | fiasco/util/decorators.py | eblur/fiasco | 4ab14756f285e3551bd52cf47a1c9b49eda8b798 | [
"BSD-3-Clause"
] | null | null | null | """
Useful function/method decorators
"""
from functools import wraps
__all__ = ['needs_dataset']
def needs_dataset(*names, default=None):
    """
    Decorator factory that skips a method when required atomic data is missing.

    Each name in ``names`` refers to an attribute on the decorated function's
    first argument (typically ``self``). Ion-specific datasets are stored as
    private attributes with a leading underscore (e.g. ``_elvlc``), while
    'abundance', 'ip' and 'ioneq' are stored under their plain names.

    Parameters
    ----------
    *names : str
        Dataset attribute names that must be present (not None).
    default : optional
        Value returned instead of calling the wrapped function when any
        required dataset attribute is None.
    """
    non_ion_datasets = ('abundance', 'ip', 'ioneq')
    # Map each requested name to the attribute it is actually stored under.
    required_attrs = [n if n in non_ion_datasets else f'_{n}' for n in names]

    def decorator(func):
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            obj = args[0]
            # getattr is the idiomatic equivalent of __getattribute__ here;
            # a generator avoids building an intermediate list inside any().
            if any(getattr(obj, attr) is None for attr in required_attrs):
                return default
            return func(*args, **kwargs)
        return func_wrapper
    return decorator
| 27 | 79 | 0.614815 |
acfbccca1176885d8d2beebda27ce5939049b23d | 7,832 | py | Python | site/search-index/build.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | site/search-index/build.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | site/search-index/build.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | import os
import re
import json
import pickle
import logging
import argparse
from textwrap import dedent
from collections import defaultdict, namedtuple
import nltk
import numpy as np
import scipy.sparse as sp
import ujson as json
from six.moves.urllib.parse import urljoin, urlparse
from pagerank import pagerank
from webgraph import WebPageGraph, Link
from htmltools import iter_html_files, ParseHTML
# Console logging for index-build progress messages.
logging.basicConfig(format='[%(levelname)-5s] %(message)s',
                    level=logging.DEBUG)

parser = argparse.ArgumentParser()
parser.add_argument("-d", "--for-deploy", action="store_true",
                    help=("save output in a javascript file that will "
                          "be used for deployment"))

# Output locations: a pickled index for local experiments and a javascript
# index consumed by the site's client-side search.
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))

PYTHON_INDEX_DIR = os.path.join(CURRENT_DIR, 'index-files')
PYTHON_INDEX_FILE = os.path.join(PYTHON_INDEX_DIR, 'index.pickle')

JS_INDEX_DIR = os.path.join(CURRENT_DIR, '..', 'blog', 'html', '_static', 'js')
JS_INDEX_FILE = os.path.join(JS_INDEX_DIR, 'searchindex.js')

# Local path of the generated site and its public root URL.
SITE_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'blog', 'html'))
SITE_ROOT = 'http://neupy.com'
def make_url_from_file(filepath):
    """Convert an on-disk HTML file path into its absolute site URL."""
    _, relative_path = filepath.split(SITE_DIR)
    return urljoin(SITE_ROOT, relative_path)
def ignore_link(link):
    """Return True when a URL must be excluded from the search index."""
    ignored_path_patterns = [
        # Base pages
        '/rss.html',
        '/index.html',
        '/master.html',
        '/search.html',
        '/archive.html',
        '/py-modindex.html',
        # Other pages
        '/pages/home.html',
        '/apidocs/modules.html',
        '/apidocs/neupy.html',
        # Pages that has collected information
        r'/page\d{1,}.html',
        r'.*tags/.+\.html',
        r'.*cheatsheet\.html',
        # Static files
        r'.+(css|js|jpg|png)$',
        r'/_images/.+',
        r'.*\.tar\.gz',
    ]
    uri = urlparse(link)
    if any(re.match(pattern, uri.path) for pattern in ignored_path_patterns):
        return True
    # Fragment-only pages generated by Sphinx carry no useful content.
    if uri.fragment in ('subpackages', 'submodules'):
        return True
    return uri.fragment.endswith('-package')
def url_filter(links):
    """Drop every link whose URI matches the ignore rules."""
    return [link for link in links if not ignore_link(link.uri)]
def save_python_index(data):
    """Pickle the search index into the local index directory."""
    if not os.path.exists(PYTHON_INDEX_DIR):
        os.mkdir(PYTHON_INDEX_DIR)
    with open(PYTHON_INDEX_FILE, 'wb') as index_file:
        pickle.dump(data, index_file)
def remove_useless_keys(documents):
    """Strip heavyweight/unneeded fields from each document dict, in place.

    The raw documents carry file-system paths and full page markup that are
    useless in the client-side search index and would bloat its size, so
    they are dropped before serialization.
    """
    useless_keys = ('filepath', 'filename', 'links', 'html', 'text')
    for document in documents:
        for useless_key in useless_keys:
            # pop with a default replaces the membership-test + del pair
            # and never raises when the key is already absent.
            document.pop(useless_key, None)
def save_js_index(documents, vocabulary, tf, idf, rank):
    """Write the search index as a javascript global for the deployed site."""
    if not os.path.exists(JS_INDEX_DIR):
        os.mkdir(JS_INDEX_DIR)
    output_template = dedent("""
    var searchIndex = {{
        documents: {documents},
        idf: {idf},
        tf: {{
            col: {tf_col},
            row: {tf_row},
            data: {tf_value}
        }},
        vocabulary: {vocabulary},
        rank: {rank}
    }}
    """)
    # Heavy fields (raw html/text, local paths) are not needed client-side.
    remove_useless_keys(documents)
    # COO layout exposes parallel row/col/data arrays that serialize
    # directly into the javascript structure above.
    tf = tf.tocoo()
    with open(JS_INDEX_FILE, 'w') as f:
        f.write(
            output_template.format(
                documents=json.dumps(documents, indent=4),
                idf=idf.tolist(),
                tf_col=tf.col.tolist(),
                tf_row=tf.row.tolist(),
                tf_value=tf.data.tolist(),
                vocabulary=json.dumps(vocabulary),
                rank=rank.tolist(),
            ))
def page_tagging(url):
    """Classify a page URL into a single tag, or None when nothing matches."""
    parsed_uri = urlparse(url)
    # First matching rule wins, so dictionary order is significant
    # ('article' is checked before 'tutorial' for dated URLs).
    tagging_rules = {
        'algorithm': r'^/apidocs/neupy.algorithms',
        'layer': r'^/apidocs/neupy.layers',
        'plot': r'^/apidocs/neupy.plots',
        'documentation': r'^/docs/',
        'article': r'^/\d{4}/\d{2}/\d{2}/',
        'tutorial': r'^/\d{4}/\d{2}/\d{2}/',
    }
    for tag, rule in tagging_rules.items():
        if re.match(rule, parsed_uri.path):
            return tag
    return None
def collect_documents(directory):
    """Walk the rendered site and build one Document per indexable subpage.

    Parameters
    ----------
    directory : str
        Root directory that contains the generated HTML files.

    Returns
    -------
    list of Document
        One entry per page sub-document that passed the ignore-list and
        non-empty-text filters.
    """
    logging.info("Collecting documents from the directory (%s)", directory)
    Document = namedtuple("Document", "filename filepath uri links "
                                      "html text title tag snippet")
    documents = []
    for filepath in iter_html_files(directory):
        current_page_url = make_url_from_file(filepath)
        filename = os.path.basename(filepath)
        if ignore_link(current_page_url):
            # Fix: log message previously misspelled "bacause".
            logging.debug('Skip "%s", because file is defined in the '
                          'ignore list', filename)
            continue
        html = ParseHTML.fromfile(filepath, current_page_url)
        tag = page_tagging(current_page_url)
        text = html.text()
        if not text:
            logging.debug('Skip "%s", because text is missed', filename)
            continue
        # Each page is split into sub-documents (anchored sections) so that
        # search results can point at a specific section of the page.
        for subdocument in html.subdocuments():
            if ignore_link(subdocument.uri):
                logging.debug('Skip "%s", because URL is defined in the '
                              'ignore list', subdocument.uri)
            else:
                doc = Document(filename, filepath, subdocument.uri,
                               url_filter(subdocument.links),
                               subdocument.html, subdocument.text,
                               subdocument.title, tag, subdocument.snippet)
                documents.append(doc)
    return documents
if __name__ == '__main__':
    logging.info("Started building index")
    args = parser.parse_args()
    # CSR-matrix accumulators: data/indeces/index_pointers follow scipy's
    # CSR layout, one row per document and one column per vocabulary term.
    documents = []
    vocabulary = {}
    term_frequency = defaultdict(int)
    index_pointers = [0]
    indeces = []
    data = []
    logging.info("Collecting documents")
    all_documents = collect_documents(SITE_DIR)
    logging.info("Define relations between documents")
    webgraph = WebPageGraph.create_from_documents(all_documents)
    for document in all_documents:
        logging.debug('Processing "%s"', document.uri)
        text = document.text
        text = text.lower().replace('.', ' ').replace('=', ' ')
        # Anchor texts of pages linking to this document describe it too,
        # so they are folded into the document's searchable text.
        anchor_texts = []
        for _, link in webgraph.page_linked_by(Link(document.uri)):
            if link.text:
                anchor_texts.append(link.text)
        text = ' '.join([text] + anchor_texts)
        for term in nltk.word_tokenize(text):
            if term not in vocabulary:
                vocabulary[term] = len(vocabulary)
            termid = vocabulary[term]
            term_frequency[termid] += 1
            indeces.append(termid)
            data.append(1)
        index_pointers.append(len(indeces))
        documents.append(document._asdict())
    n_documents = len(documents)
    n_terms = len(vocabulary)
    if n_documents == 0:
        raise OSError("Cannot find site documents. Probably site "
                      "hasn't been build yet.")
    logging.info("Found {} documents".format(n_documents))
    logging.info("Found {} terms".format(n_terms))
    logging.info("Calculation TF and IDF")
    frequencies = sp.csr_matrix((data, indeces, index_pointers),
                                shape=(n_documents, n_terms))
    # Smoothed IDF and log-scaled TF weighting for search scoring.
    df = (frequencies >= 1).sum(axis=0)
    idf = np.log((n_documents / df) + 1)
    idf = np.asarray(idf)[0]
    tf = np.log1p(frequencies)
    tf.data += 1
    logging.info("Applying PageRank")
    rank = webgraph.pagerank()
    logging.info("Saving index")
    if args.for_deploy:
        save_js_index(documents, vocabulary, tf, idf, rank)
    else:
        save_python_index([documents, vocabulary, tf, idf, rank])
    # NOTE(review): message typo ("succesfuly") left as-is; it is a
    # runtime string and changing it is out of scope for documentation.
    logging.info("Index build was finished succesfuly")
| 27.871886 | 79 | 0.595123 |
acfbcd83e820dfd1ff1e6eb9be8af961d5ae0084 | 16,799 | py | Python | algortimoGA/virtualenv/lib/python3.8/site-packages/tornado/concurrent.py | lsbloo/GeradorHorariosUfpb | 599db5ca382424dfc05fad039880b4717612ac44 | [
"MIT"
] | 6 | 2020-08-04T13:12:42.000Z | 2020-08-16T13:26:19.000Z | algortimoGA/virtualenv/lib/python3.8/site-packages/tornado/concurrent.py | lsbloo/GeradorHorariosUfpb | 599db5ca382424dfc05fad039880b4717612ac44 | [
"MIT"
] | null | null | null | algortimoGA/virtualenv/lib/python3.8/site-packages/tornado/concurrent.py | lsbloo/GeradorHorariosUfpb | 599db5ca382424dfc05fad039880b4717612ac44 | [
"MIT"
] | 3 | 2017-04-07T12:02:22.000Z | 2020-03-23T12:11:55.000Z | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
    """Raised by ``@return_future`` when the wrapped function returns a
    value instead of delivering its result through the callback."""
    pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
    """Logs a stored traceback from ``__del__`` unless it was cleared first.

    A ``Future`` holding an exception that nobody ever retrieves would
    otherwise fail silently, violating "errors should never pass silently".
    Attaching ``__del__`` directly to the Future would stop the garbage
    collector from breaking reference cycles, so the Future instead holds
    one of these small helpers: it never participates in a cycle, and when
    it is collected it reports the traceback.

    ``set_exception`` creates the helper; ``result()``/``exception()``
    silence it via :meth:`clear`; :meth:`activate` formats the traceback
    lazily, only after the Future's callbacks have had a chance to observe
    the error.
    """
    __slots__ = ('exc_info', 'formatted_tb')

    def __init__(self, exc_info):
        self.exc_info = exc_info
        self.formatted_tb = None

    def activate(self):
        # Format lazily: most futures have their error retrieved, in which
        # case the relatively expensive formatting work never happens.
        pending, self.exc_info = self.exc_info, None
        if pending is not None:
            self.formatted_tb = traceback.format_exception(*pending)

    def clear(self):
        # The exception was observed; nothing should be logged.
        self.exc_info = None
        self.formatted_tb = None

    def __del__(self):
        if self.formatted_tb:
            app_log.error('Future exception was never retrieved: %s',
                          ''.join(self.formatted_tb).rstrip())
class Future(object):
    """Placeholder for an asynchronous result.
    A ``Future`` encapsulates the result of an asynchronous
    operation. In synchronous applications ``Futures`` are used
    to wait for the result from a thread or process pool; in
    Tornado they are normally used with `.IOLoop.add_future` or by
    yielding them in a `.gen.coroutine`.
    `tornado.concurrent.Future` is similar to
    `concurrent.futures.Future`, but not thread-safe (and therefore
    faster for use with single-threaded event loops).
    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    This functionality was previously available in a separate class
    ``TracebackFuture``, which is now a deprecated alias for this class.
    .. versionchanged:: 4.0
       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
       with support for the ``exc_info`` methods. Previously it would
       be an alias for the thread-safe `concurrent.futures.Future`
       if that package was available and fall back to the thread-unsafe
       implementation if it was not.
    .. versionchanged:: 4.1
       If a `.Future` contains an error but that error is never observed
       (by calling ``result()``, ``exception()``, or ``exc_info()``),
       a stack trace will be logged when the `.Future` is garbage collected.
       This normally indicates an error in the application, but in cases
       where it results in undesired logging it may be necessary to
       suppress the logging by ensuring that the exception is observed:
       ``f.add_done_callback(lambda f: f.exception())``.
    """
    def __init__(self):
        # _done flips to True once set_result/set_exc_info completes.
        self._done = False
        self._result = None
        self._exc_info = None
        self._log_traceback = False # Used for Python >= 3.4
        self._tb_logger = None # Used for Python <= 3.3
        self._callbacks = []
    def cancel(self):
        """Cancel the operation, if possible.
        Tornado ``Futures`` do not support cancellation, so this method always
        returns False.
        """
        return False
    def cancelled(self):
        """Returns True if the operation has been cancelled.
        Tornado ``Futures`` do not support cancellation, so this method
        always returns False.
        """
        return False
    def running(self):
        """Returns True if this operation is currently running."""
        return not self._done
    def done(self):
        """Returns True if the future has finished running."""
        return self._done
    def _clear_tb_log(self):
        # Mark any stored error as "observed" so that neither the
        # Python >= 3.4 __del__ hook nor the _TracebackLogger helper
        # logs it when this future is garbage collected.
        self._log_traceback = False
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
    def result(self, timeout=None):
        """If the operation succeeded, return its result. If it failed,
        re-raise its exception.
        """
        self._clear_tb_log()
        if self._result is not None:
            return self._result
        if self._exc_info is not None:
            raise_exc_info(self._exc_info)
        self._check_done()
        return self._result
    def exception(self, timeout=None):
        """If the operation raised an exception, return the `Exception`
        object. Otherwise returns None.
        """
        self._clear_tb_log()
        if self._exc_info is not None:
            return self._exc_info[1]
        else:
            self._check_done()
            return None
    def add_done_callback(self, fn):
        """Attaches the given callback to the `Future`.
        It will be invoked with the `Future` as its argument when the Future
        has finished running and its result is available. In Tornado
        consider using `.IOLoop.add_future` instead of calling
        `add_done_callback` directly.
        """
        if self._done:
            # Already resolved: run the callback immediately.
            fn(self)
        else:
            self._callbacks.append(fn)
    def set_result(self, result):
        """Sets the result of a ``Future``.
        It is undefined to call any of the ``set`` methods more than once
        on the same object.
        """
        self._result = result
        self._set_done()
    def set_exception(self, exception):
        """Sets the exception of a ``Future.``"""
        self.set_exc_info(
            (exception.__class__,
             exception,
             getattr(exception, '__traceback__', None)))
    def exc_info(self):
        """Returns a tuple in the same format as `sys.exc_info` or None.
        .. versionadded:: 4.0
        """
        self._clear_tb_log()
        return self._exc_info
    def set_exc_info(self, exc_info):
        """Sets the exception information of a ``Future.``
        Preserves tracebacks on Python 2.
        .. versionadded:: 4.0
        """
        self._exc_info = exc_info
        self._log_traceback = True
        if not _GC_CYCLE_FINALIZERS:
            # Pre-3.4 interpreters need this helper to log never-retrieved
            # exceptions without creating a __del__-bearing cycle.
            self._tb_logger = _TracebackLogger(exc_info)
        try:
            self._set_done()
        finally:
            # Activate the logger after all callbacks have had a
            # chance to call result() or exception().
            if self._log_traceback and self._tb_logger is not None:
                self._tb_logger.activate()
        # NOTE(review): this reassignment looks redundant -- nothing above
        # clears self._exc_info -- but it is kept as-is to preserve behavior.
        self._exc_info = exc_info
    def _check_done(self):
        # Unlike concurrent.futures.Future, this class cannot block waiting
        # for a result, so asking before resolution is an error.
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")
    def _set_done(self):
        # Resolve the future and fire every queued callback exactly once;
        # callback errors are logged, not propagated.
        self._done = True
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                app_log.exception('exception calling callback %r for %r',
                                  cb, self)
        self._callbacks = None
    # On Python 3.3 or older, objects with a destructor part of a reference
    # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
    # the PEP 442.
    if _GC_CYCLE_FINALIZERS:
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return
            tb = traceback.format_exception(*self._exc_info)
            app_log.error('Future %r exception was never retrieved: %s',
                          self, ''.join(tb).rstrip())
# Deprecated alias: ``Future`` itself now carries the traceback-preserving
# ``exc_info``/``set_exc_info`` methods (see the class docstring).
TracebackFuture = Future
# Types accepted by ``is_future``; includes the thread-safe
# ``concurrent.futures.Future`` only when that package imported successfully.
if futures is None:
    FUTURES = Future
else:
    FUTURES = (futures.Future, Future)
def is_future(x):
    """Return True if ``x`` is one of the recognized ``Future`` types."""
    return isinstance(x, FUTURES)
class DummyExecutor(object):
    """Executor look-alike that runs submitted callables synchronously.

    ``submit`` executes the callable immediately on the calling thread and
    wraps either its return value or its exception in a resolved future.
    """
    def submit(self, fn, *args, **kwargs):
        resolved = TracebackFuture()
        try:
            resolved.set_result(fn(*args, **kwargs))
        except Exception:
            resolved.set_exc_info(sys.exc_info())
        return resolved
    def shutdown(self, wait=True):
        # Nothing to release: there are no worker threads.
        pass
# Shared module-level instance used as the default synchronous executor.
dummy_executor = DummyExecutor()
def run_on_executor(fn):
    """Decorator that submits a blocking method to ``self.executor``.

    The wrapped method immediately returns the executor's future. An
    optional ``callback`` keyword argument is invoked with the future's
    result via ``self.io_loop`` once the future resolves. Objects using
    this decorator must provide ``executor`` and ``io_loop`` attributes.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        on_done = kwargs.pop("callback", None)
        future = self.executor.submit(fn, self, *args, **kwargs)
        if on_done:
            def run_callback(resolved):
                on_done(resolved.result())
            self.io_loop.add_future(future, run_callback)
        return future
    return wrapper
# Sentinel that lets ``return_future`` distinguish "callback invoked with
# no arguments" from an explicit ``None`` result.
_NO_RESULT = object()
def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.
    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished. To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).
    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument. If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.
    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).
    Usage::
        @return_future
        def future_func(arg1, arg2, callback):
            # Do stuff (possibly asynchronous)
            callback(result)
        @gen.engine
        def caller(callback):
            yield future_func(arg1, arg2)
            callback()
    Note that ``@return_future`` and ``@gen.engine`` can be applied to the
    same function, provided ``@return_future`` appears first. However,
    consider using ``@gen.coroutine`` instead of this combination.
    """
    # Locates the ``callback`` argument in f's signature so it can be
    # replaced with one that resolves the future.
    replacer = ArgReplacer(f, 'callback')
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        callback, args, kwargs = replacer.replace(
            lambda value=_NO_RESULT: future.set_result(value),
            args, kwargs)
        # Exceptions escaping into the stack context fail the future.
        def handle_error(typ, value, tb):
            future.set_exc_info((typ, value, tb))
            return True
        exc_info = None
        with ExceptionStackContext(handle_error):
            try:
                result = f(*args, **kwargs)
                if result is not None:
                    raise ReturnValueIgnoredError(
                        "@return_future should not be used with functions "
                        "that return values")
            except:
                exc_info = sys.exc_info()
                raise
        if exc_info is not None:
            # If the initial synchronous part of f() raised an exception,
            # go ahead and raise it to the caller directly without waiting
            # for them to inspect the Future.
            future.result()
        # If the caller passed in a callback, schedule it to be called
        # when the future resolves. It is important that this happens
        # just before we return the future, or else we risk confusing
        # stack contexts with multiple exceptions (one here with the
        # immediate exception, and again when the future resolves and
        # the callback triggers its exception by calling future.result()).
        if callback is not None:
            def run_callback(future):
                result = future.result()
                if result is _NO_RESULT:
                    callback()
                else:
                    callback(future.result())
            future.add_done_callback(wrap(run_callback))
        return future
    return wrapper
def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.
    """
    def _transfer(resolved):
        assert resolved is a
        if b.done():
            return
        traceback_capable = (isinstance(a, TracebackFuture)
                             and isinstance(b, TracebackFuture))
        if traceback_capable and a.exc_info() is not None:
            # Preserve the full traceback when both sides support it.
            b.set_exc_info(a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    a.add_done_callback(_transfer)
| 36.440347 | 80 | 0.650396 |
acfbcde4c70a7dff48e8203e679d8f7ec0e7948b | 297 | py | Python | Course_3/Week_1/rearrange2.py | internetworksio/Google-ITAutomation-Python | 6027750a33e8df883d762223bb0c4a5a95395bc0 | [
"MIT"
] | 2 | 2021-03-23T16:02:32.000Z | 2022-03-13T09:32:56.000Z | Course_3/Week_1/rearrange2.py | internetworksio/Google-ITAutomation-Python | 6027750a33e8df883d762223bb0c4a5a95395bc0 | [
"MIT"
] | null | null | null | Course_3/Week_1/rearrange2.py | internetworksio/Google-ITAutomation-Python | 6027750a33e8df883d762223bb0c4a5a95395bc0 | [
"MIT"
] | 7 | 2021-01-14T05:39:54.000Z | 2022-03-13T09:33:01.000Z | #!/usr/bin/env python3
"""
This script is used for course notes.
Author: Erick Marin
Date: 01/06/2021
"""
import re
def rearrange_name(name):
    """Convert a "Last, First" name into "First Last".

    Returns None when ``name`` does not match the "Last, First" pattern.
    """
    result = re.search(r"^([\w .-]*), ([\w .-]*)$", name)
    if result is None:
        return result
    # Bug fix: group 2 was formatted twice, duplicating the first name.
    # Group 1 is the last name and group 2 the first name; swap them.
    return "{} {}".format(result[2], result[1])
| 17.470588 | 57 | 0.592593 |
acfbcf0140124e8674e73f987840a9745804fd83 | 1,082 | py | Python | Pizza Ordering Program/main.py | vanshdamania/Python-Projects | 981bff33b1a828cca430f4ebae22c55f9e6edde7 | [
"Apache-2.0"
] | null | null | null | Pizza Ordering Program/main.py | vanshdamania/Python-Projects | 981bff33b1a828cca430f4ebae22c55f9e6edde7 | [
"Apache-2.0"
] | null | null | null | Pizza Ordering Program/main.py | vanshdamania/Python-Projects | 981bff33b1a828cca430f4ebae22c55f9e6edde7 | [
"Apache-2.0"
] | null | null | null | print("-" * 50)
# --- Pizza ordering: show the menu, read choices, compute the bill ---
print("\t Welcome to Damania's Pizza")
print("-" * 50)

# Base prices and topping surcharges, in dollars.
SmallPizza = 15
MediumPizza = 20
LargePizza = 25
PepperoniSmallPizza = 2
PepperoniMLPizza = 3
ExtraCheese = 1
Bill = 0

print("Small size pizza: $15")
print("Medium size pizza: $20")
print("Large size pizza: $25\n")
print("Pepperoni for Small Pizza: +$2")
print("Pepperoni for Medium or Large Pizza: +$3")
print("Extra cheese for any size pizza: + $1\n")

# Answers are lower-cased once, so every check below only needs to
# consider lower-case spellings.
Size = str(input("Enter the size of the pizza: ")).lower()
Pepperoni = str(input("You want to add pepperoni in your pizza: ")).lower()
Cheese = str(input("You want to add extra cheese in your pizza: ")).lower()

if Size == 's' or Size == 'small':
    Bill = SmallPizza
elif Size == 'm' or Size == 'medium':
    Bill = MediumPizza
elif Size == 'l' or Size == 'large':
    Bill = LargePizza

# Bug fix: the answer was already lower-cased, so the old "== 'Y'" branch
# could never match; accept 'y'/'yes' consistently with the cheese check.
if Pepperoni == 'y' or Pepperoni == 'yes':
    if Size == 's' or Size == 'small':
        Bill += PepperoniSmallPizza
    else:
        Bill += PepperoniMLPizza

if Cheese == 'y' or Cheese == 'yes':
    Bill += ExtraCheese

print(f"Your final bill is: $ {Bill}")
| 25.162791 | 75 | 0.639556 |
acfbcfaed3619cd06fd2d962f0da400d9573d199 | 1,536 | py | Python | flowctl/actions/update_action.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | flowctl/actions/update_action.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | flowctl/actions/update_action.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import yaml
from flowlib import flow_pb2
from flowlib.flowd_utils import get_flowd_connection
__help__ = 'Update a workflow deployment. Currently supports setting ingress for start events.'
def __refine_args__(parser: argparse.ArgumentParser):
    """Register this action's command-line arguments on the given parser."""
    parser.add_argument('-o', '--output', action='store_true',
                        help='Output response data to stdout.')
    parser.add_argument('bpmn_spec', nargs='+',
                        help='sufficiently annotated BPMN file(s)')
    return parser
def update_action(namespace: argparse.Namespace, *args, **kws):
    """Send every BPMN spec file to flowd as an UpdateWorkflow request.

    Returns 0 when all responses succeed, otherwise the status of the
    first failing response.
    """
    responses = dict()
    with get_flowd_connection(namespace.flowd_host, namespace.flowd_port) as flowd:
        for spec in namespace.bpmn_spec:
            with open(spec, 'r') as spec_file_obj:
                spec_text = spec_file_obj.read()
            request = flow_pb2.UpdateRequest(update_spec=spec_text)
            responses[spec] = flowd.UpdateWorkflow(request)
    status = 0
    for spec, response in responses.items():
        if response.status < 0:
            logging.error(
                f'Error from server: {response.status}, "{response.message}"'
            )
            # Remember only the first failure status.
            if status >= 0:
                status = response.status
        else:
            logging.info(
                f'Got response: {response.status}, "{response.message}", {response.data}'
            )
            if namespace.output:
                print(response.data)
    return status
| 28.981132 | 95 | 0.589193 |
acfbd02e1580cb4404682189966b6c3667c0027d | 5,969 | py | Python | curvetime/bc/blockchain.py | Dasudian/curvetime | 9d3211aa1d2c7a24a3379152a95033586573c4d8 | [
"Apache-2.0"
] | 5 | 2021-05-16T12:52:57.000Z | 2022-02-25T10:12:31.000Z | curvetime/bc/blockchain.py | Dasudian/curvetime | 1fb388c14bfddbe0c473da8f4dc0caea35ab154e | [
"Apache-2.0"
] | null | null | null | curvetime/bc/blockchain.py | Dasudian/curvetime | 1fb388c14bfddbe0c473da8f4dc0caea35ab154e | [
"Apache-2.0"
] | 1 | 2022-03-30T16:31:50.000Z | 2022-03-30T16:31:50.000Z | from hashlib import sha256
from uuid import getnode
import json
import time
from curvetime.db import couch
class Block:
    def __init__(self, index, timestamp, previous_hash, transactions=None,
                 action=None, state=None, reward=None):
        """
        The init function for a block.

        index: the index of the block in the chain
        timestamp: the time (milliseconds) when the block is created
        previous_hash: the hash value of the previous block
        transactions: data/events that need to be stored onto the blockchain
        action: the action that the AI agent chose at the last time step
        state: the state the environment entered after the action
        reward: the reward that the AI agent gets from the action
        """
        self.index = index
        # Bug fix: the old default ``transactions=[]`` was a shared mutable
        # default; since the chain appends to ``transactions`` in place,
        # every default-constructed block would have shared (and polluted)
        # the same list. A fresh list is now created per block.
        self.transactions = [] if transactions is None else transactions
        self.timestamp = timestamp
        self.previous_hash = previous_hash
        self.state = state
        self.action = action
        self.reward = reward

    def compute_hash(self):
        """
        A function that returns the hash of the block contents.
        """
        # sort_keys makes the JSON canonical so the hash is deterministic.
        block_string = json.dumps(self.__dict__, sort_keys=True)
        return sha256(block_string.encode()).hexdigest()

    def to_json(self):
        """Serialize the block's attributes as a canonical JSON string."""
        return json.dumps(self.__dict__, sort_keys=True)
class Blockchain:
    """A hash chain of ``Block`` objects persisted through the ``couch``
    storage module. Proof-of-work is replaced by a step of the attached
    AI ``agent``, whose ``step`` method yields (action, state, reward).
    """
    def __init__(self, agent):
        # Transactions waiting to be included in the next mined block.
        self.unconfirmed_transactions = []
        self.agent = agent
        couch.init_db()
        self.miner = self.miner_address()
        # The in-memory chain is the ordered list of block hashes; the
        # block bodies themselves live in the couch database.
        self.chain = couch.keys()
        if not self.chain:
            self.chain = []
            self.create_genesis_block()
    def miner_address(self):
        """Derive a stable miner address from this machine's MAC address."""
        mac = str(getnode())
        return sha256(mac.encode()).hexdigest()
    def create_genesis_block(self):
        """
        A function to generate genesis block and appends it to
        the chain. The block has index 0, previous_hash as 0, and
        a valid hash.
        """
        timestamp = time.time()
        a, s, r = self.agent.step()
        genesis_block = Block(0, timestamp, "0", [], a, s, r)
        genesis_block_hash = genesis_block.compute_hash()
        Blockchain.store_block(genesis_block_hash, genesis_block)
        self.chain.append(genesis_block_hash)
    @staticmethod
    def store_block(block_hash, block):
        """Persist a block's attribute dict under its hash."""
        couch.put(block_hash, block.__dict__)
    @staticmethod
    def fetch_block(hash):
        """Load a stored block by hash and rebuild it as a ``Block``."""
        block = couch.get(hash)
        return Block(**block)
    @property
    def last_block_hash(self):
        # Hash of the newest block in the chain.
        return self.chain[-1]
    def add_block(self, block, proof):
        """
        A function that adds the block to the chain after verification.
        Verification includes:
        * Checking if the proof is valid.
        * The previous_hash referred in the block and the hash of latest block
          in the chain match.
        """
        previous_hash = self.last_block_hash
        if previous_hash != block.previous_hash:
            return False
        if not Blockchain.is_valid_proof(block, proof):
            return False
        self.chain.append(proof)
        Blockchain.store_block(proof, block)
        return True
    def proof_of_work(self, block):
        """
        Function that chooses an action from the last state
        and transforms to a new state.
        """
        previous_hash = self.last_block_hash
        last_block = Blockchain.fetch_block(previous_hash)
        state = last_block.state
        # The agent advances the environment from the previous block's
        # state; the resulting triple is embedded into the new block.
        a, s, r = self.agent.step(state)
        block.state = s
        block.action = a
        block.reward = r
        block.transactions.append({"transaction": "mine",
                                   "address": self.miner,
                                   "reward": r})
        computed_hash = block.compute_hash()
        return computed_hash, block
    def add_new_transaction(self, transaction):
        """Queue a transaction for inclusion in the next mined block."""
        self.unconfirmed_transactions.append(transaction)
    @classmethod
    def is_valid_proof(cls, block, block_hash):
        """
        Check if block_hash is valid hash of block and satisfies
        the difficulty criteria.
        """
        return (block.state is not None and
                block.action is not None and
                block.reward is not None and
                block_hash == block.compute_hash())
    @classmethod
    def check_chain_validity(cls, chain):
        """Return True when every block verifies and links to its parent."""
        result = True
        previous_hash = "0"
        for block_hash in chain:
            # remove the hash field to recompute the hash again
            # using `compute_hash` method.
            block = Blockchain.fetch_block(block_hash)
            if not cls.is_valid_proof(block, block_hash) or \
                    previous_hash != block.previous_hash:
                result = False
                break
            previous_hash = block_hash
        return result
    def mine(self):
        """
        This function serves as an interface to add the pending
        transactions to the blockchain by adding them to the block
        and figuring out Proof Of Work.
        """
        if not self.unconfirmed_transactions:
            return False
        last_block_hash = self.last_block_hash
        last_block = Blockchain.fetch_block(last_block_hash)
        new_block = Block(index=last_block.index + 1,
                          transactions=self.unconfirmed_transactions,
                          timestamp=time.time(),
                          previous_hash=last_block_hash)
        proof, new_block = self.proof_of_work(new_block)
        self.add_block(new_block, proof)
        self.unconfirmed_transactions = []
        return True
    def copy_to(self):
        """Return the hash list together with the fetched block objects."""
        blocks = []
        for hash in self.chain:
            blocks.append(Blockchain.fetch_block(hash))
        return self.chain, blocks
    def copy_from(self, hash_list, block_list):
        """Replace the local chain and persist the supplied blocks."""
        self.chain = hash_list
        for i in range(len(hash_list)):
            Blockchain.store_block(hash_list[i], block_list[i])
| 30.299492 | 111 | 0.61032 |
acfbd079de7fe2b788ba22fa647b56923742fb50 | 1,423 | py | Python | courses/api/serializers.py | antonnifo/E-Soma | 93d49b27dedbff58d19f8245a79693762fc819d5 | [
"MIT"
] | 1 | 2022-02-09T06:28:04.000Z | 2022-02-09T06:28:04.000Z | courses/api/serializers.py | antonnifo/E-Soma | 93d49b27dedbff58d19f8245a79693762fc819d5 | [
"MIT"
] | null | null | null | courses/api/serializers.py | antonnifo/E-Soma | 93d49b27dedbff58d19f8245a79693762fc819d5 | [
"MIT"
] | 1 | 2022-02-09T06:29:11.000Z | 2022-02-09T06:29:11.000Z | from rest_framework import serializers
from ..models import Subject, Course, Module, Content
class SubjectSerializer(serializers.ModelSerializer):
    """Flat representation of a ``Subject`` (id, title, slug)."""
    class Meta:
        model = Subject
        fields = ['id', 'title', 'slug']
class ModuleSerializer(serializers.ModelSerializer):
    """Module summary (order, title, description) without its contents."""
    class Meta:
        model = Module
        fields = ['order', 'title', 'description']
class CourseSerializer(serializers.ModelSerializer):
    """Course representation with read-only nested module summaries."""
    modules = ModuleSerializer(many=True, read_only=True)
    class Meta:
        model = Course
        fields = ['id', 'subject', 'title', 'slug', 'overview',
                  'created', 'owner', 'modules']
class ItemRelatedField(serializers.RelatedField):
    """Serializes a related content item by calling its ``render()`` method."""
    def to_representation(self, value):
        return value.render()
class ContentSerializer(serializers.ModelSerializer):
    """Serialize a ``Content`` row as its order plus the rendered item."""
    item = ItemRelatedField(read_only=True)
    class Meta:
        model = Content
        fields = ['order', 'item']
class ModuleWithContentsSerializer(serializers.ModelSerializer):
    """Module representation that also nests its serialized contents."""
    contents = ContentSerializer(many=True)
    class Meta:
        model = Module
        fields = ['order', 'title', 'description', 'contents']
class CourseWithContentsSerializer(serializers.ModelSerializer):
    """Full course representation: modules together with their contents."""
    modules = ModuleWithContentsSerializer(many=True)
    class Meta:
        model = Course
        fields = ['id', 'subject', 'title', 'slug',
                  'overview', 'created', 'owner', 'modules']
acfbd0a9f23e73d2d41a82805a092927960f6d41 | 14,816 | py | Python | src/lib/lis3dh.py | galileorobotics/croissant-node | 5c0384787db389f912b825ce9c74147790297aab | [
"MIT"
] | 2 | 2020-05-20T16:30:32.000Z | 2020-08-13T16:38:36.000Z | src/lib/lis3dh.py | galileorobotics/croissant-node | 5c0384787db389f912b825ce9c74147790297aab | [
"MIT"
] | 1 | 2020-06-17T16:12:56.000Z | 2020-06-17T16:12:56.000Z | src/lib/lis3dh.py | galileorobotics/croissant-node | 5c0384787db389f912b825ce9c74147790297aab | [
"MIT"
] | 1 | 2020-05-20T18:00:16.000Z | 2020-05-20T18:00:16.000Z | # Adafruit LIS3DH Accelerometer Micropython Driver
# Based on the Arduino LIS3DH driver from:
# https://github.com/adafruit/Adafruit_LIS3DH/
# and the Circuitpython Port by Tony DiCola
#Port by Julian Finn
# License: MIT License (https://en.wikipedia.org/wiki/MIT_License)
"""
`adafruit_lis3dh`
====================================================
Micropython driver for the LIS3DH accelerometer. Based on the CircuitPython version by Tony DiCola
Port by Julian Finn
Implementation Notes
--------------------
**Hardware:**
* `Adafruit LIS3DH Triple-Axis Accelerometer Breakout
<https://www.adafruit.com/product/2809>`_
* `Circuit Playground Express <https://www.adafruit.com/product/3333>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
import time
import math
try:
from collections import namedtuple
except ImportError:
from ucollections import namedtuple
try:
import struct
except ImportError:
import ustruct as struct
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_LIS3DH.git"
# Register addresses:
# (names mirror the LIS3DH datasheet; leading underscore marks them private)
# pylint: disable=bad-whitespace
_REG_OUTADC1_L = const(0x08)
_REG_WHOAMI = const(0x0F)
_REG_TEMPCFG = const(0x1F)
_REG_CTRL1 = const(0x20)
_REG_CTRL3 = const(0x22)
_REG_CTRL4 = const(0x23)
_REG_CTRL5 = const(0x24)
_REG_OUT_X_L = const(0x28)
_REG_INT1SRC = const(0x31)
_REG_CLICKCFG = const(0x38)
_REG_CLICKSRC = const(0x39)
_REG_CLICKTHS = const(0x3A)
_REG_TIMELIMIT = const(0x3B)
_REG_TIMELATENCY = const(0x3C)
_REG_TIMEWINDOW = const(0x3D)
# Register value constants:
RANGE_16_G = const(0b11)    # +/- 16g
RANGE_8_G = const(0b10)     # +/- 8g
RANGE_4_G = const(0b01)     # +/- 4g
RANGE_2_G = const(0b00)     # +/- 2g (default value)
DATARATE_1344_HZ = const(0b1001)    # 1.344 KHz
DATARATE_400_HZ = const(0b0111)     # 400Hz
DATARATE_200_HZ = const(0b0110)     # 200Hz
DATARATE_100_HZ = const(0b0101)     # 100Hz
DATARATE_50_HZ = const(0b0100)      # 50Hz
DATARATE_25_HZ = const(0b0011)      # 25Hz
DATARATE_10_HZ = const(0b0010)      # 10 Hz
DATARATE_1_HZ = const(0b0001)       # 1 Hz
DATARATE_POWERDOWN = const(0)
DATARATE_LOWPOWER_1K6HZ = const(0b1000)
DATARATE_LOWPOWER_5KHZ = const(0b1001)
# Other constants
STANDARD_GRAVITY = 9.806  # m/s^2, used to convert raw G readings
# pylint: enable=bad-whitespace
# the named tuple returned by the class
AccelerationTuple = namedtuple("acceleration", ("x", "y", "z"))
class LIS3DH:
    """Driver base for the LIS3DH accelerometer.

    Transport-agnostic: subclasses must implement ``_read_register`` and
    ``_write_register_byte`` for their bus (see the I2C and SPI drivers).

    :param int1: optional interrupt pin 1 (used for tap detection latching).
    :param int2: optional interrupt pin 2 (stored but unused here).
    """
    def __init__(self, int1=None, int2=None):
        # Check device ID.
        device_id = self._read_register_byte(_REG_WHOAMI)
        print("device id")
        print (device_id)
        #if device_id != 0x33:
        #    raise RuntimeError('Failed to find LIS3DH!')
        # Reboot
        self._write_register_byte(_REG_CTRL5, 0x80)
        time.sleep(0.01) # takes 5ms
        # Enable all axes, normal mode.
        self._write_register_byte(_REG_CTRL1, 0x07)
        # Set 400Hz data rate.
        self.data_rate = DATARATE_400_HZ
        # High res & BDU enabled.
        self._write_register_byte(_REG_CTRL4, 0x88)
        # Enable ADCs.
        self._write_register_byte(_REG_TEMPCFG, 0x80)
        # Latch interrupt for INT1
        self._write_register_byte(_REG_CTRL5, 0x08)
        # Initialise interrupt pins
        self._int1 = int1
        self._int2 = int2
        if self._int1:
            # NOTE(review): `machine` is never imported at module level in
            # this file, so reaching this branch raises NameError when an
            # int1 pin is supplied — confirm the intended import.
            self._int1.direction = machine.Pin.IN
            self._int1.pull = machine.Pin.PULL_UP
    @property
    def data_rate(self):
        """The data rate of the accelerometer. Can be DATA_RATE_400_HZ, DATA_RATE_200_HZ,
           DATA_RATE_100_HZ, DATA_RATE_50_HZ, DATA_RATE_25_HZ, DATA_RATE_10_HZ,
           DATA_RATE_1_HZ, DATA_RATE_POWERDOWN, DATA_RATE_LOWPOWER_1K6HZ, or
           DATA_RATE_LOWPOWER_5KHZ."""
        # Rate lives in the top 4 bits of CTRL1.
        ctl1 = self._read_register_byte(_REG_CTRL1)
        return (ctl1 >> 4) & 0x0F
    @data_rate.setter
    def data_rate(self, rate):
        # Read-modify-write so the axis-enable bits in CTRL1 are preserved.
        ctl1 = self._read_register_byte(_REG_CTRL1)
        ctl1 &= ~(0xF0)
        ctl1 |= rate << 4
        self._write_register_byte(_REG_CTRL1, ctl1)
    @property
    def range(self):
        """The range of the accelerometer. Can be RANGE_2_G, RANGE_4_G, RANGE_8_G, or
           RANGE_16_G."""
        # Full-scale range is bits 4-5 of CTRL4.
        ctl4 = self._read_register_byte(_REG_CTRL4)
        return (ctl4 >> 4) & 0x03
    @range.setter
    def range(self, range_value):
        ctl4 = self._read_register_byte(_REG_CTRL4)
        ctl4 &= ~0x30
        ctl4 |= range_value << 4
        self._write_register_byte(_REG_CTRL4, ctl4)
    @property
    def acceleration(self):
        """The x, y, z acceleration values returned in a 3-tuple and are in m / s ^ 2."""
        # Divider converts raw 16-bit counts to Gs for the current range.
        divider = 1
        accel_range = self.range
        if accel_range == RANGE_16_G:
            divider = 1365
        elif accel_range == RANGE_8_G:
            divider = 4096
        elif accel_range == RANGE_4_G:
            divider = 8190
        elif accel_range == RANGE_2_G:
            divider = 16380
        # OR 0x80 sets the auto-increment bit to burst-read all 6 axis bytes.
        x, y, z = struct.unpack('<hhh', self._read_register(_REG_OUT_X_L | 0x80, 6))
        # convert from Gs to m / s ^ 2 and adjust for the range
        x = (x / divider) * STANDARD_GRAVITY
        y = (y / divider) * STANDARD_GRAVITY
        z = (z / divider) * STANDARD_GRAVITY
        return AccelerationTuple(x, y, z)
    def shake(self, shake_threshold=30, avg_count=10, total_delay=0.1):
        """
        Detect when the accelerometer is shaken. Optional parameters:
        :param shake_threshold: Increase or decrease to change shake sensitivity. This
                                requires a minimum value of 10. 10 is the total
                                acceleration if the board is not moving, therefore
                                anything less than 10 will erroneously report a constant
                                shake detected. (Default 30)
        :param avg_count: The number of readings taken and used for the average
                          acceleration. (Default 10)
        :param total_delay: The total time in seconds it takes to obtain avg_count
                            readings from acceleration. (Default 0.1)
        """
        shake_accel = (0, 0, 0)
        for _ in range(avg_count):
            # shake_accel creates a list of tuples from acceleration data.
            # zip takes multiple tuples and zips them together, as in:
            # In : zip([-0.2, 0.0, 9.5], [37.9, 13.5, -72.8])
            # Out: [(-0.2, 37.9), (0.0, 13.5), (9.5, -72.8)]
            # map applies sum to each member of this tuple, resulting in a
            # 3-member list. tuple converts this list into a tuple which is
            # used as shake_accel.
            shake_accel = tuple(map(sum, zip(shake_accel, self.acceleration)))
            time.sleep(total_delay / avg_count)
        avg = tuple(value / avg_count for value in shake_accel)
        # Magnitude of the averaged acceleration vector.
        total_accel = math.sqrt(sum(map(lambda x: x * x, avg)))
        return total_accel > shake_threshold
    def read_adc_raw(self, adc):
        """Retrieve the raw analog to digital converter value. ADC must be a
        value 1, 2, or 3.
        """
        if adc < 1 or adc > 3:
            raise ValueError('ADC must be a value 1 to 3!')
        # ADC registers are contiguous 16-bit pairs starting at OUTADC1_L;
        # 0x80 enables address auto-increment for the 2-byte read.
        return struct.unpack('<h', self._read_register((_REG_OUTADC1_L+((adc-1)*2)) | 0x80, 2))[0]
    def read_adc_mV(self, adc): # pylint: disable=invalid-name
        """Read the specified analog to digital converter value in millivolts.
        ADC must be a value 1, 2, or 3. NOTE the ADC can only measure voltages
        in the range of ~900-1200mV!
        """
        raw = self.read_adc_raw(adc)
        # Interpolate between 900mV and 1800mV, see:
        # https://learn.adafruit.com/adafruit-lis3dh-triple-axis-accelerometer-breakout/wiring-and-test#reading-the-3-adc-pins
        # This is a simplified linear interpolation of:
        # return y0 + (x-x0)*((y1-y0)/(x1-x0))
        # Where:
        # x = ADC value
        # x0 = -32512
        # x1 = 32512
        # y0 = 1800
        # y1 = 900
        return 1800+(raw+32512)*(-900/65024)
    @property
    def tapped(self):
        """
        True if a tap was detected recently. Whether its a single tap or double tap is
        determined by the tap param on ``set_tap``. ``tapped`` may be True over
        multiple reads even if only a single tap or single double tap occurred if the
        interrupt (int) pin is not specified.
        The following example uses ``i2c`` and specifies the interrupt pin:
        .. code-block:: python
          import adafruit_lis3dh
          import digitalio
          i2c = busio.I2C(board.SCL, board.SDA)
          int1 = digitalio.DigitalInOut(board.D11) # pin connected to interrupt
          lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, int1=int1)
          lis3dh.range = adafruit_lis3dh.RANGE_8_G
        """
        # With an interrupt pin configured, a low pin means no pending tap.
        if self._int1 and not self._int1.value:
            return False
        # Bit 6 of CLICK_SRC indicates a click/tap event.
        raw = self._read_register_byte(_REG_CLICKSRC)
        return raw & 0x40 > 0
    def set_tap(self, tap, threshold, *,
                time_limit=10, time_latency=20, time_window=255, click_cfg=None):
        """
        The tap detection parameters.
        .. note:: Tap related registers are called ``CLICK_`` in the datasheet.
        :param int tap: 0 to disable tap detection, 1 to detect only single
                        taps, and 2 to detect only double taps.
        :param int threshold: A threshold for the tap detection. The higher the value
                              the less sensitive the detection. This changes based on
                              the accelerometer range. Good values are 5-10 for 16G,
                              10-20 for 8G, 20-40 for 4G, and 40-80 for 2G.
        :param int time_limit: TIME_LIMIT register value (default 10).
        :param int time_latency: TIME_LATENCY register value (default 20).
        :param int time_window: TIME_WINDOW register value (default 255).
        :param int click_cfg: CLICK_CFG register value.
        """
        if (tap < 0 or tap > 2) and click_cfg is None:
            raise ValueError('Tap must be 0 (disabled), 1 (single tap), or 2 (double tap)!')
        if threshold > 127 or threshold < 0:
            raise ValueError('Threshold out of range (0-127)')
        ctrl3 = self._read_register_byte(_REG_CTRL3)
        if tap == 0 and click_cfg is None:
            # Disable click interrupt.
            self._write_register_byte(_REG_CTRL3, ctrl3 & ~(0x80)) # Turn off I1_CLICK.
            self._write_register_byte(_REG_CLICKCFG, 0)
            return
        else:
            self._write_register_byte(_REG_CTRL3, ctrl3 | 0x80) # Turn on int1 click output
        if click_cfg is None:
            if tap == 1:
                click_cfg = 0x15 # Turn on all axes & singletap.
            if tap == 2:
                click_cfg = 0x2A # Turn on all axes & doubletap.
        # Or, if a custom click configuration register value specified, use it.
        self._write_register_byte(_REG_CLICKCFG, click_cfg)
        # Bit 7 of CLICK_THS latches the interrupt until CLICK_SRC is read.
        self._write_register_byte(_REG_CLICKTHS, 0x80 | threshold)
        self._write_register_byte(_REG_TIMELIMIT, time_limit)
        self._write_register_byte(_REG_TIMELATENCY, time_latency)
        self._write_register_byte(_REG_TIMEWINDOW, time_window)
    def _read_register_byte(self, register):
        # Read a byte register value and return it.
        return self._read_register(register, 1)[0]
    def _read_register(self, register, length):
        # Read an arbitrarily long register (specified by length number of
        # bytes) and return a bytearray of the retrieved data.
        # Subclasses MUST implement this!
        raise NotImplementedError
    def _write_register_byte(self, register, value):
        # Write a single byte register at the specified register address.
        # Subclasses MUST implement this!
        raise NotImplementedError
"""
class LIS3DH_I2C(LIS3DH):
def __init__(self, i2c, *, address=0x18, int1=None, int2=None):
from machine import I2C
self.address = address
self._i2c = i2c
self._buffer = bytearray(6)
super().__init__(int1=int1, int2=int2)
def _read_register(self, register, length):
self._buffer[0] = register & 0xFF
self._i2c.writeto(self.address, self._buffer[0:1])
self._i2c.readfrom_into(self.address, self._buffer[0:length])
return self._buffer
def _write_register_byte(self, register, value):
self._buffer[0] = register & 0xFF
self._buffer[1] = value & 0xFF
self._i2c.writeto(self.address, self._buffer[0:2])
"""
class LIS3DH_I2C(LIS3DH):
    """Driver for the LIS3DH accelerometer connected over I2C.

    :param i2c: an initialised ``machine.I2C`` bus object.
    :param int address: 7-bit device address; 0x18 by default, 0x19 when the
        SDO/SA0 pin is pulled high.
    :param int1: optional interrupt pin 1 (passed to the base class).
    :param int2: optional interrupt pin 2 (passed to the base class).
    """
    def __init__(self, i2c, *, address=0x18, int1=None, int2=None):
        # (removed unused `from machine import I2C` — the caller supplies
        # the already-constructed bus object.)
        self.address = address
        self._i2c = i2c
        self._buffer = bytearray(6)  # scratch buffer reused for register writes
        super().__init__(int1=int1, int2=int2)
    def _read_register(self, register, length):
        # Read `length` bytes starting at `register`.
        # Bug fix: previously this hard-coded the default address 0x18,
        # ignoring self.address and breaking devices strapped to 0x19.
        return self._i2c.readfrom_mem(self.address, register, length)
    def _write_register_byte(self, register, value):
        # Write a single byte `value` to `register`.
        self._buffer[0] = register & 0xFF
        self._buffer[1] = value & 0xFF
        self._i2c.writeto(self.address, self._buffer[0:2])
class LIS3DH_SPI(LIS3DH):
    """Driver for the LIS3DH accelerometer connected over SPI.

    :param spi: the SPI bus object.
    :param cs: chip-select pin for this device.
    :param int baudrate: SPI clock rate (default 100 kHz).
    """
    def __init__(self, spi, cs, *, baudrate=100000, int1=None, int2=None):
        # Third-party dependency (Adafruit bus-device helper wraps CS handling).
        import adafruit_bus_device.spi_device as spi_device
        self._spi = spi_device.SPIDevice(spi, cs, baudrate=baudrate)
        self._buffer = bytearray(6)  # scratch buffer reused for transfers
        super().__init__(int1=int1, int2=int2)
    def _read_register(self, register, length):
        # LIS3DH SPI protocol: bit 7 = read, bit 6 = address auto-increment.
        if length == 1:
            self._buffer[0] = (register | 0x80) & 0xFF  # Read single, bit 7 high.
        else:
            self._buffer[0] = (register | 0xC0) & 0xFF  # Read multiple, bit 6&7 high.
        with self._spi as spi:
            spi.write(self._buffer, start=0, end=1) # pylint: disable=no-member
            spi.readinto(self._buffer, start=0, end=length) # pylint: disable=no-member
        return self._buffer
    def _write_register_byte(self, register, value):
        self._buffer[0] = register & 0x7F # Write, bit 7 low.
        self._buffer[1] = value & 0xFF
        with self._spi as spi:
            spi.write(self._buffer, start=0, end=2) # pylint: disable=no-member
acfbd233853c5fef7c39ecc9061dfb2c533684dd | 1,445 | py | Python | app.py | sathishmtech01/rasa_chatbot | 7bacb1a2fc941a80929249d920be4b003ea7438b | [
"Apache-2.0"
] | 1 | 2020-09-01T10:21:04.000Z | 2020-09-01T10:21:04.000Z | app.py | sathishmtech01/rasa_chatbot | 7bacb1a2fc941a80929249d920be4b003ea7438b | [
"Apache-2.0"
] | 6 | 2020-06-18T15:01:51.000Z | 2022-02-12T10:18:54.000Z | app.py | sathishmtech01/rasa_chatbot | 7bacb1a2fc941a80929249d920be4b003ea7438b | [
"Apache-2.0"
] | null | null | null | from flask import Flask,request
from flask_restful import Resource, Api
from flask_cors import CORS
import random
# Flask app with permissive CORS so browser clients from any origin can
# reach the webhook endpoint.
# Fix: the app was instantiated twice; the first instance (and a dead
# commented-out Api(app) line) have been removed.
app = Flask(__name__)
cors = CORS(app, resources={r"*": {"origins": "*"}})
api = Api(app)
class HelloWorld(Resource):
    """Stub chatbot webhook that mimics Rasa's /webhooks/rest/webhook API."""

    def get(self):
        """Return a fixed canned message (simple liveness/demo response)."""
        return [{"recipient_id": "default", "text": "A circus clown once bumped into Chuck Norris. It took him only three seconds to twist the clown into an animal balloon."}]

    def post(self):
        """Reply to a sender with a random greeting and a random image.

        Expects a JSON body of the form ``{"sender": "<id>", ...}`` and
        returns a two-message list keyed by that sender id.
        """
        import json  # local import: module has no top-level json dependency
        text = ['CSK', 'Boss', 'Sir', 'Bro', 'Man']
        image = ["https://i.imgur.com/nGF1K8f.jpg","https://images.unsplash.com/photo-1534188753412-3e26d0d618d6?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1834&q=80",
                 "https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/cute-baby-animals-1558535060.jpg?crop=1.00xw:0.669xh;0,0.158xh&resize=980:*"
                 ]
        # SECURITY FIX: the request body is untrusted client input; the
        # previous eval() call allowed arbitrary code execution. Parse the
        # body strictly as JSON instead (also handles true/false/null,
        # which eval() could not).
        request_input = json.loads(request.data.decode("utf-8"))
        output = [{"recipient_id": request_input["sender"], "text": "hello " + random.choice(text)},
                  {"recipient_id": request_input["sender"], "image": random.choice(image)}]
        return output
# Expose the stub under the same path Rasa's REST channel uses.
api.add_resource(HelloWorld, '/webhooks/rest/webhook')
if __name__ == '__main__':
    # Dev-server entry point only; debug=True must not be used in production.
    app.run(debug=True,port=5005)
acfbd2daef8eda9a6f4946da20a53c32ea9f106a | 34,718 | py | Python | tests/pytests/test_json.py | simonprickett/RediSearch | 6f720f2d81d8c29133b9215badadb246c214141f | [
"MIT",
"Ruby",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/pytests/test_json.py | simonprickett/RediSearch | 6f720f2d81d8c29133b9215badadb246c214141f | [
"MIT",
"Ruby",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/pytests/test_json.py | simonprickett/RediSearch | 6f720f2d81d8c29133b9215badadb246c214141f | [
"MIT",
"Ruby",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import bz2
from common import getConnectionByEnv, waitForIndex, toSortedFlatList, slice_at, index_info
from includes import *
UNSTABLE_TESTS = os.getenv('UNSTABLE_TESTS', '0') == '1'
GAMES_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'games.json.bz2')
doc1_content = r'''{"string": "gotcha1",
"null": null,
"boolT": true,
"boolN": true,
"int": 972,
"flt": 9.72,
"geo": "1.23,4.56",
"obj": {"int": 1, "string": "gotcha6","c": null},
"complex_arr": [42, null, -1.2, false, {"nested_array":["sub", "array", "gotcha2"]}, {"nested_obj": "gotcha3"}, "gotcha4"],
"scalar_arr": [42, null, -1.2, false, "gotcha5"],
"string_arr": ["a", "b", "c", "d", "e", "f", "gotcha6"]
}'''
def testSearchUpdatedContent(env):
    """End-to-end check that JSON documents set before and after index
    creation are indexed, and that in-place JSON updates (SET on a path,
    NUMINCRBY) are reflected in subsequent searches.

    The statement order is load-bearing: doc:1 is written before FT.CREATE,
    doc:2 after, and both must end up searchable.
    """
    conn = getConnectionByEnv(env)
    # TODO: test when rejson module is loaded after search
    # TODO: test when rejson module is loaded before search
    # TODO: test when rejson module is not loaded (fail gracefully with error messages)
    # Set a value before index is defined
    plain_val_1_raw = r'{"t":"rex","n":12}'
    plain_val_1 = '['+plain_val_1_raw+']'
    res = conn.execute_command('json.get', 'doc:1', '$')
    env.assertEqual(res, None)
    conn.execute_command('json.set', 'doc:1', '$', plain_val_1_raw)
    # '$' returns a JSON array of matches; '.' (legacy path) the raw value.
    res = conn.execute_command('json.get', 'doc:1', '$')
    env.assertEqual(json.loads(res), json.loads(plain_val_1))
    res = conn.execute_command('json.get', 'doc:1', '.')
    env.assertEqual(json.loads(res), json.loads(plain_val_1_raw))
    # Index creation
    conn.execute_command('FT.CREATE', 'idx1', 'ON', 'JSON', 'SCHEMA', '$.t', 'AS', 'labelT', 'TEXT', '$.n', 'AS',
                         'labelN', 'NUMERIC')
    waitForIndex(env, 'idx1')
    # No results before ingestion
    env.expect('ft.search', 'idx1', 'rice*').equal([0L])
    # Set another value after index was defined
    plain_val_2_raw = r'{"t":"riceratops","n":9}'
    plain_val_2 = '[' + plain_val_2_raw + ']'
    conn.execute_command('json.set', 'doc:2', '$', plain_val_2_raw)
    res = conn.execute_command('json.get', 'doc:2', '$')
    env.assertEqual(json.loads(res), json.loads(plain_val_2))
    res = conn.execute_command('json.get', 'doc:2', '.')
    env.assertEqual(json.loads(res), json.loads(plain_val_2_raw))
    res = conn.execute_command('json.get', 'doc:2', '$.n')
    env.assertEqual(res, '[9]')
    res = conn.execute_command('json.get', 'doc:2', '.n')
    env.assertEqual(res, '9')
    res = conn.execute_command('json.get', 'doc:2', '$.t')
    env.assertEqual(res, '["riceratops"]')
    res = conn.execute_command('json.get', 'doc:2', '.t')
    env.assertEqual(res, '"riceratops"')
    # Test updated values are found
    # (re-parse the returned '$' payloads so comparison ignores key order)
    expected = [2L, 'doc:1', ['$', json.loads(plain_val_1_raw)], 'doc:2', ['$', json.loads(plain_val_2_raw)]]
    res = env.cmd('ft.search', 'idx1', '*')
    res[2][1] = json.loads(res[2][1])
    res[4][1] = json.loads(res[4][1])
    env.assertEqual(res, expected)
    expected = [1L, 'doc:1', ['$', json.loads(plain_val_1_raw)]]
    res = env.cmd('ft.search', 'idx1', 're*')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, expected)
    # TODO: Why does the following result look like that? (1 count and 2 arrays of result pairs)
    res = env.execute_command('ft.aggregate', 'idx1', '*', 'LOAD', '1', 'labelT')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, ['labelT', 'rex'], ['labelT', 'riceratops']]))
    env.expect('ft.aggregate', 'idx1', 're*', 'LOAD', '1', 'labelT').equal([1L, ['labelT', 'rex']])
    res = env.execute_command('ft.aggregate', 'idx1', '*', 'LOAD', '1', 'labelT')
    # Update an existing text value
    plain_text_val_3_raw = '"hescelosaurus"'
    plain_text_val_3 = '[' +plain_text_val_3_raw + ']'
    env.expect('json.set', 'doc:1', '$.t', plain_text_val_3_raw).ok()
    env.expect('json.get', 'doc:1', '$.t').equal(plain_text_val_3)
    # Update an existing int value
    plain_int_val_3 = '13'
    int_incrby_3 = '2'
    plain_int_res_val_3 = str(int(plain_int_val_3) + int(int_incrby_3))
    env.expect('json.set', 'doc:1', '$.n', plain_int_val_3).ok()
    # test JSON.NUMINCRBY
    env.expect('json.numincrby', 'doc:1', '$.n', int_incrby_3).equal(plain_int_res_val_3)
    expected = [1L, 'doc:1', ['$', json.loads(r'{"t":"hescelosaurus","n":' + plain_int_res_val_3 + '}')]]
    res = env.cmd('ft.search', 'idx1', 'he*')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, expected)
    expected = [1L, 'doc:2', ['$', json.loads('{"t":"riceratops","n":9}')]]
    res = env.cmd('ft.search', 'idx1', 'riceratops', 'RETURN', '1', '$')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, expected)
    env.expect('ft.search', 'idx1', 'riceratops', 'RETURN', '1', '$.n').equal([1L, 'doc:2', ['$.n', '9']])
    env.expect('ft.search', 'idx1', 'riceratops', 'RETURN', '1', '$.t').equal([1L, 'doc:2', ['$.t', 'riceratops']])
    # FIXME: Test PREFIX, SORTBY, NOSTEM, Fuzzy, Pagination, Limit 0 0, Score - Need to repeat all search testing as done on hash?
    # FIXME: Test Aggregate - Need to repeat all aggregate testing as done on hash?
    # TODO: Check null values
    # TODO: Check arrays
    # TODO: Check Object/Map
def testHandleUnindexedTypes(env):
    """Indexing should skip unindexable JSON types (null, object, mixed
    array) but still index scalar-array TAG fields. Gated behind
    UNSTABLE_TESTS because this behavior is not finalized.
    """
    # TODO: Ignore and resume indexing when encountering an Object/Array/null
    # TODO: Except for array of only scalars which is defined as a TAG in the schema
    #       ... FT.CREATE idx SCHEMA $.arr TAG
    if not UNSTABLE_TESTS:
        env.skip()
    env.expect('JSON.SET', 'doc:1', '$', doc1_content).ok()
    env.expect('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA',
               '$.string', 'AS', 'string', 'TEXT',
               '$.null', 'AS', 'nil', 'TEXT',
               '$.boolT', 'AS', 'boolT', 'TEXT',
               '$.boolN', 'AS', 'boolN', 'NUMERIC',
               '$.int', 'AS', 'int', 'NUMERIC',
               '$.flt', 'AS', 'flt', 'NUMERIC',
               '$.geo', 'AS', 'geo', 'GEO',
               '$.obj', 'AS', 'obj', 'TEXT',
               '$.complex_arr', 'AS', 'complex_arr', 'TEXT',
               '$.scalar_arr', 'AS', 'scalar_arr', 'TAG',
               '$.int_arr', 'AS', 'int_arr', 'TAG',
               ).ok()
    waitForIndex(env, 'idx')
    # FIXME: Why does the following search return zero results?
    env.expect('ft.search', 'idx', '*', 'RETURN', '2', 'string', 'int_arr')\
        .equal([1L, 'doc:1', ['string', '"gotcha1"', 'int_arr', ["a", "b", "c", "d", "e", "f", "gotcha6"]]])
    # TODO: test TAGVALS ?
    pass
def testReturnAllTypes(env):
    """Placeholder: RETURN should work for all JSON types, even ones that
    cannot be indexed, as long as some indexed field matched."""
    # Test returning all JSON types
    # (even if some of them are not able to be indexed/found,
    #  they can be returned together with other fields which are indexed)
    env.expect('JSON.SET', 'doc:1', '$', doc1_content).ok()
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.string', 'AS', 'string', 'TEXT')
    # TODO: Make sure TAG can be used as a label in "FT.SEARCH idx "*" RETURN $.t As Tag"
    pass
def testOldJsonPathSyntax(env):
    """Placeholder: legacy JSONPath syntax ('.t' / 't') should behave like '$.t'."""
    # Make sure root path '.' is working
    # For example, '$.t' should also work as '.t' and 't'
    pass
def testNoContent(env):
    """FT.SEARCH with NOCONTENT on a JSON index returns only document keys."""
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT', '$.flt', 'NUMERIC')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"riceratops","n":"9072","flt":97.2}')
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([0L])
    env.expect('ft.search', 'idx', 'ri*', 'NOCONTENT').equal([1L, 'doc:1'])
def testDocNoFullSchema(env):
    """A document providing only part of the schema fields is still indexed
    on the fields it does have."""
    env.cmd('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t1', 'TEXT', '$.t2', 'TEXT')
    env.cmd('JSON.SET', 'doc:1', '$', r'{"t1":"riceratops"}')
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([0L])
    env.expect('ft.search', 'idx', 'ri*', 'NOCONTENT').equal([1L, 'doc:1'])
def testReturnRoot(env):
    """RETURN '$' yields the whole JSON document as the field value."""
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"foo"}')
    env.expect('ft.search', 'idx', 'foo', 'RETURN', '1', '$').equal([1L, 'doc:1', ['$', '{"t":"foo"}']])
def testNonEnglish(env):
    """JSON documents with non-Latin text (Japanese, Chinese) can be stored,
    fetched via JSONPath, and returned with a RETURN ... AS alias."""
    # Test json in non-English languages
    env.execute_command('FT.CREATE', 'idx1', 'ON', 'JSON', 'SCHEMA', '$.t', 'AS', 'labelT', 'TEXT', '$.n', 'AS',
                        'labelN', 'NUMERIC')
    japanese_value_1 = 'ドラゴン'
    japanese_doc_value_raw = r'{"t":"' + japanese_value_1 + r'","n":5}'
    japanese_doc_value = [ json.loads(japanese_doc_value_raw) ]
    env.expect('json.set', 'doc:4', '$', japanese_doc_value_raw).ok()
    env.assertEqual(json.loads(env.cmd('json.get', 'doc:4', '$')), japanese_doc_value)
    env.assertEqual(json.loads(env.cmd('json.get', 'doc:4', '.')), json.loads(japanese_doc_value_raw))
    env.expect('json.get', 'doc:4', '$.t').equal('["' + japanese_value_1 + '"]')
    env.expect('json.get', 'doc:4', '.t').equal('"' + japanese_value_1 + '"')
    chinese_value_1_raw = r'{"t":"踪迹","n":5}'
    chinese_value_1 = [ json.loads(chinese_value_1_raw)]
    env.expect('json.set', 'doc:5', '$', chinese_value_1_raw).ok()
    env.assertEqual(json.loads(env.cmd('json.get', 'doc:5', '$')), chinese_value_1)
    env.assertEqual(json.loads(env.cmd('json.get', 'doc:5', '.')), json.loads(chinese_value_1_raw))
    # Expected values below are the UTF-8 byte escapes of the same strings
    # (Python 2 str comparison).
    env.expect('ft.search', 'idx1', '*', 'RETURN', '3', '$.t', 'AS', 'MyReturnLabel') \
        .equal([2L,
                'doc:4', ['MyReturnLabel', '\xe3\x83\x89\xe3\x83\xa9\xe3\x82\xb4\xe3\x83\xb3'],
                'doc:5', ['MyReturnLabel', '\xe8\xb8\xaa\xe8\xbf\xb9']])
def testSet(env):
    """JSON.SET-ingested documents are found by exact, case-insensitive,
    and prefix queries."""
    # JSON.SET (either set the entire key or a sub-value)
    # Can also do multiple changes/side-effects, such as converting an object to a scalar
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"ReJSON"}')
    res = [1L, 'doc:1', ['$', '{"t":"ReJSON"}']]
    env.expect('ft.search', 'idx', 'rejson').equal(res)
    env.expect('ft.search', 'idx', 'ReJSON').equal(res)
    env.expect('ft.search', 'idx', 're*').equal(res)
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([1L, 'doc:1'])
def testDel(env):
    """Deleting an indexed JSON path (JSON.DEL / JSON.FORGET) removes the
    document from search results."""
    conn = getConnectionByEnv(env)
    # JSON.DEL and JSON.FORGET
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
    conn.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"ReJSON"}')
    conn.execute_command('JSON.SET', 'doc:2', '$', r'{"t":"RediSearch"}')
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([2L, 'doc:1', 'doc:2'])
    res = conn.execute_command('JSON.DEL', 'doc:2', '$.t')
    env.assertEqual(res, 1L)
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([1L, 'doc:1'])
    # FORGET is an alias of DEL; it should behave identically.
    res = conn.execute_command('JSON.FORGET', 'doc:1', '$.t')
    env.assertEqual(res, 1L)
    env.expect('ft.search', 'idx', 're*', 'NOCONTENT').equal([0L])
def testToggle(env):
    """JSON.TOGGLE on an indexed boolean path is reflected in search results."""
    # JSON.TOGGLE
    env.expect('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA',
               '$.boolT', 'AS', 'boolT', 'TAG').ok()
    env.expect('JSON.SET', 'doc:1', '$', r'{"boolT":false}').ok()
    env.expect('ft.search', 'idx', '*').equal([1L, 'doc:1', ['$', '{"boolT":false}']])
    env.expect('JSON.TOGGLE','doc:1','$.boolT').equal('true')
    env.expect('ft.search', 'idx', '*').equal([1L, 'doc:1', ['$', '{"boolT":true}']])
def testStrappend(env):
    """JSON.STRAPPEND re-indexes the document: the old term stops matching
    and the appended string matches instead."""
    # JSON.STRAPPEND
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"Redis"}')
    env.expect('json.get', 'doc:1', '$').equal('[{"t":"Redis"}]')
    env.expect('ft.search', 'idx', '*').equal([1L, 'doc:1', ['$', '{"t":"Redis"}']])
    env.expect('ft.search', 'idx', 'Redis').equal([1L, 'doc:1', ['$', '{"t":"Redis"}']])
    env.execute_command('JSON.STRAPPEND', 'doc:1', '.t', '"Labs"')
    env.expect('json.get', 'doc:1', '$').equal('[{"t":"RedisLabs"}]')
    env.expect('ft.search', 'idx', '*').equal([1L, 'doc:1', ['$', '{"t":"RedisLabs"}']])
    env.expect('ft.search', 'idx', 'RedisLabs').equal([1L, 'doc:1', ['$', '{"t":"RedisLabs"}']])
    # Original term no longer matches after the append.
    env.expect('ft.search', 'idx', 'Redis').equal([0L])
def testArrappend(env):
    """Placeholder: JSON.ARRAPPEND re-indexing is not supported yet."""
    # JSON.ARRAPPEND
    # FIXME: Currently unsupported
    pass
def testArrInsert(env):
    """Placeholder: JSON.ARRINSERT re-indexing is not supported yet."""
    # JSON.ARRINSERT
    # FIXME: Currently unsupported
    pass
def testArrpop(env):
    """JSON.ARRPOP updates the indexed document content (gated: array
    indexing is not supported yet, so this runs only with UNSTABLE_TESTS)."""
    # TODO: array cannot be indexed yet
    if not UNSTABLE_TESTS:
        env.skip()
    # JSON.ARRPOP
    env.execute_command('FT.CREATE', 'idx1', 'ON', 'JSON', 'SCHEMA', '$.t', 'AS', 'labelT', 'TAG')
    env.expect('JSON.SET', 'doc:1', '$', '{"t":["foo", "bar", "back"]}').ok()
    # FIXME: Enable the following line: Should we search in array content? Need TAG for that?
    #env.expect('FT.SEARCH', 'idx1', 'ba*', 'RETURN', '1', 'labelT').equal([1L, 'doc:1', ['labelT', '"bar"']])
    # FIXME: Why aggregate 'ba*' returns zero results?
    # env.expect('FT.AGGREGATE', 'idx1', 'ba*', 'LOAD', '3', '@$.t', 'AS', 't').equal([1L, ['t', '["foo","bar","back"]']])
    env.expect('FT.SEARCH', 'idx1', '*').equal([1L, 'doc:1', ['$', '{"t":["foo","bar","back"]}']])
    env.expect('FT.AGGREGATE', 'idx1', '*', 'LOAD', '3', '@$.t', 'AS', 't').equal([1L, ['t', '["foo","bar","back"]']])
    # Pop from the tail (default), then from the head (index 0).
    env.expect('JSON.ARRPOP', 'doc:1', '$.t').equal('"back"')
    env.expect('FT.SEARCH', 'idx1', '*').equal([1L, 'doc:1', ['$', '{"t":["foo","bar"]}']])
    env.expect('JSON.ARRPOP', 'doc:1', '$.t', 0).equal('"foo"')
    env.expect('FT.SEARCH', 'idx1', '*').equal([1L, 'doc:1', ['$', '{"t":["bar"]}']])
def testRootValues(env):
    """Placeholder: searching scalar JSON values stored at the root path."""
    # Search all JSON types as a top-level element
    # FIXME:
    pass
def testArrtrim(env):
    """Placeholder: JSON.ARRTRIM re-indexing is not covered yet."""
    # json.arrtrim
    # FIXME:
    pass
def testAsTag(env):
    """A JSON string indexed as TAG with SEPARATOR ',' is split into
    individual tag values; the unsplit escaped value must not match."""
    res = env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON',
                              'SCHEMA', '$.tag', 'AS', 'tag', 'TAG', 'SEPARATOR', ',')
    env.expect('JSON.SET', 'doc:1', '$', '{"tag":"foo,bar,baz"}').ok()
    env.expect('JSON.GET', 'doc:1', '$').equal('[{"tag":"foo,bar,baz"}]')
    env.expect('JSON.GET', 'doc:1', '$.tag').equal('["foo,bar,baz"]')
    res = [1L, 'doc:1', ['$', '{"tag":"foo,bar,baz"}']]
    env.expect('FT.SEARCH', 'idx', '@tag:{foo}').equal(res)
    env.expect('FT.SEARCH', 'idx', '@tag:{bar}').equal(res)
    env.expect('FT.SEARCH', 'idx', '@tag:{baz}').equal(res)
    # The whole string as a single escaped tag must not exist.
    env.expect('FT.SEARCH', 'idx', '@tag:{foo\\,bar\\,baz}').equal([0L])
def testMultiValueTag(env):
    """A JSONPath with '[*]' indexed as TAG treats each array element as a
    tag value, additionally split by the declared separator."""
    conn = getConnectionByEnv(env)
    # Index with Tag for array with multi-values
    res = env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON',
                              'SCHEMA', '$.tag[*]', 'AS', 'tag', 'TAG', 'SEPARATOR', ',')
    # multivalue without a separator
    #
    env.assertOk(conn.execute_command('JSON.SET', 'doc:1', '$', '{"tag":["foo", "bar", "baz"]}'))
    env.assertOk(conn.execute_command('JSON.SET', 'doc:2', '$', '{"tag":["foo, bar", "baz"]}'))
    env.assertOk(conn.execute_command('JSON.SET', 'doc:3', '$', '{"tag":["foo, bar, baz"]}'))
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:1', '$'), '[{"tag":["foo","bar","baz"]}]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:1', '$.tag'), '[["foo","bar","baz"]]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:1', '$.tag[*]'), '["foo","bar","baz"]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:2', '$'), '[{"tag":["foo, bar","baz"]}]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:2', '$.tag'), '[["foo, bar","baz"]]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:2', '$.tag[*]'), '["foo, bar","baz"]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:3', '$'), '[{"tag":["foo, bar, baz"]}]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:3', '$.tag'), '[["foo, bar, baz"]]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:3', '$.tag[*]'), '["foo, bar, baz"]')
    res = [3L, 'doc:1', ['$', '{"tag":["foo","bar","baz"]}'],
               'doc:2', ['$', '{"tag":["foo, bar","baz"]}'],
               'doc:3', ['$', '{"tag":["foo, bar, baz"]}']]
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag:{foo}'), res)
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag:{bar}'), res)
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag:{baz}'), res)
    # NOTE(review): '/,' here differs from the '\\,' escape used in
    # testAsTag — presumably a typo, but the expected result is [0L]
    # either way; confirm the intended escape character.
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag:{foo/,bar/,baz}'), [0L])
def testMultiValueTag_Recursive_Decent(env):
    """A recursive-descent JSONPath ('$..name') indexes every matching
    value at any nesting depth as a tag."""
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx', 'ON', 'JSON',
                         'SCHEMA', '$..name', 'AS', 'name', 'TAG')
    conn.execute_command('JSON.SET', 'doc:1', '$', '{"name":"foo", "in" : {"name":"bar"}}')
    res = [1L, 'doc:1', ['$', '{"name":"foo","in":{"name":"bar"}}']]
    # Both the top-level and the nested name values must match.
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@name:{foo}'), res)
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@name:{bar}'), res)
def testMultiValueErrors(env):
    """Non-TAG field types (TEXT/NUMERIC/GEO) must fail to index an array
    value, which shows up as a hash_indexing_failure in FT.INFO."""
    # Index with Tag for array with multi-values
    env.execute_command('FT.CREATE', 'idxtext', 'ON', 'JSON',
                        'SCHEMA', '$.text', 'AS', 'text', 'TEXT')
    env.execute_command('FT.CREATE', 'idxnum', 'ON', 'JSON',
                        'SCHEMA', '$.num', 'AS', 'num', 'NUMERIC')
    env.execute_command('FT.CREATE', 'idxgeo', 'ON', 'JSON',
                        'SCHEMA', '$.geo', 'AS', 'geo', 'GEO')
    env.expect('JSON.SET', 'doc:1', '$', '{"text":["foo, bar","baz"], \
                                           "num":[1,2,3,3.14],       \
                                           "geo":["1.234, 4.321", "0.123, 3.210"]}').ok()
    # test non-tag indexes fail to index multivalue
    indexes = ['idxtext', 'idxnum', 'idxgeo']
    for index in indexes:
        # Flatten FT.INFO's flat key/value reply into a dict for lookup.
        res_actual = env.cmd('FT.INFO', index)
        res_actual = {res_actual[i]: res_actual[i + 1] for i in range(0, len(res_actual), 2)}
        env.assertEqual(str(res_actual['hash_indexing_failures']), '1')
def add_values(env, number_of_iterations=1):
    """Create the 'games' index and load the bz2 games fixture into it.

    :param env: test environment (provides the Redis connection).
    :param number_of_iterations: how many times to load the fixture; after
        the first pass, document ids get the iteration number appended so
        every pass adds distinct documents.
    """
    env.execute_command('FT.CREATE', 'games', 'ON', 'JSON',
                        'SCHEMA', '$.title', 'TEXT', 'SORTABLE',
                        '$.brand', 'TEXT', 'NOSTEM', 'SORTABLE',
                        )  # ,'$.description', 'AS', 'description', 'TEXT', 'price', 'NUMERIC',
                           # 'categories', 'TAG')
    conn = getConnectionByEnv(env)
    for i in range(number_of_iterations):
        # Fix: use a context manager so the fixture file is closed even if
        # a JSON.SET command raises mid-load (previously fp leaked on error).
        with bz2.BZ2File(GAMES_JSON, 'r') as fp:
            for line in fp:
                obj = json.loads(line)
                # Use the product asin as the key; suffix with the iteration
                # number on repeat passes so keys stay unique.
                id = obj['asin'] + (str(i) if i > 0 else '')
                del obj['asin']
                b = obj.get('brand')
                obj['brand'] = str(b) if b else ""
                # FIXME: When NUMERIC is restored, restore 'price'
                del obj['price']
                # obj['price'] = obj.get('price') or 0
                str_val = json.dumps(obj)
                cmd = ['JSON.SET', id, '$', str_val]
                conn.execute_command(*cmd)
def testAggregate(env):
    """FT.AGGREGATE GROUPBY/REDUCE/SORTBY over the games fixture returns
    the top-5 brands by document count."""
    add_values(env)
    cmd = ['ft.aggregate', 'games', '*',
           'GROUPBY', '1', '@$.brand',
           'REDUCE', 'count', '0', 'AS', 'count',
           'SORTBY', 2, '@count', 'desc',
           'LIMIT', '0', '5'
           ]
    env.expect(*cmd).equal([292L, ['$.brand', '', 'count', '1518'],
                                  ['$.brand', 'mad catz', 'count', '43'],
                                  ['$.brand', 'generic', 'count', '40'],
                                  ['$.brand', 'steelseries', 'count', '37'],
                                  ['$.brand', 'logitech', 'count', '35']])
    # FIXME: Test FT.AGGREGATE params - or alternatively reuse test_aggregate.py to also run on json content
def testDemo(env):
    """End-to-end demo: index JSON airport documents with TAG, TEXT
    (NOSTEM + phonetic) and GEO attributes, then verify FT.INFO metadata
    and exact / prefix / phonetic / geo-radius searches."""
    conn = getConnectionByEnv(env)
    # Set a value before index is defined
    tlv = r'{"iata":"TLV","name":"Ben Gurion International Airport","location":"34.8866997,32.01139832"}'
    sfo = r'{"iata":"SFO","name":"San Francisco International Airport","location":"-122.375,37.6189995"}'
    # Expected FT.SEARCH replies: [count, key, ['$', parsed JSON document]].
    tlv_doc = [1L, 'A:TLV', ['$', json.loads(tlv)]]
    sfo_doc = [1L, 'A:SFO', ['$', json.loads(sfo)]]
    conn.execute_command('json.set', 'A:TLV', '$', tlv)
    conn.execute_command('json.set', 'A:SFO', '$', sfo)
    env.expect('FT.CREATE airports ON JSON SCHEMA $.iata AS iata TAG \
                $.iata AS iata_txt TEXT NOSTEM \
                $.name AS name TEXT NOSTEM PHONETIC dm:en \
                $.location AS location GEO').ok()
    # Re-set the documents after index creation so both get indexed.
    conn.execute_command('json.set', 'A:TLV', '$', tlv)
    conn.execute_command('json.set', 'A:SFO', '$', sfo)
    info = env.cmd('FT.INFO airports')
    env.assertEqual(slice_at(info, 'index_name')[0], 'airports')
    env.assertEqual(slice_at(slice_at(info, 'index_definition')[0], 'key_type')[0], 'JSON')
    env.assertEqual(slice_at(info, 'attributes')[0],
                    [['identifier', '$.iata', 'attribute', 'iata', 'type', 'TAG', 'SEPARATOR', ''],
                     ['identifier', '$.iata', 'attribute', 'iata_txt', 'type', 'TEXT', 'WEIGHT', '1', 'NOSTEM'],
                     ['identifier', '$.name', 'attribute', 'name', 'type', 'TEXT', 'WEIGHT', '1', 'NOSTEM'],
                     ['identifier', '$.location', 'attribute', 'location', 'type', 'GEO']])
    env.assertEqual(int(slice_at(info, 'num_docs')[0]), 2)
    # Exact term match on the iata_txt TEXT field.
    res = env.cmd('FT.SEARCH', 'airports', 'TLV')
    res[2][1] = json.loads(res[2][1])  # normalize serialized JSON before comparing
    env.assertEqual(res, tlv_doc)
    # Prefix search.
    res = env.cmd('FT.SEARCH', 'airports', 'TL*')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, tlv_doc)
    # Phonetic (dm:en) match on a deliberately misspelled query.
    res = env.cmd('FT.SEARCH', 'airports', 'sen frensysclo')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, sfo_doc)
    # Geo-radius query around downtown San Francisco.
    res = env.cmd('FT.SEARCH', 'airports', '@location:[-122.41 37.77 100 km]')
    res[2][1] = json.loads(res[2][1])
    env.assertEqual(res, sfo_doc)
    env.expect('FT.SEARCH', 'airports', 'sfo', 'RETURN', '1', '$.name') \
        .equal([1L, 'A:SFO', ['$.name', 'San Francisco International Airport']])
def testIndexSeparation(env):
    """Results from different indexes must not mix: two JSON indexes with
    disjoint schemas, and a HASH index alongside them over similar names."""
    env.expect('HSET', 'hash:1', 't', 'telmatosaurus', 'n', '9', 'f', '9.72').equal(3)
    env.execute_command('FT.CREATE', 'idxHash', 'ON', 'HASH', 'SCHEMA', 't', 'TEXT', 'n', 'NUMERIC', 'f', 'NUMERIC')
    waitForIndex(env, 'idxHash')
    env.execute_command('FT.CREATE', 'idxJson', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT', '$.flt', 'NUMERIC')
    waitForIndex(env, 'idxJson')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"riceratops","t2":"telmatosaurus","n":9072,"flt":97.2}')
    env.execute_command('FT.CREATE', 'idxJson2', 'ON', 'JSON', 'SCHEMA', '$.t2', 'TEXT', '$.flt', 'NUMERIC')
    waitForIndex(env, 'idxJson2')
    # FIXME: Probably a bug where HASH key is found when searching a JSON index
    env.expect('FT.SEARCH', 'idxJson', '*', 'RETURN', '3', '$.t', 'AS', 'txt').equal(
        [1L, 'doc:1', ['txt', 'riceratops']])
    env.expect('FT.SEARCH', 'idxJson2', '*', 'RETURN', '3', '$.t2', 'AS', 'txt').equal(
        [1L, 'doc:1', ['txt', 'telmatosaurus']])
    env.expect('FT.SEARCH', 'idxHash', '*', 'RETURN', '3', 't', 'AS', 'txt').equal(
        [1L, 'hash:1', ['txt', 'telmatosaurus']])
def testMapProjectionAsToSchemaAs(env):
    """A label declared with 'AS' in the schema is usable in RETURN."""
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'AS', 'labelT', 'TEXT', '$.flt', 'AS',
                        'labelFlt', 'NUMERIC')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"riceratops","n":"9072","flt":97.2}')
    env.expect('FT.SEARCH', 'idx', '*', 'RETURN', '1', 'labelT').equal(
        [1L, 'doc:1', ['labelT', 'riceratops']])  # use $.t value
def testAsProjection(env):
    """RETURN (FT.SEARCH) and LOAD (FT.AGGREGATE) with 'AS' aliases, for
    JSON paths both present in and absent from the schema."""
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT', '$.flt', 'NUMERIC')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"riceratops","n":"9072","flt":97.2, "sub":{"t":"rex"}}')
    # Test RETURN with label from schema
    env.expect('FT.SEARCH', 'idx', '*', 'RETURN', '3', '$.t', 'AS', 'txt').equal([1L, 'doc:1', ['txt', 'riceratops']])
    # Test LOAD with label from schema
    env.expect('FT.AGGREGATE', 'idx', '*', 'LOAD', '3', '@$.t', 'AS', 'txt').equal([1L, ['txt', 'riceratops']])
    # Test RETURN with label not from schema
    env.expect('FT.SEARCH', 'idx', '*', 'RETURN', '3', '$.n', 'AS', 'num').equal([1L, 'doc:1', ['num', '9072']])
    # FIXME:: enable next line - why not found?
    #env.expect('FT.SEARCH', 'idx', '907*', 'RETURN', '3', '$.n', 'AS', 'num').equal([1L, 'doc:1', ['num', '"9072"']])
    # Test LOAD with label not from schema
    env.expect('FT.AGGREGATE', 'idx', '*', 'LOAD', '6', '@$.n', 'AS', 'num', '$.sub.t', 'AS', 'subt').equal(
        [1L, ['num', '9072', 'subt', 'rex']])
    # FIXME:: enable next line - why not found?
    # env.expect('FT.AGGREGATE', 'idx', '907*', 'LOAD', '3', '@$.n', 'AS', 'num').equal([1L, ['num', '"9072"']])
    # TODO: Search for numeric field 'flt'
def testAsProjectionRedefinedLabel(env):
    """Redefining a schema 'AS' label on the fly in RETURN/LOAD: new labels,
    labels borrowed from other schema fields, and duplicated labels."""
    conn = getConnectionByEnv(env)
    # Test redefining projection 'AS' label in query params RETURN and LOAD
    # FIXME: Should we fail SEARCH/AGGREGATE command with RETURN/LOAD alias duplication
    #  (as with FT.CREATE)
    #  BTW, in SQLite, it is allowed, e.g., SELECT F1 AS Label1, F2 AS Label1 FROM doc;
    #  (different values for fields F1 and F2 were retrieved with the same label Label1)
    # FIXME: Handle Numeric - In the following line, change '$.n' to: 'AS', 'labelN', 'NUMERIC'
    env.execute_command('FT.CREATE', 'idx2', 'ON', 'JSON', 'SCHEMA',
                        '$.t', 'AS', 'labelT', 'TEXT', '$.n', 'AS', 'labelN', 'TEXT')
    conn.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"riceratops","n":"9072"}')
    # Allow redefining a new label for a field which has a label in the schema
    env.expect('ft.search', 'idx2', '*', 'RETURN', '3', '$.t', 'AS', 'MyOnTheFlyReturnLabel').equal(
        [1L, 'doc:1', ['MyOnTheFlyReturnLabel', 'riceratops']])
    env.expect('ft.aggregate', 'idx2', '*', 'LOAD', '3', '@$.t', 'AS', 'MyOnTheFlyReturnLabel').equal(
        [1L, ['MyOnTheFlyReturnLabel', 'riceratops']])
    # Allow redefining a label with existing label found in another field in the schema
    env.expect('ft.search', 'idx2', '*', 'RETURN', '3', '$.t', 'AS', 'labelN').equal(
        [1L, 'doc:1', ['labelN', 'riceratops']])
    env.expect('ft.aggregate', 'idx2', '*', 'LOAD', '3', '@$.t', 'AS', 'labelN').equal(
        [1L, ['labelN', 'riceratops']])
    # (?) Allow redefining a label with existing label found in another field in the schema,
    # together with just a label from the schema
    env.expect('ft.search', 'idx2', '*', 'RETURN', '4', '$.n', 'AS', 'labelT', 'labelT').equal(
        [1L, 'doc:1', ['labelT', '9072']])
    # TODO: re-enable this
    if UNSTABLE_TESTS:
        env.expect('ft.aggregate', 'idx2', '*', 'LOAD', '4', '@$.n', 'AS', 'labelT', 'labelT').equal(
            [1L, ['labelT', '"9072"', 'labelT', 'riceratops']])
    # NOTE(review): indentation of the two expectations below reconstructed as
    # outside the UNSTABLE_TESTS guard — confirm against upstream history.
    env.expect('ft.search', 'idx2', '*', 'RETURN', '4', '$.n', 'AS', 'labelT', 'labelN').equal(
        [1L, 'doc:1', ['labelT', '9072', 'labelN', '9072']])
    env.expect('ft.aggregate', 'idx2', '*', 'LOAD', '4', '@$.n', 'AS', 'labelT', 'labelN').equal(
        [1L, ['labelT', '9072', 'labelN', '9072']])
def testNumeric(env):
    """NUMERIC attributes on JSON: wildcard and range queries, RETURN of
    both the raw path and an aliased projection."""
    conn = getConnectionByEnv(env)
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$.n', 'AS', 'n', 'NUMERIC', "$.f", 'AS', 'f', 'NUMERIC')
    conn.execute_command('JSON.SET', 'doc:1', '$', r'{"n":9, "f":9.72}')
    env.expect('FT.SEARCH', 'idx', '*', 'RETURN', '3', '$.n', 'AS', 'int').equal([1L, 'doc:1', ['int', '9']])
    env.expect('FT.SEARCH', 'idx', '@n:[0 10]', 'RETURN', '3', '$.n', 'AS', 'int').equal([1L, 'doc:1', ['int', '9']])
    env.expect('FT.SEARCH', 'idx', '@f:[9.5 9.9]', 'RETURN', '1', 'f') \
        .equal([1L, 'doc:1', ['f', '9.72']])
    env.expect('FT.SEARCH', 'idx', '@f:[9.5 9.9]', 'RETURN', '3', '$.f', 'AS', 'flt') \
        .equal([1L, 'doc:1', ['flt', '9.72']])
def testLanguage(env):
    """LANGUAGE_FIELD (per-document) and LANGUAGE (per-index) handling for
    JSON documents; gated behind UNSTABLE_TESTS."""
    if not UNSTABLE_TESTS:
        env.skip()
    # TODO: Check stemming? e.g., trad is stem of traduzioni and tradurre ?
    env.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'LANGUAGE_FIELD', '$.lang', 'SCHEMA', '$.t', 'TEXT')
    env.execute_command('FT.CREATE', 'idx2', 'ON', 'JSON', 'LANGUAGE', 'Italian', 'SCHEMA', '$.domanda', 'TEXT')
    env.execute_command('JSON.SET', 'doc:1', '$', r'{"t":"traduzioni", "lang":"Italian"}')
    env.expect('ft.search', 'idx', 'tradu*', 'RETURN', '1', '$.t' ).equal([1L, 'doc:1', ['$.t', '"traduzioni"']])
    env.execute_command('JSON.SET', 'doc:2', '$', r'{"domanda":"perché"}')
    # Expected value carries the UTF-8 bytes of 'é' ("\xc3\xa9") as returned on the wire.
    env.expect('ft.search', 'idx2', 'per*', 'RETURN', '1', '$.domanda' ).equal([1L, 'doc:2', ['$.domanda', '"perch\xc3\xa9"']])
def testDifferentType(env):
    """A HASH index must only return hash keys and a JSON index only JSON keys."""
    conn = getConnectionByEnv(env)
    env.execute_command('FT.CREATE', 'hidx', 'ON', 'HASH', 'SCHEMA', '$.t', 'TEXT')
    env.execute_command('FT.CREATE', 'jidx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
    conn.execute_command('HSET', 'doc:1', '$.t', 'hello world')
    conn.execute_command('JSON.SET', 'doc:2', '$', r'{"t":"hello world"}')
    env.expect('FT.SEARCH', 'hidx', '*', 'NOCONTENT').equal([1L, 'doc:1'])
    env.expect('FT.SEARCH', 'jidx', '*', 'NOCONTENT').equal([1L, 'doc:2'])
def test_WrongJsonType(env):
# test all possible errors in processing a field
# we test that all documents failed to index
conn = getConnectionByEnv(env)
wrong_types = ['object', 'array', 'null']
conn.execute_command('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA',
'$.object1', 'TEXT',
'$.object2', 'TAG',
'$.object3', 'NUMERIC',
'$.object4', 'GEO',
'$.array1', 'TEXT',
'$.array2', 'NUMERIC',
'$.array3', 'GEO',
'$.numeric1', 'TEXT',
'$.numeric2', 'TAG',
'$.numeric3', 'GEO',
'$.bool1', 'TEXT',
'$.bool2', 'NUMERIC',
'$.bool3', 'GEO',
'$.geo1', 'NUMERIC',
'$.text1', 'NUMERIC',
'$.text2', 'GEO',
'$.null1', 'TEXT',
'$.null2', 'TAG',
'$.null3', 'NUMERIC',
'$.null4', 'GEO')
env.expect('JSON.SET', 'doc', '$', '{"object1":{"1":"foo", "2":"bar"}}').ok()
env.expect('JSON.SET', 'doc', '$', '{"object2":{"1":"foo", "2":"bar"}}').ok()
env.expect('JSON.SET', 'doc', '$', '{"object3":{"1":"foo", "2":"bar"}}').ok()
env.expect('JSON.SET', 'doc', '$', '{"object4":{"1":"foo", "2":"bar"}}').ok()
env.expect('JSON.SET', 'doc', '$', '{"array1":["foo", "bar"]}').ok()
env.expect('JSON.SET', 'doc', '$', '{"array2":["foo", "bar"]}').ok()
env.expect('JSON.SET', 'doc', '$', '{"array3":["foo", "bar"]}').ok()
env.expect('JSON.SET', 'doc', '$', '{"numeric1":3.141}').ok()
env.expect('JSON.SET', 'doc', '$', '{"numeric2":3.141}').ok()
env.expect('JSON.SET', 'doc', '$', '{"numeric3":3.141}').ok()
env.expect('JSON.SET', 'doc', '$', '{"bool1":true}').ok()
env.expect('JSON.SET', 'doc', '$', '{"bool2":true}').ok()
env.expect('JSON.SET', 'doc', '$', '{"bool3":true}').ok()
env.expect('JSON.SET', 'doc', '$', '{"geo1":"1.23,2.34"}').ok()
env.expect('JSON.SET', 'doc', '$', '{"text1":"foo"}').ok()
env.expect('JSON.SET', 'doc', '$', '{"text2":"foo"}').ok()
env.expect('JSON.SET', 'doc', '$', '{"null1":null}').ok()
env.expect('JSON.SET', 'doc', '$', '{"null2":null}').ok()
env.expect('JSON.SET', 'doc', '$', '{"null3":null}').ok()
env.expect('JSON.SET', 'doc', '$', '{"null4":null}').ok()
# no field was indexed
env.expect('FT.SEARCH', 'idx', '*').equal([0L])
# check indexing failed on all field in schema
res = index_info(env, 'idx')
env.assertEqual(int(res['hash_indexing_failures']), len(res['attributes']))
def testTagNoSeparetor(env):  # (sic: 'Separetor' — renaming would change the test id)
    """TAG fields from JSON: a scalar string is indexed as a single tag
    (no separator splitting), while a '[*]' array path indexes each
    element as its own tag."""
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA',
            '$.tag1', 'AS', 'tag_list', 'TAG',
            '$.tag2[*]', 'AS', 'tag_array', 'TAG')
    env.assertOk(conn.execute_command('JSON.SET', 'doc:1', '$', '{"tag1":"foo,bar,baz"}'))
    env.assertOk(conn.execute_command('JSON.SET', 'doc:2', '$', '{"tag2":["foo","bar,baz"]}'))
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:1', '$'), '[{"tag1":"foo,bar,baz"}]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:1', '$.tag1'), '["foo,bar,baz"]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:2', '$'), '[{"tag2":["foo","bar,baz"]}]')
    env.assertEqual(conn.execute_command('JSON.GET', 'doc:2', '$.tag2[*]'), '["foo","bar,baz"]')
    # The commas are part of the tag value, hence the escaped query syntax.
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag_list:{foo\\,bar\\,baz}'), [1L, 'doc:1', ['$', '{"tag1":"foo,bar,baz"}']])
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx', '@tag_array:{bar\\,baz}'), [1L, 'doc:2', ['$', '{"tag2":["foo","bar,baz"]}']])
def testMixedTagError(env):
    """An array TAG path whose elements mix scalars with nested arrays and
    objects fails to index; the document is excluded from the index."""
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE', 'idx1', 'ON', 'JSON', 'SCHEMA', '$.tag[*]', 'AS', 'tag', 'TAG')
    # field has a combination of a single tag, array and object
    env.assertOk(conn.execute_command('JSON.SET', 'doc1', '$', '{"tag":["good result", \
                                                                ["bad result"], \
                                                                {"another":"bad result"}]}'))
    env.assertEqual(conn.execute_command('FT.SEARCH', 'idx1', '*'), [0L])
def testSortableTagError(env):
    """SORTABLE is rejected for TAG fields on JSON indexes."""
    env.expect('FT.CREATE', 'idx1', 'ON', 'JSON', \
               'SCHEMA', '$.tag[*]', 'AS', 'idxtag', 'TAG', 'SORTABLE').error() \
        .contains('On JSON, cannot set tag field to sortable - idxtag')
def testNotExistField(env):
    """RETURN of a field absent from the document yields an empty field
    list for the matching key, not an error."""
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx1', 'ON', 'JSON', 'SCHEMA', '$.t', 'AS', 't', 'TEXT')
    conn.execute_command('JSON.SET', 'doc1', '$', '{"t":"foo"}')
    env.expect('FT.SEARCH', 'idx1', '*', 'RETURN', 1, 'name').equal([1L, 'doc1', []])
| 48.152566 | 140 | 0.542888 |
acfbd300e29dc0d2bdf028098373c43cb3dc8d34 | 2,490 | py | Python | code-for-fetching-data/PUBLIC_TRADINGIS_DATA-monir.py | mzkhan2000/AEMO-data-Analytics | 94c2906d8af699b55e95744656841c79fd019f77 | [
"MIT"
] | 1 | 2021-07-15T00:28:23.000Z | 2021-07-15T00:28:23.000Z | code-for-fetching-data/PUBLIC_TRADINGIS_DATA-monir.py | mzkhan2000/AEMO-data-Analytics | 94c2906d8af699b55e95744656841c79fd019f77 | [
"MIT"
] | null | null | null | code-for-fetching-data/PUBLIC_TRADINGIS_DATA-monir.py | mzkhan2000/AEMO-data-Analytics | 94c2906d8af699b55e95744656841c79fd019f77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
"""Concatenate AEMO PUBLIC_TRADINGIS CSV extracts into a single DataFrame.

Exported from a Jupyter notebook (the ``# In[..]:`` markers are cell
boundaries): reads every ``PUBLIC_TRADINGIS*.csv`` in the working
directory and writes the merged result to ``PUBLIC_TRADINGIS_df.csv``.
"""
# In[36]:
# import necessary libraries - Monir
import pandas as pd
import os
import glob
import numpy as np
from csv import reader
# In[37]:
# assign dataset names - Monir
PUBLIC_TRADINGIS_list_of_files = []
# read all dataset names starting with PUBLIC_TRADINGIS - Monir
PUBLIC_TRADINGIS_list_of_files = glob.glob('PUBLIC_TRADINGIS*.csv')
# In[38]:
len(PUBLIC_TRADINGIS_list_of_files)
# In[39]:
# create empty list
dataframes_list = []
list_of_names = PUBLIC_TRADINGIS_list_of_files
# In[40]:
# append datasets into the list
for i in range(len(list_of_names)):
    # read csv file as a list of lists
    with open(list_of_names[i], 'r') as read_obj:
        # pass the file object to reader() to get the reader object
        csv_reader = reader(read_obj)
        # Pass reader object to list() to get a list of lists
        list_of_rows = list(csv_reader)
        #print(list_of_rows)
        #temp_df = pd.DataFrame(list_of_rows)
        # rows 9..13 hold the records of interest; row 8 is presumably the
        # header (it is assigned to the columns further below) — TODO confirm
        temp_df = pd.DataFrame(list_of_rows[9:14])
        dataframes_list.append(temp_df)
        list_of_column = list_of_rows[8]
    #temp_df = pd.read_csv(list_of_names[i], skiprows = 1, skipfooter = 1)
    #dataframes_list[i]=temp_df
    #dataframes_list.append(temp_df)
# In[42]:
len(dataframes_list)
# In[41]:
list_of_column
# In[43]:
dataframes_list[0].shape
# In[44]:
dataframes_list[1673].tail()
# In[45]:
# multiple DataFrames are merged (Concatenate pandas objects) - Monir
PUBLIC_TRADINGIS_df = pd.concat(dataframes_list)
# In[46]:
PUBLIC_TRADINGIS_df.shape
# In[47]:
PUBLIC_TRADINGIS_df
# In[48]:
PUBLIC_TRADINGIS_df.columns = list_of_column
# In[49]:
PUBLIC_TRADINGIS_df.head()
# In[21]:
# Sanity-check cell: parse one known file in isolation.
with open('PUBLIC_TRADINGIS_202104180030_0000000340056853.csv', 'r') as read_obj:
    # pass the file object to reader() to get the reader object
    csv_reader = reader(read_obj)
    # Pass reader object to list() to get a list of lists
    list_of_rows = list(csv_reader)
    #print(list_of_rows)
    #temp_df = pd.DataFrame(list_of_rows)
    test_df = pd.DataFrame(list_of_rows[8:14])
    #temp_df = pd.DataFrame(list_of_rows[8:14])
# In[22]:
test_df.head()
# In[30]:
PUBLIC_TRADINGIS_df.dtypes
# In[31]:
PUBLIC_TRADINGIS_df.info()
# In[50]:
PUBLIC_TRADINGIS_df
# In[51]:
# Export Pandas DataFrame to CSV - Monir
PUBLIC_TRADINGIS_df.to_csv('PUBLIC_TRADINGIS_df.csv', index=False)
# In[ ]:
| 14.647059 | 81 | 0.690763 |
acfbd375cda8f6e6faee36700a8e0554722befd1 | 1,073 | py | Python | backend/core/cdn.py | GLY0826/flask-bigger4edu | 663a4dedb39e2abb12e9fe98ed8eb5d1314fe413 | [
"MIT"
] | 29 | 2018-11-13T09:03:29.000Z | 2021-11-07T20:20:38.000Z | backend/core/cdn.py | GLY0826/flask-bigger4edu | 663a4dedb39e2abb12e9fe98ed8eb5d1314fe413 | [
"MIT"
] | null | null | null | backend/core/cdn.py | GLY0826/flask-bigger4edu | 663a4dedb39e2abb12e9fe98ed8eb5d1314fe413 | [
"MIT"
] | 21 | 2018-11-14T01:11:24.000Z | 2021-12-08T09:20:30.000Z | # -*- coding: utf-8 -*-
'''后端主要业务文件'''
import os
from flask import url_for
from ..utils import (
set_query_parameter,
random_uuid
)
def with_cdn_setting(app):
    """Register the ``static_url`` Jinja template global on *app*.

    The helper resolves a static asset URL, optionally serving it from a
    CDN, and appends cache-busting query parameters. Returns *app*.
    """

    @app.template_global('static_url')
    def static_url(filename):
        use_cdn = app.config['USE_CDN']
        served_from_cdn = False
        cdn_list = app.config['CDN_LIST']
        if filename not in cdn_list:
            # Asset not registered in the CDN map: serve it locally.
            url = url_for('static', filename=filename)
        else:
            entry = cdn_list[filename]
            if use_cdn and 'cdn' in entry:
                url = entry['cdn']
                served_from_cdn = True
            else:
                url = url_for('static', filename=entry['local'])
        # Cache-bust with the site version via the stdlib-based helper.
        # (A third-party alternative would be furl's .add({'_v': ...}).)
        url = set_query_parameter(url, '_v', app.site_version)
        if app.debug and not served_from_cdn:
            # In debug mode force a fresh fetch of local assets per render.
            url = set_query_parameter(url, '_t', random_uuid())
        return url

    return app
acfbd44c6321f0669f5b32d84ea6bbddbfb3db17 | 8,668 | py | Python | pl_bolts/datamodules/sklearn_datamodule.py | norabelrose/lightning-bolts | cb83c2e93fc7860474b9e8d216fac450772fb847 | [
"Apache-2.0"
] | 2 | 2021-06-25T18:10:22.000Z | 2021-12-29T23:17:31.000Z | pl_bolts/datamodules/sklearn_datamodule.py | norabelrose/lightning-bolts | cb83c2e93fc7860474b9e8d216fac450772fb847 | [
"Apache-2.0"
] | null | null | null | pl_bolts/datamodules/sklearn_datamodule.py | norabelrose/lightning-bolts | cb83c2e93fc7860474b9e8d216fac450772fb847 | [
"Apache-2.0"
] | null | null | null | import math
from typing import Any, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from pl_bolts.utils import _SKLEARN_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _SKLEARN_AVAILABLE:
from sklearn.utils import shuffle as sk_shuffle
else: # pragma: no cover
warn_missing_pkg("sklearn")
class SklearnDataset(Dataset):
    """Expose a pair of numpy arrays (features, targets) as a map-style
    PyTorch ``Dataset``.

    Example:
        >>> from sklearn.datasets import load_diabetes
        >>> from pl_bolts.datamodules import SklearnDataset
        ...
        >>> X, y = load_diabetes(return_X_y=True)
        >>> dataset = SklearnDataset(X, y)
        >>> len(dataset)
        442
    """

    def __init__(self, X: np.ndarray, y: np.ndarray, X_transform: Any = None, y_transform: Any = None) -> None:
        """
        Args:
            X: feature array, indexed along its first axis
            y: target array, indexed along its first axis
            X_transform: optional callable applied to each feature sample
            y_transform: optional callable applied to each target sample
        """
        super().__init__()
        self.X = X
        self.Y = y
        self.X_transform = X_transform
        self.y_transform = y_transform

    def __len__(self) -> int:
        return len(self.X)

    def __getitem__(self, idx) -> Tuple[np.ndarray, np.ndarray]:
        features = self.X[idx].astype(np.float32)
        target = self.Y[idx]
        # Integer targets (int32/int64 classification labels) keep their
        # dtype; everything else is treated as regression -> float32.
        if target.dtype not in (np.int32, np.int64):
            target = target.astype(np.float32)
        if self.X_transform:
            features = self.X_transform(features)
        if self.y_transform:
            target = self.y_transform(target)
        return features, target
class TensorDataset(Dataset):
    """Wrap a pair of PyTorch tensors as a map-style ``Dataset``.

    Example:
        >>> from pl_bolts.datamodules import TensorDataset
        ...
        >>> X = torch.rand(10, 3)
        >>> y = torch.rand(10)
        >>> dataset = TensorDataset(X, y)
        >>> len(dataset)
        10
    """

    def __init__(self, X: torch.Tensor, y: torch.Tensor, X_transform: Any = None, y_transform: Any = None) -> None:
        """
        Args:
            X: feature tensor, indexed along its first dimension
            y: target tensor, indexed along its first dimension
            X_transform: optional callable applied to each feature sample
            y_transform: optional callable applied to each target sample
        """
        super().__init__()
        self.X = X
        self.Y = y
        self.X_transform = X_transform
        self.y_transform = y_transform

    def __len__(self) -> int:
        return len(self.X)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
        # Features are always served as float32; targets keep their dtype.
        sample = self.X[idx].float()
        target = self.Y[idx]
        if self.X_transform:
            sample = self.X_transform(sample)
        if self.y_transform:
            target = self.y_transform(target)
        return sample, target
class SklearnDataModule(LightningDataModule):
    """
    Automatically generates the train, validation and test splits for a Numpy dataset. They are set up as
    dataloaders for convenience. Optionally, you can pass in your own validation and test splits.

    Example:

        >>> from sklearn.datasets import load_diabetes
        >>> from pl_bolts.datamodules import SklearnDataModule
        ...
        >>> X, y = load_diabetes(return_X_y=True)
        >>> loaders = SklearnDataModule(X, y, batch_size=32)
        ...
        >>> # train set
        >>> train_loader = loaders.train_dataloader()
        >>> len(train_loader.dataset)
        310
        >>> len(train_loader)
        10
        >>> # validation set
        >>> val_loader = loaders.val_dataloader()
        >>> len(val_loader.dataset)
        88
        >>> len(val_loader)
        3
        >>> # test set
        >>> test_loader = loaders.test_dataloader()
        >>> len(test_loader.dataset)
        44
        >>> len(test_loader)
        2
    """

    name = 'sklearn'

    def __init__(
        self,
        X,
        y,
        x_val=None,
        y_val=None,
        x_test=None,
        y_test=None,
        val_split=0.2,
        test_split=0.1,
        num_workers=2,
        random_state=1234,
        shuffle=True,
        batch_size: int = 16,
        pin_memory=False,
        drop_last=False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.pin_memory = pin_memory
        self.drop_last = drop_last

        # shuffle x and y
        if shuffle and _SKLEARN_AVAILABLE:
            X, y = sk_shuffle(X, y, random_state=random_state)
        elif shuffle and not _SKLEARN_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError(
                'You want to use shuffle function from `scikit-learn` which is not installed yet.'
            )

        # Explicitly supplied val/test arrays override the split fractions.
        val_split = 0 if x_val is not None or y_val is not None else val_split
        test_split = 0 if x_test is not None or y_test is not None else test_split

        hold_out_split = val_split + test_split
        if hold_out_split > 0:
            # Carve the hold-out block off the FRONT of the (shuffled) data,
            # then cut it into val and test parts proportionally; the
            # remainder becomes the training set.
            val_split = val_split / hold_out_split
            hold_out_size = math.floor(len(X) * hold_out_split)
            x_holdout, y_holdout = X[:hold_out_size], y[:hold_out_size]
            test_i_start = int(val_split * hold_out_size)
            x_val_hold_out, y_val_holdout = x_holdout[:test_i_start], y_holdout[:test_i_start]
            x_test_hold_out, y_test_holdout = x_holdout[test_i_start:], y_holdout[test_i_start:]
            X, y = X[hold_out_size:], y[hold_out_size:]

        # if don't have x_val and y_val create split from X
        if x_val is None and y_val is None and val_split > 0:
            x_val, y_val = x_val_hold_out, y_val_holdout

        # if don't have x_test, y_test create split from X
        if x_test is None and y_test is None and test_split > 0:
            x_test, y_test = x_test_hold_out, y_test_holdout

        # NOTE(review): if val_split/test_split is 0 and no explicit arrays
        # are given, x_val/x_test stay None here and SklearnDataset(None, ...)
        # would fail on first use — confirm whether that case is supported.
        self._init_datasets(X, y, x_val, y_val, x_test, y_test)

    def _init_datasets(
        self, X: np.ndarray, y: np.ndarray, x_val: np.ndarray, y_val: np.ndarray, x_test: np.ndarray, y_test: np.ndarray
    ) -> None:
        # Build the three SklearnDataset wrappers used by the dataloaders.
        self.train_dataset = SklearnDataset(X, y)
        self.val_dataset = SklearnDataset(x_val, y_val)
        self.test_dataset = SklearnDataset(x_test, y_test)

    def train_dataloader(self) -> DataLoader:
        """Return the training DataLoader (shuffling per ``self.shuffle``)."""
        loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            drop_last=self.drop_last,
            pin_memory=self.pin_memory
        )
        return loader

    def val_dataloader(self) -> DataLoader:
        """Return the validation DataLoader (never shuffled)."""
        loader = DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            drop_last=self.drop_last,
            pin_memory=self.pin_memory
        )
        return loader

    def test_dataloader(self) -> DataLoader:
        """Return the test DataLoader (never shuffled)."""
        loader = DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            drop_last=self.drop_last,
            pin_memory=self.pin_memory
        )
        return loader
# TODO: this seems to be wrong, something missing here, another inherit class?
# class TensorDataModule(SklearnDataModule):
# """
# Automatically generates the train, validation and test splits for a PyTorch tensor dataset. They are set up as
# dataloaders for convenience. Optionally, you can pass in your own validation and test splits.
#
# Example:
#
# >>> from pl_bolts.datamodules import TensorDataModule
# >>> import torch
# ...
# >>> # create dataset
# >>> X = torch.rand(100, 3)
# >>> y = torch.rand(100)
# >>> loaders = TensorDataModule(X, y)
# ...
# >>> # train set
# >>> train_loader = loaders.train_dataloader(batch_size=10)
# >>> len(train_loader.dataset)
# 70
# >>> len(train_loader)
# 7
# >>> # validation set
# >>> val_loader = loaders.val_dataloader(batch_size=10)
# >>> len(val_loader.dataset)
# 20
# >>> len(val_loader)
# 2
# >>> # test set
# >>> test_loader = loaders.test_dataloader(batch_size=10)
# >>> len(test_loader.dataset)
# 10
# >>> len(test_loader)
# 1
# """
| 31.0681 | 120 | 0.590909 |
acfbd51ad81a9b310e22faece2eb02294a09ed39 | 3,044 | py | Python | src/abaqus/Material/Plastic/DruckerPrager/ModifiedCap/CapCreepCohesion.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Material/Plastic/DruckerPrager/ModifiedCap/CapCreepCohesion.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Material/Plastic/DruckerPrager/ModifiedCap/CapCreepCohesion.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
class CapCreepCohesion:
    """The CapCreepCohesion object specifies a cap creep model and material properties.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import material
        mdb.models[name].materials[name].capPlasticity.capCreepCohesion
        import odbMaterial
        session.odbs[name].materials[name].capPlasticity.capCreepCohesion

    The table data for this object are:

    - If *law*=STRAIN or *law*=TIME, the table data specify the following:
        - A.
        - n.
        - m.
        - Temperature, if the data depend on temperature.
        - Value of the first field variable, if the data depend on field variables.
        - Value of the second field variable.
        - Etc.
    - If *law*=SINGHM, the table data specify the following:
        - A.
        - α.
        - m.
        - t1.
        - Temperature, if the data depend on temperature.
        - Value of the first field variable, if the data depend on field variables.
        - Value of the second field variable.
        - Etc.
    - If *law*=POWER_LAW or *law*=TIME_POWER_LAW, the table data specify the following:
        - q0.
        - n.
        - m.
        - ε0.
        - Temperature, if the data depend on temperature.
        - Value of the first field variable, if the data depend on field variables.
        - Value of the second field variable.
        - Etc.

    The corresponding analysis keywords are:

    - CAP CREEP
    """

    def __init__(self, table: tuple, law: SymbolicConstant = STRAIN, temperatureDependency: Boolean = OFF,
                 dependencies: int = 0, time: SymbolicConstant = TOTAL):
        """This method creates a CapCreepCohesion object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].materials[name].capPlasticity.CapCreepCohesion
            session.odbs[name].materials[name].capPlasticity.CapCreepCohesion

        Parameters
        ----------
        table
            A sequence of sequences of Floats specifying the items described below.
        law
            A SymbolicConstant specifying the cap creep law. Possible values are STRAIN, TIME,
            SINGHM, USER, POWER_LAW, and TIME_POWER_LAW. The default value is STRAIN.
        temperatureDependency
            A Boolean specifying whether the data depend on temperature. The default value is OFF.
        dependencies
            An Int specifying the number of field variable dependencies. The default value is 0.
        time
            A SymbolicConstant specifying the time increment for the relevant laws. Possible values
            are CREEP and TOTAL. The default value is TOTAL.

        Returns
        -------
        A CapCreepCohesion object.
        """
        # Stub body: this class is a documentation/API binding — presumably
        # the Abaqus kernel performs the actual construction. TODO confirm.
        pass

    def setValues(self):
        """This method modifies the CapCreepCohesion object.
        """
        # Stub body (see __init__).
        pass
acfbd53464e85569f42a7091f879517d887cf080 | 12,315 | py | Python | pffit/vsf_fit.py | Tristanovsk/pffit | 6a9eeb3849c8670b2aa3c622d5d360d43e27ea8b | [
"MIT"
] | null | null | null | pffit/vsf_fit.py | Tristanovsk/pffit | 6a9eeb3849c8670b2aa3c622d5d360d43e27ea8b | [
"MIT"
] | null | null | null | pffit/vsf_fit.py | Tristanovsk/pffit | 6a9eeb3849c8670b2aa3c622d5d360d43e27ea8b | [
"MIT"
] | null | null | null | import os
opj = os.path.join
import numpy as np
import pandas as pd
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
color_cycle = ['dimgrey', 'firebrick', 'darkorange', 'olivedrab',
'dodgerblue', 'magenta']
plt.ioff()
plt.rcParams.update({'font.family': 'serif',
'font.size': 16, 'axes.labelsize': 20,
'mathtext.fontset': 'stix',
'axes.prop_cycle': plt.cycler('color', color_cycle)})
plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]
import pffit
fit = pffit.phase_function_models.inversion()
m = pffit.phase_function_models.models()
dir = pffit.__path__[0]
dirdata = opj(dir, 'data')
trunc = False
if trunc:
dirfig = opj(dir, 'fig', 'truncated')
angular_range = [3, 150]
else:
dirfig = opj(dir, 'fig', 'test')
angular_range = [3, 173]
files = glob.glob(opj(dirdata, 'normalized_vsf*txt'))
# -------------------
# fitting section
# -------------------
models = (fit.TTFF_fit, fit.RM_fit, fit.FFRM_fit, fit.TTRM_fit)
samples = ['Arizona', 'Chlorella', 'Cylindrotheca', 'Dunaliella', 'Karenia', 'Skeletonema']
names = ['Arizona dust', r'$\it{C. autotrophica}$', r'$\it{C. closterium}$', r'$\it{D. salina}$',
r'$\it{K. mikimotoi}$', r'$\it{S. cf. costatum}$']
file_pattern = '/home/harmel/Dropbox/work/git/vrtc/RTxploitation/RTxploitation/../study_cases/vsf/data/normalized_vsf_lov_experiment2015_xxx.txt'
theta_ = np.linspace(0, 180, 100000)
back_ang = theta_[theta_ > 90]
for icol, model in enumerate(models):
model_ = model.__name__
res = []
for irow, sample in enumerate(samples):
file = file_pattern.replace('xxx', sample)
df = pd.read_csv(file, skiprows=8, sep='\t', index_col=0, skipinitialspace=True, na_values='inf')
for i, (label, group) in enumerate(df.iteritems()):
print(label)
wl_ = int(label.split('.')[-1])
group_ = group.dropna()
group_ = group_[
(group_.index >= angular_range[0]) & (group_.index <= angular_range[1])] # [group_.index<140]
theta, vsf = group_.index.values, group_.values
min1, func = model(theta, vsf)
out1 = min1.least_squares() # max_nfev=30, xtol=1e-7, ftol=1e-4)
x = out1.x
res_ = pd.DataFrame(data={'sample': [sample], 'name': [names[irow]], 'wavelength': [wl_]})
res_['cost'] = out1.residual.__abs__().mean()
for c in ('redchi', 'bic', 'aic'):
res_[c] = out1.__getattribute__(c)
for name, param in out1.params.items():
res_[name] = param.value
res_[name + '_std'] = param.stderr
norm = np.trapz(func(theta_[1:], *x) * np.sin(np.radians(theta_[1:])), np.radians(theta_[1:])) * np.pi * 2
bb_tilde = np.trapz(func(back_ang, *x) * np.sin(np.radians(back_ang)),
np.radians(back_ang)) * np.pi * 2 / norm
cos_ave = np.trapz(func(theta_[1:], *x) * np.sin(np.radians(theta_[1:]) * np.cos(np.radians(theta_[1:]))),
np.radians(theta_[1:])) * np.pi * 2
res_['norm'] = norm
res_['bb_ratio'] = bb_tilde
res_['asymmetry_factor'] = cos_ave
if model_ == 'TTRM_fit':
x = out1.x
# cov = out1.covar[:3, :3]
L1 = m.asym_RM(x[1], x[3])
mu_1 = (2 * x[1] * x[3] * L1 - (1 + x[1] ** 2)) / (2 * x[1] * (x[3] - 1))
L2 = m.asym_RM(x[2], x[4])
mu_2 = (2 * x[2] * x[4] * L2 - (1 + x[2] ** 2)) / (2 * x[2] * (x[4] - 1))
mu_ = x[0] * mu_1 + (1 - x[0]) * mu_2
print('result: ', x[0], mu_1, mu_2, mu_)
J = np.array([x[1] - x[2], x[0], 1 - x[0]])
# np.matmul(J, np.matmul(cov, J.T))
res.append(res_)
res = pd.concat(res)
res.to_csv(opj(dirdata, 'fit_res_' + model_ + '.csv'))
# -------------------
# plotting section
# -------------------
for param in ('redchi',): # 'bic','aic','bb_ratio','asymmetry_factor'):
fig, axs = plt.subplots(2, 2, figsize=(10, 10), sharex=True)
fig.subplots_adjust(bottom=0.175, top=0.96, left=0.1, right=0.98,
hspace=0.25, wspace=0.27)
axs = axs.ravel()
for icol, model in enumerate(models):
model_ = model.__name__
res = pd.read_csv(opj(dirdata, 'fit_res_' + model_ + '.csv'))
ax = axs[icol]
ax.set_title(model_)
for name, group in res.groupby('name'):
if icol == 3:
ax.plot(group.wavelength, group[param], label=name, linestyle='dashed', lw=2, marker='o', mec='grey',
ms=12, alpha=0.6)
else:
ax.plot(group.wavelength, group[param], linestyle='dashed', lw=2, marker='o', mec='grey', ms=12,
alpha=0.6)
if param == "redchi":
ax.set_ylabel(r'${\chi_\nu^2}$')
else:
ax.set_ylabel(param)
axs[-1].set_xlabel('Wavelength (nm)')
axs[-2].set_xlabel('Wavelength (nm)')
fig.legend(loc='upper center', bbox_to_anchor=(0.535, .115),
fancybox=True, shadow=True, ncol=3, handletextpad=0.5, fontsize=20)
# fig.tight_layout()
plt.savefig(opj(dirfig, param + '_fitting_performances.png'), dpi=300)
# \mathit
fig, axs = plt.subplots(4, 2, figsize=(10, 12), sharex=True)
axs = axs.ravel()
labels = ['$\gamma$', '$g_1$', '$g_2$', r'$\alpha _1$', r'$\alpha_2$', '$\~b_b$', r'$<cos\theta >$']
for i, param in enumerate(['gamma', 'g1', 'g2', 'alpha1', 'alpha2', 'bb_ratio', 'asymmetry_factor']):
ax = axs[i]
ax.set_ylabel(labels[i])
for name, group in res.groupby('name'):
# ax.errorbar(group.wavelength,group[param],yerr=group[param+'_std'],label=name,linestyle='dashed',lw=2, marker='o',mec='grey',ms=12,alpha=0.6)
ax.errorbar(group.wavelength, group[param], linestyle='dashed', lw=2, marker='o', mec='grey', ms=12, alpha=0.6)
for name, group in res.groupby('name'):
axs[-1].errorbar(group.wavelength, group[param], label=name, linestyle='dashed', lw=2, marker='o', mec='grey',
ms=12, alpha=0.6)
axs[-1].set_visible(False)
axs[-2].set_xlabel('Wavelength (nm)')
axs[-3].set_xlabel('Wavelength (nm)')
axs[-3].tick_params(axis='x', labelbottom='on')
fig.legend(loc='lower left', bbox_to_anchor=(0.57, 0.04),
fancybox=True, shadow=True, ncol=1, handletextpad=0.5, fontsize=17)
plt.tight_layout()
fig.subplots_adjust(hspace=0.065) # , wspace=0.065)
plt.savefig(opj(dirfig, 'TTRM_fitting_parameters.png'), dpi=300)
plt.legend()
plt.show()
color = ['black', 'blue', 'green', 'red']
for file in files:
df = pd.read_csv(file, skiprows=8, sep='\t', index_col=0, skipinitialspace=True, na_values='inf')
basename = os.path.basename(file).replace('.txt', '')
if not '3µm' in basename:
continue
fig, axs = plt.subplots(2, 2, figsize=(15, 12))
axs = axs.ravel()
for ax, model in zip(axs, models):
for i, (label, group) in enumerate(df.iteritems()):
print(label)
group_ = group.dropna()
group_ = group_[(group_.index >= angular_range[0]) & (group_.index <= angular_range[1])]
theta, vsf = group_.index.values, group_.values
min1, func = model(theta, vsf)
out1 = min1.least_squares() # max_nfev=30, xtol=1e-7, ftol=1e-4)
out1.params.pretty_print()
x = out1.x
ax.plot(theta, vsf, color=color[i], label=label)
ax.plot(theta, func(theta, *x), '--', color=color[i])
ax.set_xlabel('Scattering angle (deg)')
ax.set_ylabel('Phase function $(sr^{-1})$')
ax.semilogy()
ax.set_title(model.__name__)
plt.legend()
plt.suptitle(basename)
plt.savefig(opj(dirfig, basename + '.png'), dpi=300)
# fig all
samples = ['Arizona', 'Chlorella', 'Cylindrotheca', 'Dunaliella', 'Karenia', 'Skeletonema']
file_pattern = '/home/harmel/Dropbox/work/git/vrtc/RTxploitation/RTxploitation/../study_cases/vsf/data/normalized_vsf_lov_experiment2015_xxx.txt'
rows, cols = 6, 4
axslin = [[0 for x in range(cols)] for x in range(rows)]
irow = 0
fig, axs = plt.subplots(rows, cols, figsize=(22, 25), sharex=True, sharey=True)
for irow, sample in enumerate(samples):
file = file_pattern.replace('xxx', sample)
df = pd.read_csv(file, skiprows=8, sep='\t', index_col=0, skipinitialspace=True, na_values='inf')
for icol, model in enumerate(models):
ax = axs[irow, icol]
ax.loglog()
axs[0, icol].set_title(model.__name__)
ax.set_xlim((0.01, 10))
divider = make_axes_locatable(ax)
axlin = divider.append_axes("right", size=3, pad=0, sharey=ax)
axslin[irow][icol] = axlin
ax.spines['right'].set_visible(False)
axlin.spines['left'].set_linestyle('--')
# axlin.spines['left'].set_linewidth(1.8)
axlin.spines['left'].set_color('grey')
axlin.yaxis.set_ticks_position('right')
axlin.yaxis.set_visible(False)
axlin.xaxis.set_visible(False)
axlin.set_xscale('linear')
axlin.set_xlim((10, 190))
for i, (label_, group) in enumerate(df.iteritems()):
label = label_.split('.')[-1] + ' nm'
print(label)
group_ = group.dropna()
group_ = group_[(group_.index >= angular_range[0]) & (group_.index <= angular_range[1])]
theta, vsf = group_.index.values, group_.values
min1, func = model(theta, vsf)
out1 = min1.least_squares() # xtol=1e-15,ftol=1e-15) # max_nfev=30, xtol=1e-7, ftol=1e-4)
out1.params.pretty_print()
x = out1.x
for ax_ in (ax, axlin):
ax_.plot(theta, vsf, color=color[i], label=label)
ax_.plot(theta_, func(theta_, *x), '--', color=color[i])
# norm = (np.trapz(func(theta_[1:], *x) * np.sin(np.radians(theta_[1:])), np.radians(theta_[1:])) * np.pi * 2)
# bp_tilde = np.trapz(func(back_ang, *x) * np.sin(np.radians(back_ang)), np.radians(back_ang)) * np.pi * 2 / norm
# axlin.text(0.95, 0.75, '$\~b_b=${:6.4f}'.format(bp_tilde), size=20,
# transform=axlin.transAxes, ha="right", va="top", )
ax.xaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=4))
ax.yaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=10))
ax.xaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=10, subs=np.arange(10) * 0.1))
ax.yaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=10, subs=np.arange(10) * 0.1))
irow += 1
# ax.plot(theta, vsf, color=color[i], label=label)
# ax.plot(theta_, func(theta_, *x), '--', color=color[i])
# norm = (np.trapz( func(theta_[1:], *x)*np.sin(np.radians(theta_[1:])),np.radians(theta_[1:]))*np.pi*2)
# bp_tilde = np.trapz( func(back_ang, *x)*np.sin(np.radians(back_ang)),np.radians(back_ang))*np.pi*2 / norm
# ax.text(0.95, 0.75, '$\~b_b=${:6.4f}'.format(bp_tilde), size=20,
# transform=ax.transAxes, ha="right", va="top",)
plt.legend()
ax.set_ylim(ymin=0.0003, ymax=30 ** 2)
for irow, sample in enumerate(samples):
axslin[irow][0].text(0.95, 0.95, names[irow], size=20,
transform=axslin[irow][0].transAxes, ha="right", va="top",
bbox=dict(boxstyle="round",
ec=(0.1, 0.1, 0.1),
fc=plt.matplotlib.colors.to_rgba(color_cycle[irow], 0.3),
))
axs[irow, 0].set_ylabel(r'Phase function $(sr^{-1})$')
for icol, model in enumerate(models):
axslin[-1][icol].xaxis.set_visible(True)
axslin[-1][icol].set_xlabel('Scattering angle (deg)')
plt.tight_layout()
fig.subplots_adjust(hspace=0.065, wspace=0.065)
plt.suptitle('')
plt.savefig(opj(dirfig, 'all_vsf_fit.png'), dpi=300)
# ax.set_xlim(0.01, 200)
# for ax in axs.ravel():
# ax.semilogx()
# plt.savefig(opj(dirfig, 'all_vsf_fit_loglog.png'), dpi=300)
| 42.760417 | 151 | 0.575477 |
acfbd567addd287c47662c641aa17c02ec80528c | 1,520 | py | Python | setup.py | PlanetHunters/tkmatrix | 7c112e2cbcd1e75753828a334720ddf7972c8551 | [
"MIT"
] | 1 | 2021-03-09T18:50:23.000Z | 2021-03-09T18:50:23.000Z | setup.py | PlanetHunters/tkmatrix | 7c112e2cbcd1e75753828a334720ddf7972c8551 | [
"MIT"
] | 15 | 2021-03-24T14:13:02.000Z | 2021-10-16T06:37:45.000Z | setup.py | martindevora/tkmatrix | 7c112e2cbcd1e75753828a334720ddf7972c8551 | [
"MIT"
] | 1 | 2021-03-09T18:50:26.000Z | 2021-03-09T18:50:26.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
version = "0.3.12"
setuptools.setup(
name="tkmatrix",
version=version,
author="M. Dévora-Pajares & F.J. Pozuelos",
author_email="mdevorapajares@protonmail.com",
description="ToolKit for Multi-phAse Transits Recovery from Injected eXoplanets",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/PlanetHunters/tkmatrix",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6.8',
install_requires=['argparse==1.4.0',
'beautifulsoup4==4.9.3',
'configparser==5.0.1',
"corner==2.1.0",
"cython==0.29.21",
"ellc==1.8.5",
"lcbuilder==0.6.16",
"matplotlib==3.3.4",
"mock==4.0.3",
'numba>=0.53.0rc1',
'pyparsing==2.4.7', # Matplotlib dependency
"seaborn==0.11.1",
'setuptools>=41.0.0',
"scipy==1.5.4",
"sklearn==0.0",
'tqdm==4.56.0',
"wotan==1.9",
]
)
| 36.190476 | 85 | 0.490789 |
acfbd5f9e0085f66022586637ba3140637cee62a | 497 | py | Python | ics/structures/j2534_adapter_information.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 45 | 2017-10-17T08:42:08.000Z | 2022-02-21T16:26:48.000Z | ics/structures/j2534_adapter_information.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 106 | 2017-03-07T21:10:39.000Z | 2022-03-29T15:32:46.000Z | ics/structures/j2534_adapter_information.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 17 | 2017-04-04T12:30:22.000Z | 2022-01-28T05:30:25.000Z | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
import enum
class j2534_adapter_information(ctypes.Structure):
_fields_ = [
('szName', ctypes.c_char * 128),
('szDeviceName', ctypes.c_char * 64),
('Status', ctypes.c_ulong),
('bMAC_Address', ctypes.c_ubyte * 6),
('bIPV6_Address', ctypes.c_ubyte * 16),
('bIPV4_Address', ctypes.c_ubyte * 4),
]
J2534_ADAPTER_INFORMATION = j2534_adapter_information
| 24.85 | 72 | 0.657948 |
acfbd634b16c3f769a0981b319472a1776b7cbb6 | 26,911 | py | Python | dependencies/jedi/api/completion.py | zjzh/SublimeJEDI | 8a5054f0a053c8a8170c06c56216245240551d54 | [
"MIT"
] | 641 | 2015-01-01T11:27:21.000Z | 2022-03-22T12:46:01.000Z | dependencies/jedi/api/completion.py | zjzh/SublimeJEDI | 8a5054f0a053c8a8170c06c56216245240551d54 | [
"MIT"
] | 177 | 2015-01-01T13:00:21.000Z | 2022-03-15T08:23:28.000Z | dependencies/jedi/api/completion.py | zjzh/SublimeJEDI | 8a5054f0a053c8a8170c06c56216245240551d54 | [
"MIT"
] | 118 | 2015-01-14T03:03:08.000Z | 2022-03-10T03:37:40.000Z | import re
from textwrap import dedent
from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines
from jedi._compatibility import Parameter
from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance, ModuleValue
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager
class ParamNameWithEquals(ParamNameWrapper):
    """Wraps a parameter name so it completes as ``name=`` (keyword argument)."""
    def get_public_name(self):
        return '{}='.format(self.string_name)
def _get_signature_param_names(signatures, positional_count, used_kwargs):
    """Yield ``name=`` completions for keyword-capable params of *signatures*.

    Skips params already consumed positionally (index below *positional_count*
    for POSITIONAL_OR_KEYWORD) and params already given in *used_kwargs*.
    """
    # Add named params
    for call_sig in signatures:
        for i, p in enumerate(call_sig.params):
            # Allow protected access, because it's a public API.
            # TODO reconsider with Python 2 drop
            kind = p._name.get_kind()
            if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
                continue
            if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
                    and p.name not in used_kwargs:
                yield ParamNameWithEquals(p._name)
def _must_be_kwarg(signatures, positional_count, used_kwargs):
if used_kwargs:
return True
must_be_kwarg = True
for signature in signatures:
for i, p in enumerate(signature.params):
# TODO reconsider with Python 2 drop
kind = p._name.get_kind()
if kind is Parameter.VAR_POSITIONAL:
# In case there were not already kwargs, the next param can
# always be a normal argument.
return False
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY):
must_be_kwarg = False
break
if not must_be_kwarg:
break
return must_be_kwarg
def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
    """Yield ``classes.Completion`` objects for names matching *like_name*.

    Deduplicates by ``(name, complete)`` and skips names whose definition is
    a ``del`` statement. Matching is case-insensitive when the corresponding
    setting is enabled, and fuzzy when *fuzzy* is True.
    """
    comp_dct = set()
    if settings.case_insensitive_completion:
        like_name = like_name.lower()
    for name in completion_names:
        string = name.string_name
        if settings.case_insensitive_completion:
            string = string.lower()
        if helpers.match(string, like_name, fuzzy=fuzzy):
            new = classes.Completion(
                inference_state,
                name,
                stack,
                len(like_name),
                is_fuzzy=fuzzy,
                cached_name=cached_name,
            )
            k = (new.name, new.complete)  # deduplication key
            if k not in comp_dct:
                comp_dct.add(k)
                tree_name = name.tree_name
                if tree_name is not None:
                    definition = tree_name.get_definition()
                    # Deleted names should not be completed.
                    if definition is not None and definition.type == 'del_stmt':
                        continue
                yield new
def _remove_duplicates(completions, other_completions):
names = {d.name for d in other_completions}
return [c for c in completions if c.name not in names]
def get_user_context(module_context, position):
    """
    Returns the scope in which the user resides. This includes flows.
    """
    module_node = module_context.tree_node
    anchor_leaf = module_node.get_leaf_for_position(position, include_prefixes=True)
    return module_context.create_context(anchor_leaf)
def get_flow_scope_node(module_node, position):
    """Return the innermost scope or flow node that contains *position*."""
    current = module_node.get_leaf_for_position(position, include_prefixes=True)
    while not isinstance(current, (tree.Scope, tree.Flow)):
        current = current.parent
    return current
@plugin_manager.decorate()
def complete_param_names(context, function_name, decorator_nodes):
    """Plugin hook for completing parameter names of *function_name*.

    The default implementation returns no names; plugins override this via
    the ``plugin_manager`` decorator.
    """
    # Basically there's no way to do param completion. The plugins are
    # responsible for this.
    return []
class Completion:
    """Computes code completions for one cursor position in one module.

    The heavy lifting happens in :meth:`_complete_python`, which inspects the
    parser stack at the cursor to decide between keyword, import, trailer,
    parameter and global-scope completions. String/dict/file-name contexts
    are handled up front in :meth:`complete`.
    """
    def __init__(self, inference_state, module_context, code_lines, position,
                 signatures_callback, fuzzy=False):
        self._inference_state = inference_state
        self._module_context = module_context
        self._module_node = module_context.tree_node
        self._code_lines = code_lines

        # The first step of completions is to get the name
        self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
        # The actual cursor position is not what we need to calculate
        # everything. We want the start of the name we're on.
        self._original_position = position
        self._signatures_callback = signatures_callback

        self._fuzzy = fuzzy

    def complete(self):
        """Return the sorted list of ``classes.Completion`` objects for the cursor."""
        leaf = self._module_node.get_leaf_for_position(
            self._original_position,
            include_prefixes=True
        )
        string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)

        # Dict-key and file-name completions take precedence while the
        # cursor is inside a (possibly unfinished) string literal.
        prefixed_completions = complete_dict(
            self._module_context,
            self._code_lines,
            start_leaf or leaf,
            self._original_position,
            None if string is None else quote + string,
            fuzzy=self._fuzzy,
        )

        if string is not None and not prefixed_completions:
            prefixed_completions = list(complete_file_name(
                self._inference_state, self._module_context, start_leaf, quote, string,
                self._like_name, self._signatures_callback,
                self._code_lines, self._original_position,
                self._fuzzy
            ))
        if string is not None:
            if not prefixed_completions and '\n' in string:
                # Complete only multi line strings
                prefixed_completions = self._complete_in_string(start_leaf, string)
            return prefixed_completions

        cached_name, completion_names = self._complete_python(leaf)

        completions = list(filter_names(self._inference_state, completion_names,
                                        self.stack, self._like_name,
                                        self._fuzzy, cached_name=cached_name))

        return (
            # Removing duplicates mostly to remove False/True/None duplicates.
            _remove_duplicates(prefixed_completions, completions)
            + sorted(completions, key=lambda x: (x.name.startswith('__'),
                                                 x.name.startswith('_'),
                                                 x.name.lower()))
        )

    def _complete_python(self, leaf):
        """
        Analyzes the current context of a completion and decides what to
        return.

        Technically this works by generating a parser stack and analysing the
        current stack for possible grammar nodes.

        Possible enhancements:
        - global/nonlocal search global
        - yield from / raise from <- could be only exceptions/generators
        - In args: */**: no completion
        - In params (also lambda): no completion before =
        """
        grammar = self._inference_state.grammar
        self.stack = stack = None
        self._position = (
            self._original_position[0],
            self._original_position[1] - len(self._like_name)
        )
        cached_name = None

        try:
            self.stack = stack = helpers.get_stack_at_position(
                grammar, self._code_lines, leaf, self._position
            )
        except helpers.OnErrorLeaf as e:
            value = e.error_leaf.value
            if value == '.':
                # After ErrorLeaf's that are dots, we will not do any
                # completions since this probably just confuses the user.
                return cached_name, []

            # If we don't have a value, just use global completion.
            return cached_name, self._complete_global_scope()

        allowed_transitions = \
            list(stack._allowed_transition_names_and_token_types())

        if 'if' in allowed_transitions:
            leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
            previous_leaf = leaf.get_previous_leaf()

            indent = self._position[1]
            if not (leaf.start_pos <= self._position <= leaf.end_pos):
                indent = leaf.start_pos[1]

            # Offer elif/else/except/finally only for statements at the
            # same indentation level as the cursor.
            if previous_leaf is not None:
                stmt = previous_leaf
                while True:
                    stmt = search_ancestor(
                        stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
                        'error_node',
                    )
                    if stmt is None:
                        break

                    type_ = stmt.type
                    if type_ == 'error_node':
                        first = stmt.children[0]
                        if isinstance(first, Leaf):
                            type_ = first.value + '_stmt'
                    # Compare indents
                    if stmt.start_pos[1] == indent:
                        if type_ == 'if_stmt':
                            allowed_transitions += ['elif', 'else']
                        elif type_ == 'try_stmt':
                            allowed_transitions += ['except', 'finally', 'else']
                        elif type_ == 'for_stmt':
                            allowed_transitions.append('else')

        completion_names = []
        current_line = self._code_lines[self._position[0] - 1][:self._position[1]]

        kwargs_only = False
        if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
                                                  PythonTokenTypes.INDENT)):
            # This means that we actually have to do type inference.
            nonterminals = [stack_node.nonterminal for stack_node in stack]

            nodes = _gather_nodes(stack)
            if nodes and nodes[-1] in ('as', 'def', 'class'):
                # No completions for ``with x as foo`` and ``import x as foo``.
                # Also true for defining names as a class or function.
                return cached_name, list(self._complete_inherited(is_function=True))
            elif "import_stmt" in nonterminals:
                level, names = parse_dotted_names(nodes, "import_from" in nonterminals)

                only_modules = not ("import_from" in nonterminals and 'import' in nodes)
                completion_names += self._get_importer_names(
                    names,
                    level,
                    only_modules=only_modules,
                )
            elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
                dot = self._module_node.get_leaf_for_position(self._position)
                cached_name, n = self._complete_trailer(dot.get_previous_leaf())
                completion_names += n
            elif self._is_parameter_completion():
                completion_names += self._complete_params(leaf)
            else:
                # Apparently this looks like it's good enough to filter most cases
                # so that signature completions don't randomly appear.
                # To understand why this works, three things are important:
                # 1. trailer with a `,` in it is either a subscript or an arglist.
                # 2. If there's no `,`, it's at the start and only signatures start
                #    with `(`. Other trailers could start with `.` or `[`.
                # 3. Decorators are very primitive and have an optional `(` with
                #    optional arglist in them.
                if nodes[-1] in ['(', ','] \
                        and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
                    signatures = self._signatures_callback(*self._position)
                    if signatures:
                        call_details = signatures[0]._call_details
                        used_kwargs = list(call_details.iter_used_keyword_arguments())
                        positional_count = call_details.count_positional_arguments()

                        completion_names += _get_signature_param_names(
                            signatures,
                            positional_count,
                            used_kwargs,
                        )

                        kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)

                if not kwargs_only:
                    completion_names += self._complete_global_scope()
                    completion_names += self._complete_inherited(is_function=False)

        if not kwargs_only:
            completion_names += self._complete_keywords(
                allowed_transitions,
                only_values=not (not current_line or current_line[-1] in ' \t.;'
                                 and current_line[-3:] != '...')
            )

        return cached_name, completion_names

    def _is_parameter_completion(self):
        """True if the cursor sits where a function/lambda parameter is expected."""
        tos = self.stack[-1]
        if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
            # We are at the position `lambda `, where basically the next node
            # is a param.
            return True
        if tos.nonterminal in 'parameters':
            # Basically we are at the position `foo(`, there's nothing there
            # yet, so we have no `typedargslist`.
            return True
        # var args is for lambdas and typed args for normal functions
        return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','

    def _complete_params(self, leaf):
        """Delegate parameter-name completion to plugins, with decorator context."""
        stack_node = self.stack[-2]
        if stack_node.nonterminal == 'parameters':
            stack_node = self.stack[-3]
        if stack_node.nonterminal == 'funcdef':
            context = get_user_context(self._module_context, self._position)
            node = search_ancestor(leaf, 'error_node', 'funcdef')
            if node.type == 'error_node':
                n = node.children[0]
                if n.type == 'decorators':
                    decorators = n.children
                elif n.type == 'decorator':
                    decorators = [n]
                else:
                    decorators = []
            else:
                decorators = node.get_decorators()
            function_name = stack_node.nodes[1]

            return complete_param_names(context, function_name.value, decorators)
        return []

    def _complete_keywords(self, allowed_transitions, only_values):
        """Yield keyword completions; only True/False/None when *only_values*."""
        for k in allowed_transitions:
            if isinstance(k, str) and k.isalpha():
                if not only_values or k in ('True', 'False', 'None'):
                    yield keywords.KeywordName(self._inference_state, k)

    def _complete_global_scope(self):
        """Collect all names visible from the user's scope (including flows)."""
        context = get_user_context(self._module_context, self._position)
        debug.dbg('global completion scope: %s', context)
        flow_scope_node = get_flow_scope_node(self._module_node, self._position)
        filters = get_global_filters(
            context,
            self._position,
            flow_scope_node
        )
        completion_names = []
        for filter in filters:
            completion_names += filter.values()
        return completion_names

    def _complete_trailer(self, previous_leaf):
        """Complete after a dot: infer the value of *previous_leaf* first."""
        inferred_context = self._module_context.create_context(previous_leaf)
        values = infer_call_of_leaf(inferred_context, previous_leaf)
        debug.dbg('trailer completion values: %s', values, color='MAGENTA')

        # The cached name simply exists to make speed optimizations for certain
        # modules.
        cached_name = None
        if len(values) == 1:
            v, = values
            if v.is_module():
                if len(v.string_names) == 1:
                    module_name = v.string_names[0]
                    if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
                        cached_name = module_name

        return cached_name, self._complete_trailer_for_values(values)

    def _complete_trailer_for_values(self, values):
        """Gather attribute names of the inferred *values*."""
        user_context = get_user_context(self._module_context, self._position)

        return complete_trailer(user_context, values)

    def _get_importer_names(self, names, level=0, only_modules=True):
        """Completion names for an import statement at relative *level*."""
        names = [n.value for n in names]
        i = imports.Importer(self._inference_state, names, self._module_context, level)
        return i.completion_names(self._inference_state, only_modules=only_modules)

    def _complete_inherited(self, is_function=True):
        """
        Autocomplete inherited methods when overriding in child class.
        """
        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
        cls = tree.search_ancestor(leaf, 'classdef')
        if cls is None:
            return

        # Complete the methods that are defined in the super classes.
        class_value = self._module_context.create_value(cls)

        if cls.start_pos[1] >= leaf.start_pos[1]:
            return

        filters = class_value.get_filters(is_instance=True)
        # The first dict is the dictionary of class itself.
        next(filters)
        for filter in filters:
            for name in filter.values():
                # TODO we should probably check here for properties
                if (name.api_type == 'function') == is_function:
                    yield name

    def _complete_in_string(self, start_leaf, string):
        """
        To make it possible for people to have completions in doctests or
        generally in "Python" code in docstrings, we use the following
        heuristic:

        - Having an indented block of code
        - Having some doctest code that starts with `>>>`
        - Having backticks that doesn't have whitespace inside it
        """
        def iter_relevant_lines(lines):
            include_next_line = False
            for l in code_lines:
                if include_next_line or l.startswith('>>>') or l.startswith(' '):
                    yield re.sub(r'^( *>>> ?| +)', '', l)
                else:
                    yield None

                include_next_line = bool(re.match(' *>>>', l))

        string = dedent(string)
        code_lines = split_lines(string, keepends=True)
        relevant_code_lines = list(iter_relevant_lines(code_lines))
        if relevant_code_lines[-1] is not None:
            # Some code lines might be None, therefore get rid of that.
            relevant_code_lines = [c or '\n' for c in relevant_code_lines]
            return self._complete_code_lines(relevant_code_lines)
        match = re.search(r'`([^`\s]+)', code_lines[-1])
        if match:
            return self._complete_code_lines([match.group(1)])
        return []

    def _complete_code_lines(self, code_lines):
        """Run a nested completion on *code_lines* parsed as its own module."""
        module_node = self._inference_state.grammar.parse(''.join(code_lines))
        module_value = ModuleValue(
            self._inference_state,
            module_node,
            code_lines=code_lines,
        )
        module_value.parent_context = self._module_context
        return Completion(
            self._inference_state,
            module_value.as_context(),
            code_lines=code_lines,
            position=module_node.end_pos,
            signatures_callback=lambda *args, **kwargs: [],
            fuzzy=self._fuzzy
        ).complete()
def _gather_nodes(stack):
nodes = []
for stack_node in stack:
if stack_node.dfa.from_rule == 'small_stmt':
nodes = []
else:
nodes += stack_node.nodes
return nodes
_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
def _extract_string_while_in_string(leaf, position):
    """Return ``(content, start_leaf, quote)`` when *position* is inside a string.

    *content* is the text typed after the opening quote up to *position*,
    *start_leaf* is the leaf where the string starts and *quote* its prefix
    plus opening quote. Returns ``(None, None, None)`` when the position is
    not inside a string literal.
    """
    def return_part_of_leaf(leaf):
        kwargs = {}
        if leaf.line == position[0]:
            kwargs['endpos'] = position[1] - leaf.column
        match = _string_start.match(leaf.value, **kwargs)
        if not match:
            return None, None, None
        start = match.group(0)
        if leaf.line == position[0] and position[1] < leaf.column + match.end():
            # The cursor is still within the quote characters themselves.
            return None, None, None
        return cut_value_at_position(leaf, position)[match.end():], leaf, start

    if position < leaf.start_pos:
        return None, None, None

    if leaf.type == 'string':
        return return_part_of_leaf(leaf)

    leaves = []
    while leaf is not None:
        if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
            if len(leaf.value) > 1:
                return return_part_of_leaf(leaf)
            prefix_leaf = None
            if not leaf.prefix:
                prefix_leaf = leaf.get_previous_leaf()
                # Only r/u/b/f (in any combination/case) count as a string prefix.
                if prefix_leaf is None or prefix_leaf.type != 'name' \
                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
                    prefix_leaf = None

            return (
                ''.join(cut_value_at_position(l, position) for l in leaves),
                prefix_leaf or leaf,
                ('' if prefix_leaf is None else prefix_leaf.value)
                + cut_value_at_position(leaf, position),
            )
        if leaf.line != position[0]:
            # Multi line strings are always simple error leaves and contain the
            # whole string, single line error leaves are therefore important
            # now and since the line is different, it's not really a single
            # line string anymore.
            break
        leaves.insert(0, leaf)
        leaf = leaf.get_previous_leaf()
    return None, None, None
def complete_trailer(user_context, values):
    """Collect attribute completion names for ``<value>.<cursor>`` trailers.

    Gathers the filter values of every inferred value, applies the
    ``__getattr__`` heuristic for non-stub tree instances, and also includes
    names from values produced by ``convert_values`` that were not already
    present.
    """
    completion_names = []
    for value in values:
        for filter in value.get_filters(origin_scope=user_context.tree_node):
            completion_names += filter.values()

        if not value.is_stub() and isinstance(value, TreeInstance):
            completion_names += _complete_getattr(user_context, value)

    python_values = convert_values(values)
    for c in python_values:
        if c not in values:
            for filter in c.get_filters(origin_scope=user_context.tree_node):
                completion_names += filter.values()
    return completion_names
def _complete_getattr(user_context, instance):
    """
    A heuristic to make completion for proxy objects work. This is not
    intended to work in all cases. It works exactly in this case:

        def __getattr__(self, name):
            ...
            return getattr(any_object, name)

    It is important that the return contains getattr directly, otherwise it
    won't work anymore. It's really just a stupid heuristic. It will not
    work if you write e.g. `return (getattr(o, name))`, because of the
    additional parentheses. It will also not work if you move the getattr
    to some other place that is not the return statement itself.

    It is intentional that it doesn't work in all cases. Generally it's
    really hard to do even this case (as you can see below). Most people
    will write it like this anyway and the other ones, well they are just
    out of luck I guess :) ~dave.
    """
    # __getattr__ takes precedence; fall back to __getattribute__.
    names = (instance.get_function_slot_names(u'__getattr__')
             or instance.get_function_slot_names(u'__getattribute__'))
    functions = ValueSet.from_sets(
        name.infer()
        for name in names
    )
    for func in functions:
        tree_node = func.tree_node
        for return_stmt in tree_node.iter_return_stmts():
            # Basically until the next comment we just try to find out if a
            # return statement looks exactly like `return getattr(x, name)`.
            if return_stmt.type != 'return_stmt':
                continue
            atom_expr = return_stmt.children[1]
            if atom_expr.type != 'atom_expr':
                continue
            atom = atom_expr.children[0]
            trailer = atom_expr.children[1]
            if len(atom_expr.children) != 2 or atom.type != 'name' \
                    or atom.value != 'getattr':
                continue
            arglist = trailer.children[1]
            if arglist.type != 'arglist' or len(arglist.children) < 3:
                continue
            context = func.as_context()
            object_node = arglist.children[0]

            # Make sure it's a param: foo in __getattr__(self, foo)
            name_node = arglist.children[2]
            name_list = context.goto(name_node, name_node.start_pos)
            if not any(n.api_type == 'param' for n in name_list):
                continue

            # Now that we know that these are most probably completion
            # objects, we just infer the object and return them as
            # completions.
            objects = context.infer_node(object_node)
            return complete_trailer(user_context, objects)
    return []
def search_in_module(inference_state, module_context, names, wanted_names,
                     wanted_type, complete=False, fuzzy=False,
                     ignore_imports=False, convert=False):
    # Generator: resolves the dotted path `wanted_names` starting from the
    # candidate `names`, then yields a classes.Completion (complete=True) or
    # classes.Name for every name matching the final path segment, optionally
    # filtered by `wanted_type`.
    #
    # First walk every segment except the last with a "dot lookup": keep only
    # names whose string matches the segment and replace `names` with the
    # attribute names of the values they infer to.
    for s in wanted_names[:-1]:
        new_names = []
        for n in names:
            if s == n.string_name:
                # Optionally skip names that come from imports
                # (tree_name set and api_type 'module').
                if n.tree_name is not None and n.api_type == 'module' \
                        and ignore_imports:
                    continue
                new_names += complete_trailer(
                    module_context,
                    n.infer()
                )
        debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
        names = new_names
    # Match the last segment: fuzzy/prefix match when completing, exact
    # (case-insensitive) equality otherwise.
    last_name = wanted_names[-1].lower()
    for n in names:
        string = n.string_name.lower()
        if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
                or not complete and string == last_name:
            # Rebinding `names` here is safe: the outer for-loop already
            # holds an iterator over the original list.
            if isinstance(n, SubModuleName):
                names = [v.name for v in n.infer()]
            else:
                names = [n]
            if convert:
                names = convert_names(names)
            for n2 in names:
                if complete:
                    def_ = classes.Completion(
                        inference_state, n2,
                        stack=None,
                        like_name_length=len(last_name),
                        is_fuzzy=fuzzy,
                    )
                else:
                    def_ = classes.Name(inference_state, n2)
                # wanted_type falsy means "any type".
                if not wanted_type or wanted_type == def_.type:
                    yield def_
| 40.651057 | 98 | 0.593958 |
acfbd6631525724d6ff9694729373b907c475c9b | 42 | py | Python | budgetmap/_node_python_test/konlpy_node/python/article.py | BudgetWiser/budgetmap | 29414c28b7b77d3b6b138f293a4999e4446e5f20 | [
"Apache-2.0"
] | null | null | null | budgetmap/_node_python_test/konlpy_node/python/article.py | BudgetWiser/budgetmap | 29414c28b7b77d3b6b138f293a4999e4446e5f20 | [
"Apache-2.0"
] | null | null | null | budgetmap/_node_python_test/konlpy_node/python/article.py | BudgetWiser/budgetmap | 29414c28b7b77d3b6b138f293a4999e4446e5f20 | [
"Apache-2.0"
] | null | null | null | import sys, json
a = raw_input()
print a
| 8.4 | 16 | 0.690476 |
acfbd668f29aba3ec150e5d19318dc6c4fc3210d | 15,079 | py | Python | tests/query/data_storage/test_service_buffer_file_stream_reader.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 33 | 2019-05-27T13:04:35.000Z | 2022-03-17T13:33:05.000Z | tests/query/data_storage/test_service_buffer_file_stream_reader.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 31 | 2019-06-10T01:55:47.000Z | 2022-03-09T07:27:49.000Z | tests/query/data_storage/test_service_buffer_file_stream_reader.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 25 | 2019-05-13T18:39:24.000Z | 2021-11-16T03:07:33.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import struct
import io
import json
from ossdbtoolsservice.query.data_storage.service_buffer_file_stream_reader import ServiceBufferFileStreamReader
from ossdbtoolsservice.query.contracts.column import DbColumn
from ossdbtoolsservice.parsers import datatypes
from ossdbtoolsservice.utils.constants import PG_PROVIDER_NAME
# Text encoding used when decoding row payload bytes; unused in this test
# module's visible code — presumably mirrors the reader's encoding, confirm.
DECODING_METHOD = 'utf-8'
class TestServiceBufferFileStreamReader(unittest.TestCase):
    """Unit tests for ServiceBufferFileStreamReader.

    Every backing stream mimics the reader's on-disk cell format: a 4-byte
    (native-endian) length prefix followed by the raw payload bytes. A length
    prefix of -1 marks a NULL cell.
    """

    @staticmethod
    def _write_value(stream, payload):
        """Write *payload* to *stream* as <4-byte length prefix><payload>."""
        stream.write(bytearray(struct.pack("i", len(payload))))
        stream.write(payload)

    @staticmethod
    def _make_column(data_type):
        """Build a DbColumn of the given PostgreSQL data type."""
        col = DbColumn()
        col.data_type = data_type
        col.provider = PG_PROVIDER_NAME
        return col

    def setUp(self):
        # Test values, one per datatype exercised below.
        self._bool_test_value = True
        self._float_test_value1 = 123.456
        self._float_test_value2 = 123.45600128173828
        self._short_test_value = 12345
        self._int_test_value = 1234567890
        self._long_long_test_value = 123456789012
        self._str_test_value = "TestString"
        self._bytea_test_value = memoryview(b'TestString')
        self._dict_test_value = {"Ser,ver": " Tes'tS,,erver ", "Sche'ma": "TestSchema"}
        self._list_test_value = ["Test,Server", "Tes'tSchema", "Tes,'tTable"]
        self._numericrange_test_value = "[10,20)"
        self._datetimerange_test_value = "[2014-06-08T12:12:45,2016-07-06T14:12:08)"
        self._datetimetzrange_test_value = "[2014-06-08T12:12:45-07:00,2016-07-06T14:12:08-07:00)"
        self._daterange_test_value = "[2015-06-06,2016-08-08)"

        # Bool stream: value, NULL marker, value — exercises NULL handling.
        self._bool_file_stream = io.BytesIO()
        bool_bytes = struct.pack("?", self._bool_test_value)
        self._write_value(self._bool_file_stream, bool_bytes)
        self._bool_file_stream.write(b'\xff\xff\xff\xff')  # length -1 == NULL
        self._write_value(self._bool_file_stream, bool_bytes)

        self._float_file_stream1 = io.BytesIO()
        self._write_value(self._float_file_stream1, struct.pack("d", self._float_test_value1))

        self._float_file_stream2 = io.BytesIO()
        self._write_value(self._float_file_stream2, struct.pack("d", self._float_test_value2))

        # The short/int/long-long streams currently have no matching reader or
        # test method; kept for parity with the original suite.
        self._short_file_stream = io.BytesIO()
        self._write_value(self._short_file_stream, struct.pack("h", self._short_test_value))

        self._int_file_stream = io.BytesIO()
        self._write_value(self._int_file_stream, struct.pack("i", self._int_test_value))

        self._long_long_file_stream = io.BytesIO()
        self._write_value(self._long_long_file_stream, struct.pack("q", self._long_long_test_value))

        self._bytea_file_stream = io.BytesIO()
        self._write_value(self._bytea_file_stream, bytes(self._bytea_test_value))

        self._dict_file_stream = io.BytesIO()
        self._write_value(self._dict_file_stream, json.dumps(self._dict_test_value).encode())

        self._list_file_stream = io.BytesIO()
        self._write_value(self._list_file_stream, json.dumps(self._list_test_value).encode())

        self._numericrange_file_stream = io.BytesIO()
        self._write_value(self._numericrange_file_stream, self._numericrange_test_value.encode())

        self._datetimerange_file_stream = io.BytesIO()
        self._write_value(self._datetimerange_file_stream, self._datetimerange_test_value.encode())

        self._datetimetzrange_file_stream = io.BytesIO()
        self._write_value(self._datetimetzrange_file_stream, self._datetimetzrange_test_value.encode())

        self._daterange_file_stream = io.BytesIO()
        self._write_value(self._daterange_file_stream, self._daterange_test_value.encode())

        # One row holding four length-prefixed cells of mixed types.
        self._multiple_cols_file_stream = io.BytesIO()
        for payload in (struct.pack("d", self._float_test_value1),
                        struct.pack("i", self._int_test_value),
                        self._str_test_value.encode(),
                        struct.pack("d", self._float_test_value2)):
            self._write_value(self._multiple_cols_file_stream, payload)

        # Readers under test, one per stream.
        self._bool_reader = ServiceBufferFileStreamReader(self._bool_file_stream)
        self._float_reader1 = ServiceBufferFileStreamReader(self._float_file_stream1)
        self._float_reader2 = ServiceBufferFileStreamReader(self._float_file_stream2)
        self._bytea_reader = ServiceBufferFileStreamReader(self._bytea_file_stream)
        self._dict_reader = ServiceBufferFileStreamReader(self._dict_file_stream)
        self._list_reader = ServiceBufferFileStreamReader(self._list_file_stream)
        self._numericrange_reader = ServiceBufferFileStreamReader(self._numericrange_file_stream)
        self._datetimerange_reader = ServiceBufferFileStreamReader(self._datetimerange_file_stream)
        self._datetimetzrange_reader = ServiceBufferFileStreamReader(self._datetimetzrange_file_stream)
        self._daterange_reader = ServiceBufferFileStreamReader(self._daterange_file_stream)
        self._multiple_cols_reader = ServiceBufferFileStreamReader(self._multiple_cols_file_stream)

    def tearDown(self):
        # Close every stream opened in setUp (the original closed only four).
        for stream in (self._bool_file_stream,
                       self._float_file_stream1,
                       self._float_file_stream2,
                       self._short_file_stream,
                       self._int_file_stream,
                       self._long_long_file_stream,
                       self._bytea_file_stream,
                       self._dict_file_stream,
                       self._list_file_stream,
                       self._numericrange_file_stream,
                       self._datetimerange_file_stream,
                       self._datetimetzrange_file_stream,
                       self._daterange_file_stream,
                       self._multiple_cols_file_stream):
            stream.close()

    def test_read_bool(self):
        test_columns_info = [self._make_column(datatypes.DATATYPE_BOOL)]
        # Row 1: real value at offset 0 (4-byte prefix + 1-byte payload).
        res = self._bool_reader.read_row(0, 1, test_columns_info)
        self.assertEqual(self._bool_test_value, res[0].raw_object)
        # Row 2: NULL marker at offset 5.
        res = self._bool_reader.read_row(5, 2, test_columns_info)
        self.assertEqual(None, res[0].raw_object)
        # Row 3: second real value after the 4-byte NULL marker.
        res = self._bool_reader.read_row(9, 3, test_columns_info)
        self.assertEqual(self._bool_test_value, res[0].raw_object)

    def test_read_float1(self):
        res = self._float_reader1.read_row(0, 1, [self._make_column(datatypes.DATATYPE_REAL)])
        self.assertEqual(self._float_test_value1, res[0].raw_object)

    def test_read_float2(self):
        res = self._float_reader2.read_row(0, 1, [self._make_column(datatypes.DATATYPE_REAL)])
        self.assertEqual(self._float_test_value2, res[0].raw_object)

    def test_read_bytea(self):
        res = self._bytea_reader.read_row(0, 1, [self._make_column(datatypes.DATATYPE_BYTEA)])
        expected = self._bytea_test_value.tobytes()
        self.assertEqual(str(expected), str(res[0].raw_object))

    def test_read_json(self):
        """Test json/jsonb string is returned as is"""
        for datatype in (datatypes.DATATYPE_JSON, datatypes.DATATYPE_JSONB):
            reader = ServiceBufferFileStreamReader(self._dict_file_stream)
            res = reader.read_row(0, 1, [self._make_column(datatype)])
            self.assertEqual(1, len(res))
            self.assertEqual(json.dumps(self._dict_test_value), res[0].raw_object)

    def test_read_numericrange(self):
        res = self._numericrange_reader.read_row(0, 1, [self._make_column(datatypes.DATATYPE_INT4RANGE)])
        self.assertEqual(str(self._numericrange_test_value), str(res[0].raw_object))

    def test_read_datetimerange(self):
        res = self._datetimerange_reader.read_row(0, 1, [self._make_column(datatypes.DATATYPE_TSRANGE)])
        self.assertEqual(str(self._datetimerange_test_value), str(res[0].raw_object))

    def test_read_datetimetzrange(self):
        res = self._datetimetzrange_reader.read_row(0, 1, [self._make_column(datatypes.DATATYPE_TSTZRANGE)])
        self.assertEqual(str(self._datetimetzrange_test_value), str(res[0].raw_object))

    def test_read_daterange(self):
        res = self._daterange_reader.read_row(0, 1, [self._make_column(datatypes.DATATYPE_DATERANGE)])
        self.assertEqual(str(self._daterange_test_value), str(res[0].raw_object))

    def test_read_multiple_cols(self):
        test_columns_info = [self._make_column(datatypes.DATATYPE_REAL),
                             self._make_column(datatypes.DATATYPE_INTEGER),
                             self._make_column(datatypes.DATATYPE_TEXT),
                             self._make_column(datatypes.DATATYPE_REAL)]
        res = self._multiple_cols_reader.read_row(0, 1, test_columns_info)
        self.assertEqual(self._float_test_value1, res[0].raw_object)
        self.assertEqual(self._int_test_value, res[1].raw_object)
        self.assertEqual(self._str_test_value, res[2].raw_object)
        self.assertEqual(self._float_test_value2, res[3].raw_object)
# Allow the suite to be run directly in addition to test-runner discovery.
if __name__ == '__main__':
    unittest.main()
| 44.35 | 113 | 0.684594 |
acfbd721f71f52dd5575106433b0c76b9719dfe2 | 19,601 | py | Python | code/vl-bert/apex/apex/amp/frontend.py | e-bug/mpre-unmasked | cd12250b58152a558e15a33113bf98d90b88e776 | [
"MIT"
] | 5 | 2020-12-08T12:38:48.000Z | 2021-11-25T13:19:16.000Z | code/vl-bert/apex/apex/amp/frontend.py | e-bug/mpre-unmasked | cd12250b58152a558e15a33113bf98d90b88e776 | [
"MIT"
] | null | null | null | code/vl-bert/apex/apex/amp/frontend.py | e-bug/mpre-unmasked | cd12250b58152a558e15a33113bf98d90b88e776 | [
"MIT"
] | null | null | null | import torch
from ._initialize import _initialize
from ._amp_state import _amp_state, warn_or_err, maybe_print
class Properties(object):
"""
This class has two purposes: to establish a set of default properties,
and to route setting of these attributes through __setattr__ so that (in theory)
they can be checked for consistency with other existing args.
"""
def __init__(self):
self.options = {
"enabled" : False,
"opt_level" : None,
"cast_model_type" : None,
"patch_torch_functions" : False,
"keep_batchnorm_fp32" : None,
"master_weights" : None,
"loss_scale" : 1.0,
# Reserved for future functionality
# "fused_optimizer" : False,
# "enable_ddp_interop" : False,
}
"""
This function allows updating several options at a time without routing through
__setattr__ checks, to avoid "you can't get there from here" scenarios.
Currently not intended to be exposed; users are expected to select an opt_level
and apply consistent modifications.
"""
def _update_options_dict(new_options):
for k, v in new_options:
if k in self.options:
self.options[k] = v
else:
raise ValueError("Tried to set unexpected option {}".format(k))
"""
The members of "options" are not direct attributes of self, so access attempts
will roll down to __getattr__. This borrows from the logic in torch.nn.Module.
"""
def __getattr__(self, name):
if "options" in self.__dict__:
options = self.__dict__["options"]
if name in options:
return options[name]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def __setattr__(self, name, value):
if "options" in self.__dict__:
if name in self.options:
# print("setting {} {}".format(name, value))
if name == "cast_model_type":
if self.opt_level == "O1" and value is not None:
if value is not False:
if value is not torch.float32:
warn_or_err("O1 inserts casts around Torch functions rather than "
"model weights, so with O1, the model weights themselves "
"should remain FP32. If you wish to cast the model to a "
"different type, use opt_level='O2' or 'O3'. " +
"cast_model_type was {}".format(value))
self.options[name] = value
elif name == "patch_torch_functions":
if self.opt_level != "O1" and value:
warn_or_err("Currently, patch_torch_functions=True should only be set by "
"selecting opt_level='O1'.")
self.options[name] = value
elif name == "keep_batchnorm_fp32":
if self.opt_level == "O1" and value is not None:
warn_or_err("With opt_level O1, batchnorm functions are automatically patched "
"to run in FP32, so keep_batchnorm_fp32 should be None." +
" keep_batchnorm_fp32 was {}".format(value))
if value == "False":
self.options[name] = False
elif value == "True":
self.options[name] = True
else:
assert (value is True or value is False or value is None),\
"keep_batchnorm_fp32 must be a boolean, the string 'True' or 'False', "\
"or None, found keep_batchnorm_fp32={}".format(value)
self.options[name] = value
elif name == "master_weights":
if self.opt_level == "O1" and value is not None:
warn_or_err("It doesn't make sense to use master_weights with O1. "
"With O1, your model weights themselves should be FP32.")
self.options[name] = value
elif name == "loss_scale":
if value == "dynamic":
self.options[name] = value
else:
self.options[name] = float(value)
else:
self.options[name] = value
else:
super(Properties, self).__setattr__(name, value)
""" O0-O3 are convenience wrappers to establish defaults for typically used mixed precision options. """
class O3:
    brief = "O3: Pure FP16 training."
    more = "Calls .half() on your model, converting the entire model to FP16.\n"\
           "A casting operation is also inserted to cast incoming Tensors to FP16,\n"\
           "so you don't need to change your data pipeline.\n"\
           "This mode is useful for establishing a performance ceiling.\n"\
           "It's also possible training may 'just work' in this mode.\n"\
           "If not, try other optimization levels."

    def __call__(self, properties):
        """Install the O3 (pure FP16) defaults on *properties* and return it."""
        # Assignment order matters: Properties.__setattr__ validates some
        # options against the already-set opt_level.
        defaults = (
            ("enabled", True),
            ("opt_level", "O3"),
            ("cast_model_type", torch.float16),
            ("patch_torch_functions", False),
            ("keep_batchnorm_fp32", False),
            ("master_weights", False),
            ("loss_scale", 1.0),
        )
        for option, value in defaults:
            setattr(properties, option, value)
        return properties  # mutated in place; returned for convenience
class O2:
    brief = "O2: FP16 training with FP32 batchnorm and FP32 master weights.\n"
    more = "Calls .half() on your model, converting the entire model (except for batchnorms)\n"\
           "to FP16. Batchnorms are retained in FP32 for additional stability.\n"\
           "The forward pass is patched to cast incoming Tensors to FP16, so you don't need to change\n"\
           "your data pipeline.\n"\
           "O2 creates FP32 master weights outside the model and patches any optimizers to update\n"\
           "these master weights, then copy the master weights into the FP16 model weights.\n"\
           "Master weights can also improve convergence and stability."

    def __call__(self, properties):
        """Install the O2 (FP16 + FP32 master weights) defaults on *properties* and return it."""
        # Assignment order matters: Properties.__setattr__ validates some
        # options against the already-set opt_level.
        defaults = (
            ("enabled", True),
            ("opt_level", "O2"),
            ("cast_model_type", torch.float16),
            ("patch_torch_functions", False),
            ("keep_batchnorm_fp32", True),
            ("master_weights", True),
            ("loss_scale", "dynamic"),
        )
        for option, value in defaults:
            setattr(properties, option, value)
        return properties  # mutated in place; returned for convenience
class O1:
    brief = "O1: Insert automatic casts around Pytorch functions and Tensor methods.\n"
    more = "The type of your model's weights is not altered. However, internally,\n"\
           "Pytorch functions are patched to cast any Tensor Core-friendly ops to FP16 for speed,\n"\
           "while operations that might benefit from the additional stability of FP32 are patched\n"\
           "to cast their inputs to fp32.\n"\
           "O1 is the safest way to try mixed precision training, and is recommended when\n"\
           "trying mixed precision training for the first time."

    def __call__(self, properties):
        """Install the O1 (automatic function patching) defaults on *properties* and return it."""
        # Assignment order matters: Properties.__setattr__ validates some
        # options against the already-set opt_level.
        defaults = (
            ("enabled", True),
            ("opt_level", "O1"),
            ("cast_model_type", None),
            ("patch_torch_functions", True),
            ("keep_batchnorm_fp32", None),
            ("master_weights", None),
            ("loss_scale", "dynamic"),
        )
        for option, value in defaults:
            setattr(properties, option, value)
        return properties  # mutated in place; returned for convenience
class O0:
    brief = "O0: Pure FP32 training.\n"
    more = "Your models are checked to make sure parameters are FP32, but otherwise the\n"\
           "types of weights and internal Pytorch operations are not altered. This mode disables any\n"\
           "FP16 arithmetic, although other optimizations like DDP interop may still be requested.\n"

    def __call__(self, properties):
        """Install the O0 (pure FP32) defaults on *properties* and return it."""
        # Assignment order matters: Properties.__setattr__ validates some
        # options against the already-set opt_level.
        defaults = (
            ("enabled", True),
            ("opt_level", "O0"),
            ("cast_model_type", torch.float32),
            ("patch_torch_functions", False),
            ("keep_batchnorm_fp32", None),
            ("master_weights", False),
            ("loss_scale", 1.0),
        )
        for option, value in defaults:
            setattr(properties, option, value)
        return properties  # mutated in place; returned for convenience
# Registry mapping each opt_level string to the callable that installs its
# default Properties; consumed by initialize() below.
opt_levels = {"O3": O3(),
              "O2": O2(),
              "O1": O1(),
              "O0": O0()}
# allow user to directly pass Properties struct as well?
def initialize(
    models,
    optimizers=None,
    enabled=True,
    opt_level="O1",
    cast_model_type=None,
    patch_torch_functions=None,
    keep_batchnorm_fp32=None,
    master_weights=None,
    loss_scale=None,
    cast_model_outputs=None,
    num_losses=1,
    verbosity=1,
    min_loss_scale=None,
    max_loss_scale=2.**24
    ):
    """
    Initialize your models, optimizers, and the Torch tensor and functional namespace according to the
    chosen ``opt_level`` and overridden properties, if any.
    ``amp.initialize`` should be called **after** you have finished
    constructing your model(s) and
    optimizer(s), but **before** you send your model through any DistributedDataParallel wrapper.
    See `Distributed training`_ in the Imagenet example.
    Currently, ``amp.initialize`` should only be called **once**,
    although it can process an arbitrary number of
    models and optimizers (see the corresponding `Advanced Amp Usage topic`_).
    If you think your use case requires ``amp.initialize`` to be called more than once,
    `let us know`_.
    Any property keyword argument that is not ``None`` will be interpreted as a manual override.
    To prevent having to rewrite anything else in your scripts, name the returned models/optimizers
    to replace the passed models/optimizers, as in the code sample below.
    Args:
        models (torch.nn.Module or list of torch.nn.Modules):  Models to modify/cast.
        optimizers (optional, torch.optim.Optimizer or list of torch.optim.Optimizers):  Optimizers to modify/cast.
            REQUIRED for training, optional for inference.
        enabled (bool, optional, default=True):  If False, renders all Amp calls no-ops, so your scripts
            should run as if Amp were not present.
        opt_level (str, optional, default="O1"):  Pure or mixed precision optimization level.  Accepted values are
            "O0", "O1", "O2", and "O3", explained in detail above.
        cast_model_type (``torch.dtype``, optional, default=None):  Optional property override, see
            above.
        patch_torch_functions (bool, optional, default=None):  Optional property override.
        keep_batchnorm_fp32 (bool or str, optional, default=None):  Optional property override.  If
            passed as a string, must be the string "True" or "False".
        master_weights (bool, optional, default=None):  Optional property override.
        loss_scale (float or str, optional, default=None):  Optional property override.  If passed as a string,
            must be a string representing a number, e.g., "128.0", or the string "dynamic".
        cast_model_outputs (torch.dtype, optional, default=None):  Option to ensure that the outputs
            of your model(s) are always cast to a particular type regardless of ``opt_level``.
        num_losses (int, optional, default=1):  Option to tell Amp in advance how many losses/backward
            passes you plan to use.  When used in conjunction with the ``loss_id`` argument to
            ``amp.scale_loss``, enables Amp to use a different loss scale per loss/backward pass,
            which can improve stability.  See "Multiple models/optimizers/losses"
            under `Advanced Amp Usage`_ for examples.  If ``num_losses`` is left to 1, Amp will still
            support multiple losses/backward passes, but use a single global loss scale
            for all of them.
        verbosity (int, default=1):  Set to 0 to suppress Amp-related output.
        min_loss_scale (float, default=None):  Sets a floor for the loss scale values that can be chosen by dynamic
            loss scaling.  The default value of None means that no floor is imposed.
            If dynamic loss scaling is not used, `min_loss_scale` is ignored.
        max_loss_scale (float, default=2.**24):  Sets a ceiling for the loss scale values that can be chosen by
            dynamic loss scaling.  If dynamic loss scaling is not used, `max_loss_scale` is ignored.
    Returns:
        Model(s) and optimizer(s) modified according to the ``opt_level``.
        If either the ``models`` or ``optimizers`` args were lists, the corresponding return value will
        also be a list.
    Permissible invocations::
        model, optim = amp.initialize(model, optim,...)
        model, [optim1, optim2] = amp.initialize(model, [optim1, optim2],...)
        [model1, model2], optim = amp.initialize([model1, model2], optim,...)
        [model1, model2], [optim1, optim2] = amp.initialize([model1, model2], [optim1, optim2],...)
        # This is not an exhaustive list of the cross product of options that are possible,
        # just a set of examples.
        model, optim = amp.initialize(model, optim, opt_level="O0")
        model, optim = amp.initialize(model, optim, opt_level="O0", loss_scale="dynamic"|128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O1") # uses "loss_scale="dynamic" default
        model, optim = amp.initialize(model, optim, opt_level="O1", loss_scale=128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O2") # uses "loss_scale="dynamic" default
        model, optim = amp.initialize(model, optim, opt_level="O2", loss_scale=128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O2", keep_batchnorm_fp32=True|False|"True"|"False")
        model, optim = amp.initialize(model, optim, opt_level="O3") # uses loss_scale=1.0 default
        model, optim = amp.initialize(model, optim, opt_level="O3", loss_scale="dynamic"|128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O3", keep_batchnorm_fp32=True|False|"True"|"False")
    The `Imagenet example`_ demonstrates live use of various opt_levels and overrides.
    .. _`Distributed training`:
        https://github.com/NVIDIA/apex/tree/master/examples/imagenet#distributed-training
    .. _`Imagenet example`:
        https://github.com/NVIDIA/apex/tree/master/examples/imagenet
    .. _`Advanced Amp Usage`:
        https://nvidia.github.io/apex/advanced.html
    .. _`Advanced Amp Usage topic`:
        https://nvidia.github.io/apex/advanced.html#multiple-models-optimizers-losses
    .. _`let us know`:
        https://github.com/NVIDIA/apex/issues
    """
    # All configuration is accumulated in the process-global _amp_state.
    _amp_state.opt_properties = Properties()
    _amp_state.verbosity = verbosity
    # Disabled: hand back the inputs untouched so scripts run as if Amp were absent.
    if not enabled:
        if optimizers is None:
            return models
        else:
            return models, optimizers
    if not torch.backends.cudnn.enabled:
        raise RuntimeError(
            "Amp requires torch.backends.cudnn.enabled = True")
    if opt_level not in opt_levels:
        raise RuntimeError(
            "Unexpected optimization level {}. ".format(opt_level) +
            "Options are 'O0', 'O1', 'O2', 'O3'.  Note that in `O0`, `O1`, etc., the prefix O is the letter O, " +
            "not the number zero.")
    else:
        # Let the selected O-level callable install its defaults (routed
        # through Properties.__setattr__ for validation).
        _amp_state.opt_properties = opt_levels[opt_level](_amp_state.opt_properties)
        maybe_print("Selected optimization level {}".format(opt_levels[opt_level].brief), True)
        maybe_print("Defaults for this optimization level are:", True)
        for k, v in _amp_state.opt_properties.options.items():
            maybe_print("{:22} : {}".format(k, v), True)
    _amp_state.min_loss_scale = min_loss_scale
    _amp_state.max_loss_scale = max_loss_scale
    maybe_print("Processing user overrides (additional kwargs that are not None)...", True)
    # I chose to have the keyword arguments listed directly in the argument list,
    # instead of **kwargs, so I can't use kwargs.items() here.
    # NOTE: `enabled` has a non-None default so this first check is always
    # true here; kept for symmetry with the other overrides.
    if enabled is not None:
        _amp_state.opt_properties.enabled = enabled
    if opt_level is not None:
        _amp_state.opt_properties.opt_level = opt_level
    if cast_model_type is not None:
        _amp_state.opt_properties.cast_model_type = cast_model_type
    if patch_torch_functions is not None:
        _amp_state.opt_properties.patch_torch_functions = patch_torch_functions
    if keep_batchnorm_fp32 is not None:
        _amp_state.opt_properties.keep_batchnorm_fp32 = keep_batchnorm_fp32
    if master_weights is not None:
        _amp_state.opt_properties.master_weights = master_weights
    if loss_scale is not None:
        _amp_state.opt_properties.loss_scale = loss_scale
    maybe_print("After processing overrides, optimization options are:", True)
    for k, v in _amp_state.opt_properties.options.items():
        maybe_print("{:22} : {}".format(k, v), True)
    # Hand off to the backend to actually patch/cast models and optimizers.
    return _initialize(models, optimizers, _amp_state.opt_properties, num_losses, cast_model_outputs)
# TODO: is this necessary/useful?
# def check_option_consistency(enabled=True,
# opt_level=None,
# cast_model_type=None,
# patch_torch_functions=None,
# keep_batchnorm_fp32=None,
# master_weights=None,
# loss_scale=None,
# enable_ddp_interop=None,
# hard_override=False):
# """
# Utility function that enables users to quickly check if the option combination they intend
# to use is permitted. ``check_option_consistency`` does not require models or optimizers
# to be constructed, and can be called at any point in the scripts. ``check_option_consistency``
# is totally self-contained; it does not set any amp global state or affect anything outside
# of itself.
# """
#
# if not enabled:
# return
#
# if opt_level not in opt_levels:
# raise RuntimeError("Unexpected optimization level. Options are 'O0', 'O1', 'O2', 'O3'.")
# else:
# opt_properties = opt_levels[opt_level](Properties())
# print("Selected optimization level {}", opt_levels[opt_level].brief)
# print("Defaults for this optimization level are:")
# for k, v in opt_properties.options:
# print("{:22} : {}".format(k, v))
#
# print("Processing user overrides (additional kwargs that are not None)...")
# for k, v in kwargs:
# if k not in _amp_state.opt_properties.options:
# raise RuntimeError("Unexpected kwarg {}".format(k))
# if v is not None:
# setattr(opt_properties, k, v)
#
# print("After processing overrides, optimization options are:")
# for k, v in opt_properties.options:
# print("{:22} : {}".format(k, v))
| 49.0025 | 115 | 0.63461 |
acfbd7f85c67fee814d765fc478e7b965c0ee5e1 | 2,834 | py | Python | encrypt/utils.py | alexjaniak/encrypt | 36f789551483cc88b78d1cb3ba60918530cef0b6 | [
"MIT"
] | null | null | null | encrypt/utils.py | alexjaniak/encrypt | 36f789551483cc88b78d1cb3ba60918530cef0b6 | [
"MIT"
] | null | null | null | encrypt/utils.py | alexjaniak/encrypt | 36f789551483cc88b78d1cb3ba60918530cef0b6 | [
"MIT"
] | null | null | null | # PROJECT: encrypt - encrypts text files
# AUTHOR: alexjaniak
# FILE: helper functions
# IMPORTS
from Crypto.Cipher import AES
from Crypto.Protocol import KDF
from Crypto.Util.Padding import pad, unpad
from Crypto.Random import get_random_bytes
import os
# MAIN-FUNCTIONS
def encrypt_file(read_file: str, password: str) -> bytes:
    """Read read_file and return its encrypted contents.

    The result is salt (32 bytes) + IV (16 bytes) + AES-CBC ciphertext.
    Nothing is written to disk — the caller decides where to store it.
    (The old docstring claimed this wrote to a file; it never did.)
    """
    # Read the whole file at once; the previous 16-byte `+=` accumulation
    # loop was effectively quadratic and gained nothing.
    with open(read_file, 'rb') as rfile:
        total = rfile.read()
    # encrypt text
    key, salt = _get_private_key(password)
    encrypted_bytes, iv = _encrypt_bytes(total, key)
    # Prepend salt and IV so decrypt_file can recover them later.
    return salt + iv + encrypted_bytes
def decrypt_file(read_file: str, password: str) -> str:
    """Read an encrypted file and return the decrypted text.

    Expects the layout produced by encrypt_file: a 32-byte KDF salt,
    then a 16-byte AES-CBC IV, then the ciphertext. Nothing is written
    to disk. Raises DecryptionError (via _decrypt_bytes) when the
    password is wrong.
    """
    with open(read_file, 'rb') as rfile:
        salt = rfile.read(32)        # KDF salt stored at the front
        iv = rfile.read(16)          # AES-CBC initialisation vector
        padded_bytes = rfile.read()  # the remainder is the ciphertext
    key, _ = _get_private_key(password, salt)
    return _decrypt_bytes(padded_bytes, key, iv).decode('UTF-8')
def new_file_ext(file_path: str, new_ext: str) -> str:
    """Return file_path with its extension replaced by new_ext.

    new_ext should include the leading dot, e.g. ".enc".
    """
    root, _ = os.path.splitext(file_path)  # old extension is discarded
    return root + new_ext
# SUB-FUNCTIONS
def _encrypt_bytes(_bytes: bytes, key: bytes):
    """AES-256-CBC encrypt ``_bytes`` with ``key``.

    Returns a (ciphertext, iv) pair; the IV is generated by the cipher.
    """
    cipher = AES.new(key, AES.MODE_CBC)
    # pad to the AES block size, then encrypt in one expression
    return cipher.encrypt(pad(_bytes, AES.block_size)), cipher.iv
def _decrypt_bytes(_bytes: bytes, key: bytes, iv: bytes) -> bytes:
    """Decrypt 256-bit AES-CBC ciphertext and strip the padding.

    Raises DecryptionError when unpadding fails — the usual symptom of
    decrypting with a key derived from the wrong password.
    """
    try:
        cipher = AES.new(key, AES.MODE_CBC, iv)
        _decrypted_bytes = cipher.decrypt(_bytes)  # decrypt bytes
        original_bytes = unpad(_decrypted_bytes, AES.block_size)  # unpad bytes
        return original_bytes
    except ValueError as exc:
        # Chain the low-level cause so tracebacks stay debuggable while
        # callers still see the domain-specific exception.
        raise DecryptionError from exc
def _get_private_key(password: str, salt: bytes = None):
    """Derive a 32-byte AES key from a password using scrypt.

    A fresh random 32-byte salt is generated when none is supplied; the
    (key, salt) pair is returned so the salt can be stored for later
    decryption.
    """
    if salt is None:  # identity check — `== None` relies on __eq__ and is non-idiomatic
        salt = get_random_bytes(32)
    _bytes = password.encode('UTF-8')  # encode into bytes.
    key = KDF.scrypt(_bytes, salt, 32, N=2**14, r=8, p=1)
    return key, salt
# EXCEPTIONS
class DecryptionError(Exception):
    """Raised when ciphertext cannot be decrypted (wrong password)."""

    def __str__(self) -> str:
        return "Failed Decryption: Incorrect Password"
if __name__ == '__main__':
    # Round-trip demo. NOTE: the original called encrypt_file(tfile, efile,
    # password) / decrypt_file(efile, tfile, password) with THREE arguments,
    # but both functions take (path, password) and *return* the data — the
    # demo crashed with a TypeError. Write the results out explicitly instead.
    password = "something"
    tfile = "text.txt"
    efile = new_file_ext(tfile, ".enc")
    ciphertext = encrypt_file(tfile, password)
    with open(efile, 'wb') as fout:
        fout.write(ciphertext)
    plaintext = decrypt_file(efile, password)
    with open(tfile, 'w') as fout:
        fout.write(plaintext)
| 28.918367 | 78 | 0.667255 |
acfbd8599fdeec72201a14d3db49a6e4a2b4d85d | 15,493 | py | Python | genienlp/ned/bootleg.py | saligrama/genienlp | 35659911883c43fdbe38c4391e75ca106763eb40 | [
"BSD-3-Clause"
] | null | null | null | genienlp/ned/bootleg.py | saligrama/genienlp | 35659911883c43fdbe38c4391e75ca106763eb40 | [
"BSD-3-Clause"
] | null | null | null | genienlp/ned/bootleg.py | saligrama/genienlp | 35659911883c43fdbe38c4391e75ca106763eb40 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2020-2021 The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fnmatch
import logging
import os
import torch
import ujson
from bootleg.end2end.bootleg_annotator import BootlegAnnotator as Annotator
from bootleg.end2end.extract_mentions import extract_mentions
from bootleg.run import run_model
from bootleg.utils.parser.parser_utils import parse_boot_and_emm_args
from ..util import get_devices
from . import AbstractEntityDisambiguator
from .ned_utils import is_banned, reverse_bisect_left
logger = logging.getLogger(__name__)
class BatchBootlegEntityDisambiguator(AbstractEntityDisambiguator):
    '''
    A wrapper for all functionalities needed from bootleg. It takes care of data preprocessing,
    running examples through bootleg, and overriding examples features with the extracted ones
    '''

    def __init__(self, args):
        """Set up model paths, the entity-type mapping, and fixed CLI overrides."""
        super().__init__(args)
        logger.info('Initializing Bootleg class')
        ### bootleg-specific attributes
        self.model_dir = f'{self.args.database_dir}/{self.args.bootleg_model}'
        self.config_path = f'{self.model_dir}/bootleg_config.yaml'
        self.cand_map = f'{self.args.database_dir}/wiki_entity_data/entity_mappings/alias2qids.json'
        self.entity_dir = f'{self.args.database_dir}/wiki_entity_data'
        self.embed_dir = f'{self.args.database_dir}/wiki_entity_data'
        # entity QID -> list of wikidata type names for that entity
        with open(f'{self.args.database_dir}/wiki_entity_data/type_mappings/wiki/qid2typenames.json') as fin:
            self.entityqid2typenames = ujson.load(fin)
        ###
        self.ckpt_name = 'bootleg_wiki'
        self.model_ckpt_path = os.path.join(self.model_dir, self.ckpt_name + '.pth')
        # Base emmental/bootleg command-line overrides shared by every run;
        # per-dataset overrides are appended in dump_entities_with_labels.
        self.fixed_overrides = [
            # emmental configs
            "--emmental.dataparallel",
            'False',
            "--emmental.log_path",
            self.args.bootleg_output_dir,
            "--emmental.use_exact_log_path",
            'True',
            "--emmental.model_path",
            self.model_ckpt_path,
            "--emmental.device",
            str(getattr(self.args, 'bootleg_device', 0)),
            # run configs
            "--run_config.dataset_threads",
            str(getattr(self.args, 'bootleg_dataset_threads', 1)),
            "--run_config.dataloader_threads",
            str(getattr(self.args, 'bootleg_dataloader_threads', 2)),
            "--run_config.eval_batch_size",
            str(getattr(self.args, 'bootleg_batch_size', 50)),
            "--run_config.log_level",
            'DEBUG',
            # data configs
            "--data_config.print_examples_prep",
            'False',
            "--data_config.entity_dir",
            self.entity_dir,
            "--data_config.entity_prep_dir",
            "prep",
            "--data_config.emb_dir",
            self.embed_dir,
            "--data_config.alias_cand_map",
            'alias2qids.json',
            "--data_config.word_embedding.cache_dir",
            self.args.embeddings,
            "--data_config.print_examples",
            'False',
        ]

    def create_config(self, overrides):
        """Parse the bootleg/emmental YAML config with CLI-style overrides applied."""
        config_args = parse_boot_and_emm_args(self.config_path, overrides)
        return config_args

    def create_jsonl(self, input_path, examples, utterance_field):
        """Write each example's utterance as a {"sentence": ...} jsonl line."""
        # create jsonl file for examples
        jsonl_input_path = input_path.rsplit('.', 1)[0] + '.jsonl'
        with open(jsonl_input_path, 'w') as fout:
            for ex in examples:
                fout.write(ujson.dumps({"sentence": getattr(ex, utterance_field)}) + '\n')

    def extract_mentions(self, input_path):
        """Detect candidate entity mentions; writes `<name>_bootleg.jsonl` next to the input."""
        jsonl_input_path = input_path.rsplit('.', 1)[0] + '.jsonl'
        jsonl_output_path = input_path.rsplit('.', 1)[0] + '_bootleg.jsonl'
        logger.info('Extracting mentions...')
        # calls the module-level `extract_mentions` imported from bootleg (not recursion)
        extract_mentions(
            in_filepath=jsonl_input_path,
            out_filepath=jsonl_output_path,
            cand_map_file=self.cand_map,
            min_alias_len=self.args.min_entity_len,
            max_alias_len=self.args.max_entity_len,
            num_workers=getattr(self.args, 'bootleg_extract_num_workers', 32),
            verbose=False,
        )

    def disambiguate_mentions(self, config_args):
        """Run the bootleg model to choose an entity candidate for each mention."""
        run_model(self.args.bootleg_dump_mode, config_args)

    def post_process_bootleg_types(self, title):
        """Normalize a wikidata type title through the fnmatch rule table.

        Returns a list of normalized type QIDs, or None when no rule matches.
        """
        types = None
        title = title.lower()
        for pair in self.wiki2normalized_type:
            if fnmatch.fnmatch(title, pair[0]):
                types = pair[1]
                break
        typeqids = None
        if types is not None:
            if isinstance(types, str):
                typeqids = [self.type_vocab_to_typeqid[types]]
            elif isinstance(types, (list, tuple)):
                typeqids = [self.type_vocab_to_typeqid[type_] for type_ in types]
        return typeqids

    def collect_features_per_line(self, line, threshold):
        """Turn one bootleg output line into per-token NED features.

        Returns (tokens_type_ids, tokens_type_probs, tokens_qids), one fixed-size
        (max_features_size) padded vector per whitespace token; every token inside
        a mention span shares that mention's features.
        """
        tokenized = line['sentence'].split(' ')
        tokens_type_ids = [[0] * self.max_features_size for _ in range(len(tokenized))]
        tokens_type_probs = [[0] * self.max_features_size for _ in range(len(tokenized))]
        tokens_qids = [[0] * self.max_features_size for _ in range(len(tokenized))]
        for alias, all_qids, all_probs, span in zip(line['aliases'], line['cands'], line['cand_probs'], line['spans']):
            # sort candidates based on bootleg's confidence scores (i.e. probabilities)
            # this used to be in bootleg code but was removed in recent version
            packed_list = zip(all_probs, all_qids)
            packed_list_sorted = sorted(packed_list, key=lambda item: item[0], reverse=True)
            all_probs, all_qids = list(zip(*packed_list_sorted))
            # filter qids with probability lower than a threshold
            idx = reverse_bisect_left(all_probs, threshold)
            all_qids = all_qids[:idx]
            all_probs = all_probs[:idx]
            if len(all_qids) > self.args.max_qids_per_entity:
                all_qids = all_qids[: self.args.max_qids_per_entity]
                all_probs = all_probs[: self.args.max_qids_per_entity]
            type_ids = []
            type_probs = []
            qids = []
            if not is_banned(alias):
                for qid, prob in zip(all_qids, all_probs):
                    # to map qids to unique ids we just need to remove the Q character as qids are distinct
                    qids.append(int(qid[1:]))
                    # get all types for a qid
                    all_typeqids = []
                    if qid in self.entityqid2typenames and self.entityqid2typenames[qid]:
                        # map entity qid to its type titles on wikidata ; then map titles to their wikidata qids
                        for typename in self.entityqid2typenames[qid]:
                            if typename in self.type_vocab_to_typeqid:
                                all_typeqids.append(self.type_vocab_to_typeqid[typename])
                    if len(all_typeqids):
                        count = 0
                        # go through all types
                        for typeqid in all_typeqids:
                            if typeqid in self.typeqid2id:
                                # map wikidata types to thingtalk types
                                if self.args.bootleg_post_process_types:
                                    # map qid to title
                                    title = self.typeqid_to_type_vocab[typeqid]
                                    # process may return multiple types for a single type when it's ambiguous
                                    typeqids = self.post_process_bootleg_types(title)
                                    # attempt to normalize qids failed; just use the original type
                                    if typeqids is None:
                                        typeqids = [typeqid]
                                else:
                                    typeqids = [typeqid]
                                for typeqid_ in typeqids:
                                    if count >= self.args.max_types_per_qid:
                                        break
                                    type_id = self.typeqid2id[typeqid_]
                                    if type_id in type_ids:
                                        continue
                                    type_ids.append(type_id)
                                    type_probs.append(prob)
                                    count += 1
            padded_type_ids = self.pad_features(type_ids, self.max_features_size, 0)
            padded_type_probs = self.pad_features(type_probs, self.max_features_size, 0)
            padded_qids = self.pad_features(qids, self.max_features_size, -1)
            tokens_type_ids[span[0] : span[1]] = [padded_type_ids] * (span[1] - span[0])
            tokens_type_probs[span[0] : span[1]] = [padded_type_probs] * (span[1] - span[0])
            tokens_qids[span[0] : span[1]] = [padded_qids] * (span[1] - span[0])
        return tokens_type_ids, tokens_type_probs, tokens_qids

    def process_examples(self, examples, split_path, utterance_field):
        """Read bootleg_labels.jsonl for this split and attach features to examples."""
        # extract features for each token in input sentence from bootleg outputs
        all_token_type_ids, all_token_type_probs, all_token_qids = [], [], []
        threshold = self.args.bootleg_prob_threshold
        file_name = os.path.basename(split_path.rsplit('.', 1)[0])
        with open(f'{self.args.bootleg_output_dir}/{file_name}_bootleg/{self.ckpt_name}/bootleg_labels.jsonl', 'r') as fin:
            for i, line in enumerate(fin):
                if i >= self.args.subsample:
                    break
                line = ujson.loads(line)
                tokens_type_ids, tokens_type_probs, tokens_qids = self.collect_features_per_line(line, threshold)
                all_token_type_ids.append(tokens_type_ids)
                all_token_type_probs.append(tokens_type_probs)
                all_token_qids.append(tokens_qids)
        all_token_type_ids = all_token_type_ids[: self.args.subsample]
        all_token_type_probs = all_token_type_probs[: self.args.subsample]
        all_token_qids = all_token_qids[: self.args.subsample]
        self.replace_features_inplace(examples, all_token_type_ids, all_token_type_probs, all_token_qids, utterance_field)

    def dump_entities_with_labels(self, examples, path, utterance_field):
        """End-to-end NED for a dataset file: dump jsonl, extract mentions, disambiguate."""
        input_file_dir = os.path.dirname(path)
        input_file_name = os.path.basename(path.rsplit('.', 1)[0] + '_bootleg.jsonl')
        data_overrides = ["--data_config.data_dir", input_file_dir, "--data_config.test_dataset.file", input_file_name]
        # get config args
        # FIX: build a new list instead of aliasing self.fixed_overrides.
        # The old `config_overrides = self.fixed_overrides` + `.extend(...)`
        # mutated the shared list, so calling this method more than once kept
        # accumulating stale per-dataset overrides.
        config_overrides = self.fixed_overrides + data_overrides
        config_args = self.create_config(config_overrides)
        # create jsonl files from input examples
        # jsonl is the input format bootleg expects
        self.create_jsonl(path, examples, utterance_field)
        # extract mentions and mention spans in the sentence and write them to output jsonl files
        self.extract_mentions(path)
        # find the right entity candidate for each mention
        self.disambiguate_mentions(config_args)
class ServingBootlegEntityDisambiguator(BatchBootlegEntityDisambiguator):
    '''
    BootlegAnnotator is a wrapper for bootleg's native annotator which takes care of bootleg instantiations and
    extracting required features from examples on-the-fly
    '''
    def __init__(self, args):
        """Build a BootlegAnnotator for on-the-fly NED in server/inference mode."""
        super().__init__(args)
        bootleg_config = self.create_config(self.fixed_overrides)
        device = get_devices()[0]  # server only runs on a single device
        # instantiate the annotator class. we use annotator only in server mode.
        # for training we use bootleg functions which preprocess and cache data using multiprocessing, and batching to speed up NED
        self.annotator = Annotator(
            config=bootleg_config,
            device='cpu' if device.type == 'cpu' else 'cuda',
            min_alias_len=args.min_entity_len,
            max_alias_len=args.max_entity_len,
            cand_map=self.cand_map,
            threshold=args.bootleg_prob_threshold,
            model_name=args.bootleg_model,
            verbose=False,
        )
        # collect all outputs now; we will filter later
        self.annotator.set_threshold(0.0)
    def process_examples(self, examples, split_path, utterance_field):
        """Annotate examples in memory and attach NED features (no files involved).

        split_path is unused here; it exists to match the batch variant's signature.
        """
        with torch.no_grad():
            bootleg_inputs = []
            for ex in examples:
                bootleg_inputs.append(getattr(ex, utterance_field))
            # label_mentions returns a dict of parallel per-field lists;
            # transpose it into one dict per example below.
            bootleg_labels = self.annotator.label_mentions(bootleg_inputs)
            keys = tuple(bootleg_labels.keys())
            values = list(bootleg_labels.values())
            values_unpacked = list(zip(*values))
            bootleg_labels_unpacked = [dict(zip(keys, values)) for values in values_unpacked]
            all_token_type_ids, all_token_type_probs, all_token_qids = [], [], []
            for ex, label in zip(examples, bootleg_labels_unpacked):
                # Rebuild the jsonl-style `line` dict that collect_features_per_line expects.
                line = {}
                line['sentence'] = getattr(ex, utterance_field)
                # sanity check: the annotator is expected to return exactly 7 fields per example
                assert len(label) == 7
                line['aliases'], line['spans'], line['cands'] = label['aliases'], label['spans'], label['cands']
                line['cand_probs'] = list(map(lambda item: list(item), label['cand_probs']))
                tokens_type_ids, tokens_type_probs, tokens_qids = self.collect_features_per_line(
                    line, self.args.bootleg_prob_threshold
                )
                all_token_type_ids.append(tokens_type_ids)
                all_token_type_probs.append(tokens_type_probs)
                all_token_qids.append(tokens_qids)
            self.replace_features_inplace(examples, all_token_type_ids, all_token_type_probs, all_token_qids, utterance_field)
acfbd97cf5ef7411f39efbde98abe08a463157fc | 3,281 | py | Python | db.py | elviva404/Valorina | 9920aaa3156d63e33b7568cbc10078db39953356 | [
"MIT"
] | null | null | null | db.py | elviva404/Valorina | 9920aaa3156d63e33b7568cbc10078db39953356 | [
"MIT"
] | null | null | null | db.py | elviva404/Valorina | 9920aaa3156d63e33b7568cbc10078db39953356 | [
"MIT"
] | null | null | null | from bson.objectid import ObjectId
from pymongo import MongoClient
import os
from dotenv import load_dotenv
from cryptography.fernet import Fernet
# Credentials/config come from the environment (.env file in development).
load_dotenv()
MONGO = os.getenv('MONGO')  # MongoDB connection URI
KEY = (os.getenv('KEY')).encode('utf-8')  # Fernet key used to encrypt stored passwords
ID = (os.getenv('ID'))  # hex ObjectId of the server-count document (see getServerCount)
cluster = MongoClient(MONGO)
db = cluster["discord"]  # all collections below live in the `discord` database
def addUserDb(username, password, region):
    """Insert a new user into the region collection.

    Returns True on success, False when the username already exists
    (previously the duplicate case fell through and returned None).
    """
    if checkUser(username, region):
        return False
    collection = db[region]
    user = {
        "username": username,
        "password": encryptPass(password),  # never store the plaintext
    }
    collection.insert_one(user)
    return True
def encryptPass(password):
    """Encrypt a plaintext password with the app-wide Fernet key."""
    return Fernet(KEY).encrypt(password.encode('utf-8'))
def checkUser(username, region):
    """Return True when a user record exists for this username/region."""
    # getUser returns the user document or False; collapse to a plain bool
    # instead of the old four-line if/else.
    return bool(getUser(username, region))
def updatePass(username, password, region):
    """Replace the stored (encrypted) password for an existing user.

    Returns True when updated, False when the user does not exist
    (previously the missing-user case implicitly returned None).
    """
    if not checkUser(username, region):
        return False
    collection = db[region]
    collection.update_one(
        {'username': username},
        {"$set": {'password': encryptPass(password)}})
    return True
def getUser(username, region):
    """Look up a user and decrypt the stored password.

    Returns the user document with a plaintext `password` field, or
    False when no such user exists in the region collection.
    """
    collection = db[region]
    user = collection.find_one({"username": username})
    if user is None:  # `is None` (identity) instead of `== None` (equality)
        return False
    user["password"] = Fernet(KEY).decrypt(user["password"]).decode('utf-8')
    return user
def addReminder(username, region, discord_id, weapon):
    """Store a shop-reminder entry for a user/weapon pair."""
    db['reminders'].insert_one({
        "username": username,
        "region": region,
        "discord_id": discord_id,
        "weapon": weapon,
    })
    return True
def getReminders():
    """Return every reminder document as a list."""
    # The cursor is iterable; list() replaces the manual append loop.
    return list(db['reminders'].find({}))
def getDevReminders():
    """Return every document from the dev_reminders collection as a list."""
    # Same idiom as getReminders: materialize the cursor directly.
    return list(db['dev_reminders'].find({}))
def getUserReminders(discord_id):
    """Return all reminders registered by one Discord user, as a list."""
    return list(db['reminders'].find({"discord_id": discord_id}))
def delReminder(username, region, discord_id, weapon):
    """Delete a single reminder; return True if it existed, else False."""
    collection = db['reminders']
    query = {"username": username, "region": region, "discord_id": discord_id, "weapon": weapon}
    # find first so the caller learns whether anything matched
    if collection.find_one(query) is None:
        return False
    # (removed the leftover debug print and the unused delete result binding)
    collection.delete_one(query)
    return True
def delUser(username, region):
    """Delete a user and all of their reminders.

    Returns True on success, False when a database call fails.
    """
    try:
        collection = db[region]
        collection.delete_one({"username": username})
        collection = db['reminders']
        collection.delete_many({"username": username, "region": region})
        return True
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed
        return False
def updateServerCount(count):
    """Persist the bot's current server count on the stats document."""
    db['servers'].update_one(
        {'_id': ObjectId(ID)},
        {"$set": {'server_count': count}},
    )
def getServerCount():
    """Return the stats document that holds the stored server count.

    Note: returns the whole document, not just the count field.
    """
    return db['servers'].find_one({'_id': ObjectId(ID)})
acfbd9e0f22834ade8f27b7238bb0690e61e3e7d | 254 | py | Python | day01/part1.py | ElliotFriend/aoc-2017 | 86f5d0438458c83b2af7ee09d427ac0572799ce6 | [
"MIT"
] | null | null | null | day01/part1.py | ElliotFriend/aoc-2017 | 86f5d0438458c83b2af7ee09d427ac0572799ce6 | [
"MIT"
] | null | null | null | day01/part1.py | ElliotFriend/aoc-2017 | 86f5d0438458c83b2af7ee09d427ac0572799ce6 | [
"MIT"
] | null | null | null | my_input = 1122
my_input = 1122

# Advent of Code 2017 day 1: sum every digit that equals the next digit,
# treating the digit sequence as circular (the last digit wraps to the first).
my_input_list = [int(d) for d in str(my_input)]

# Pair each digit with its circular successor, starting from (last, first)
# to mirror the original range(-1, n - 1) traversal order.
shifted = my_input_list[-1:] + my_input_list[:-1]
my_adding_list = [a for a, b in zip(shifted, my_input_list) if a == b]

print(sum(my_adding_list))
acfbdb03601b4f4292f0ed536ab831d4b203c69f | 1,088 | py | Python | scripts/slack-dump.py | keotl/IM-poetry | 2a9281a6d3033057a7b8ed405ff7a7e3cede1870 | [
"MIT"
] | 2 | 2019-09-16T12:58:12.000Z | 2019-09-18T12:39:22.000Z | scripts/slack-dump.py | keotl/IM-poetry | 2a9281a6d3033057a7b8ed405ff7a7e3cede1870 | [
"MIT"
] | 3 | 2019-09-15T23:16:10.000Z | 2019-09-16T22:08:30.000Z | scripts/slack-dump.py | keotl/IM-poetry | 2a9281a6d3033057a7b8ed405ff7a7e3cede1870 | [
"MIT"
] | null | null | null | import json
import requests
# REPLACE THESE CONSTANTS WITH YOUR OWN
TOKEN = "<slack_personal_token>"
CHANNEL = "<channel_id>"
def get_messages(channel: str):
    """Page through Slack's channels.history and return all raw message dicts.

    Each follow-up request passes the last returned message's `ts` as the
    `latest` parameter so the next page continues from there.
    """
    raw_messages = []
    has_more = True
    while has_more:
        url = f"https://slack.com/api/channels.history?token={TOKEN}&channel={channel}&count=1000"
        if raw_messages:  # not the first page: continue from the last message seen
            url += f"&latest={raw_messages[-1]['ts']}"
        # parse the response body once (the old code called res.json() twice)
        body = requests.get(url).json()
        raw_messages.extend(body["messages"])
        has_more = body["has_more"]
    return raw_messages
def get_users():
    """Fetch the full member list for the workspace."""
    response = requests.get(f"https://slack.com/api/users.list?token={TOKEN}")
    return response.json()["members"]
def get_emojis():
    """Fetch the workspace's custom emoji name -> URL mapping."""
    response = requests.get(f"https://slack.com/api/emoji.list?token={TOKEN}")
    return response.json()["emoji"]
if __name__ == '__main__':
    # Dump the whole channel (messages, users, emoji map) as one JSON blob on stdout.
    dump = {
        "type": "SLACK",
        "messages": get_messages(CHANNEL),
        "users": get_users(),
        "emojis": get_emojis()
    }
    print(json.dumps(dump))
| 27.2 | 133 | 0.628676 |
acfbdb0a12d72a7ce20cf903064b39b1528d2ecc | 795 | py | Python | hciparse/android/phone.py | fotonick/hciparse | cafe64c57c74bbafe8a7c411f1d6f9d4d9c5c7e1 | [
"MIT"
] | null | null | null | hciparse/android/phone.py | fotonick/hciparse | cafe64c57c74bbafe8a7c411f1d6f9d4d9c5c7e1 | [
"MIT"
] | null | null | null | hciparse/android/phone.py | fotonick/hciparse | cafe64c57c74bbafe8a7c411f1d6f9d4d9c5c7e1 | [
"MIT"
] | null | null | null |
from .executor import Executor
class Phone(object):
    """Thin wrapper around the `adb` command line for driving an Android phone."""

    def __init__(self, serial=None):
        # serial is stored but not used by the commands below — presumably
        # intended for `adb -s <serial>` device targeting; TODO confirm.
        self.serial = serial

    def shell(self, cmd):
        """Run `adb shell <cmd>` and return its stdout.

        Raises ValueError when adb exits with a non-zero status.
        """
        cmd = "adb shell " + cmd
        ret, out = Executor(cmd).execute()
        if ret != 0:
            # `cmd` already carries the "adb shell " prefix; the old message
            # duplicated it ("... adb shell adb shell <cmd>").
            raise ValueError("Could not execute " + cmd)
        return out

    def pull(self, src, dst):
        """Copy a file from the device to the host via `adb pull`."""
        cmd = "adb pull " + src + " " + dst
        return Executor(cmd).execute()

    def push(self, src, dst):
        """Copy a file from the host to the device via `adb push`."""
        cmd = "adb push " + src + " " + dst
        return Executor(cmd).execute()

    def ls(self, path):
        """List a directory on the device; one entry per list element."""
        out = self.shell("ls " + path)
        return out.splitlines()

    def start_app(self, pkg_name):
        """Launch an app by package name using the monkey tool."""
        cmd = 'monkey -p ' + pkg_name + ' -c android.intent.category.LAUNCHER 1'
        self.shell(cmd)
acfbdbdcdb03514276ffa0012104020d600a98c4 | 1,088 | py | Python | libp2p/protocol_muxer/multiselect_muxer_interface.py | swedneck/py-libp2p | 85457fa308100ed0e5802849bf3918ffae486239 | [
"Apache-2.0",
"MIT"
] | null | null | null | libp2p/protocol_muxer/multiselect_muxer_interface.py | swedneck/py-libp2p | 85457fa308100ed0e5802849bf3918ffae486239 | [
"Apache-2.0",
"MIT"
] | 1 | 2019-10-15T15:28:00.000Z | 2019-10-15T15:28:00.000Z | libp2p/protocol_muxer/multiselect_muxer_interface.py | swedneck/py-libp2p | 85457fa308100ed0e5802849bf3918ffae486239 | [
"Apache-2.0",
"MIT"
] | 2 | 2019-10-15T13:08:19.000Z | 2019-10-15T13:41:49.000Z | from abc import ABC, abstractmethod
from typing import Dict, Tuple
from libp2p.typing import StreamHandlerFn, TProtocol
from .multiselect_communicator_interface import IMultiselectCommunicator
class IMultiselectMuxer(ABC):
    """
    Multiselect module that is responsible for responding to
    a multiselect client and deciding on
    a specific protocol and handler pair to use for communication
    """

    # Registered protocols mapped to the handler that serves each one.
    handlers: Dict[TProtocol, StreamHandlerFn]

    @abstractmethod
    def add_handler(self, protocol: TProtocol, handler: StreamHandlerFn) -> None:
        """
        Store the handler with the given protocol
        :param protocol: protocol name
        :param handler: handler function
        """

    @abstractmethod
    async def negotiate(
        self, communicator: IMultiselectCommunicator
    ) -> Tuple[TProtocol, StreamHandlerFn]:
        """
        Negotiate performs protocol selection
        :param communicator: communicator wrapping the stream to negotiate on
        :return: selected protocol name, handler function
        :raise Exception: negotiation failed exception
        """
| 30.222222 | 81 | 0.706801 |
acfbdccd7d29f0fc2585af4dbd6c5549aa68a7ed | 3,070 | py | Python | main_coin.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | 2 | 2020-06-13T07:18:10.000Z | 2020-11-03T03:46:40.000Z | main_coin.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | null | null | null | main_coin.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | 1 | 2020-05-16T08:41:29.000Z | 2020-05-16T08:41:29.000Z | import logging
import os
import settings
import data_manager_coin
import pandas
from policy_learner_coin import PolicyLearner
import datetime
# SELECT first("open") AS "first_open", max("high") AS "max_high", min("low") AS "min_low", last("close") AS "last_close", sum("volume") AS "sum_volume" FROM "coin_v2"."autogen"."binance_btc_usdt" GROUP BY time(1d) FILL(null)
if __name__ == '__main__':
    # Experiment settings: trading pair, training window, epochs, starting balance.
    symbol = 'ADABTC'
    t_start = '2017-12-01'
    t_end = '2018-6-30'
    epoches = 1000
    balance = 100000
    # logging setup: per-symbol log file at DEBUG, console at INFO
    log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % symbol)
    timestr = settings.get_time_str()
    if not os.path.exists('logs/%s' % symbol):
        os.makedirs('logs/%s' % symbol)
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (symbol, timestr)), encoding='utf-8')
    stream_handler = logging.StreamHandler()
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler], level=logging.DEBUG)
    # prepare chart (price) data from the per-symbol CSV
    chart_data = data_manager_coin.load_chart_data(
        os.path.join(settings.BASE_DIR,
                     'data/chart_data/{}.csv'.format(symbol)))
    chart_data['date'] = pandas.to_datetime(chart_data['date'])
    prep_data = data_manager_coin.preprocess(chart_data)
    training_data = data_manager_coin.build_training_data(prep_data)
    # filter rows to the training date range
    training_data = training_data[(training_data['date'] >= t_start) &
                                  (training_data['date'] <= t_end)]
    training_data = training_data.dropna()
    # split off the raw OHLCV chart columns
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]
    # split off the technical-indicator columns used as training features
    features_training_data = [
        'rsi14', 'stoch_9_6_slowk', 'stoch_9_6_slowd', 'stoch_14_slowk', 'stoch_14_slowd', 'macd', 'macdsignal', 'adx', 'willr', 'cci', 'ultosc', 'roc',
        'close_ma5', 'close_ma10', 'close_ma20', 'close_ma50', 'close_ma100', 'close_ma200',
        'volume_ma5', 'volume_ma10', 'volume_ma20', 'volume_ma50', 'volume_ma100', 'volume_ma200'
    ]
    training_data = training_data[features_training_data]
    training_start = datetime.datetime.now()
    # start reinforcement learning
    policy_learner = PolicyLearner(
        symbol=symbol, chart_data=chart_data, training_data=training_data,
        min_trading_unit=1, max_trading_unit=2, delayed_reward_threshold=.2, lr=.001)
    policy_learner.fit(balance=balance, num_epoches=epoches, discount_factor=0, start_epsilon=.5)
    training_end = datetime.datetime.now()
    delta = training_end - training_start
    # the two messages below are Korean for "training time" / "model file"
    logging.info("학습 소요시간: %s", delta)
    logging.info("모델파일: models/model_%s.h5", timestr)
    # save the policy network to a file
    model_dir = os.path.join(settings.BASE_DIR, 'models/')
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
acfbdcf42723abe3fbf42aa671617046c1d39f46 | 766 | py | Python | exercises/exc_05_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 7 | 2021-04-28T13:12:16.000Z | 2022-01-15T00:21:11.000Z | exercises/exc_05_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-04-02T18:42:55.000Z | 2021-05-20T08:43:06.000Z | exercises/exc_05_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-07-04T22:57:29.000Z | 2021-07-29T19:28:43.000Z | # string with outcomes
# NOTE: this is a fill-in-the-blank teaching exercise; the `___` placeholders
# below are intentional and must be completed by the student — do not "fix" them.
outcomes = ['malignant', 'malignant', 'benign', 'malignant',
            'malignant', 'benign', 'benign', 'benign']
# saving the string to txt file
with open('outcome.txt', mode='w') as file:
    for val in outcomes: # write each value from the list at a new line
        file.write(val + "\n") # adding "\n" creates a new line in a file
# import the file as a string
___ ___('___', mode=___) as ___:
    outcomes_str = ___.___()
# import the file as a list
with open('outcome.txt', mode='r') as file:
    outcomes_list = file.readlines()
print("Imported string:")
print(outcomes_str)
print("Imported list:")
print(outcomes_list)
outcomes_list = list(___) # clean the values in a list
print("\nFixed list:")
print(outcomes_list)
| 28.37037 | 77 | 0.669713 |
acfbde3e8618a48eee03f842129765851b306709 | 86 | py | Python | awsprocesscreds/compat.py | jcooter/awsprocesscreds | 24f32fad336e376dc259b29edaaba9f9637fa7cc | [
"Apache-2.0"
] | 136 | 2017-11-29T19:46:40.000Z | 2022-02-13T08:03:00.000Z | awsprocesscreds/compat.py | jcooter/awsprocesscreds | 24f32fad336e376dc259b29edaaba9f9637fa7cc | [
"Apache-2.0"
] | 39 | 2017-11-29T17:32:09.000Z | 2021-11-24T05:41:56.000Z | awsprocesscreds/compat.py | jcooter/awsprocesscreds | 24f32fad336e376dc259b29edaaba9f9637fa7cc | [
"Apache-2.0"
] | 57 | 2017-11-29T16:43:54.000Z | 2022-03-25T15:41:02.000Z | import six
if six.PY3:
from html import escape
else:
from cgi import escape
| 10.75 | 27 | 0.697674 |
acfbde719048e9155b444ef39086441fe376872d | 2,877 | py | Python | zerver/webhooks/opsgenie/view.py | bensteinberg/zulip | 4911bc3c4d6220e15f8c82cf95d4780797b4de8d | [
"Apache-2.0"
] | 2 | 2021-02-02T01:29:32.000Z | 2021-02-02T01:30:51.000Z | zerver/webhooks/opsgenie/view.py | bensteinberg/zulip | 4911bc3c4d6220e15f8c82cf95d4780797b4de8d | [
"Apache-2.0"
] | 1 | 2016-07-16T16:54:33.000Z | 2016-07-16T16:54:33.000Z | zerver/webhooks/opsgenie/view.py | bensteinberg/zulip | 4911bc3c4d6220e15f8c82cf95d4780797b4de8d | [
"Apache-2.0"
] | 1 | 2020-12-03T17:08:44.000Z | 2020-12-03T17:08:44.000Z | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view('OpsGenie')
@has_request_variables
def api_opsgenie_webhook(request: HttpRequest, user_profile: UserProfile,
                         payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Render an OpsGenie alert payload as a Zulip message and send it."""
    # Always-present fields of the message body.
    info = {
        "additional_info": '',
        "alert_type": payload['action'],
        "alert_id": payload['alert']['alertId'],
        "integration_name": payload['integrationName'],
        "tags": ', '.join('`' + tag + '`' for tag in payload['alert'].get('tags', [])),
    }
    topic = info['integration_name']
    bullet_template = "* **{key}**: {value}\n"
    # Optional fields, rendered as bullets in exactly this order when present.
    # Each entry is (containing dict, payload key, display label); note that
    # escalationName lives on the top-level payload, not on payload['alert'].
    optional_fields = [
        (payload['alert'], 'note', 'Note'),
        (payload['alert'], 'recipient', 'Recipient'),
        (payload['alert'], 'addedTags', 'Tags added'),
        (payload['alert'], 'team', 'Team added'),
        (payload['alert'], 'owner', 'Assigned owner'),
        (payload, 'escalationName', 'Escalation'),
        (payload['alert'], 'removedTags', 'Tags removed'),
        (payload['alert'], 'message', 'Message'),
    ]
    for container, key, label in optional_fields:
        if key in container:
            info['additional_info'] += bullet_template.format(key=label, value=container[key])
    if info['tags']:
        info['additional_info'] += bullet_template.format(key='Tags', value=info['tags'])
    body_template = """
[OpsGenie alert for {integration_name}](https://app.opsgenie.com/alert/V2#/show/{alert_id}):
* **Type**: {alert_type}
{additional_info}
""".strip()
    body = body_template.format(**info)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| 33.847059 | 92 | 0.605492 |
acfbde78dd199f5f2e999e50aa5a954d774356cf | 1,018 | py | Python | 33/portscanner_mp_queue.py | tonybaloney/cpython-book-samples | d61d252d63461b114c3c02329a88a74dc8c51956 | [
"CC0-1.0"
] | 160 | 2020-03-16T06:34:09.000Z | 2022-03-20T19:33:14.000Z | 33/portscanner_mp_queue.py | tonybaloney/cpython-book-samples | d61d252d63461b114c3c02329a88a74dc8c51956 | [
"CC0-1.0"
] | 5 | 2020-11-26T12:02:14.000Z | 2021-06-01T06:24:15.000Z | 33/portscanner_mp_queue.py | tonybaloney/cpython-book-samples | d61d252d63461b114c3c02329a88a74dc8c51956 | [
"CC0-1.0"
] | 43 | 2020-05-31T04:48:28.000Z | 2022-03-08T17:22:45.000Z | import multiprocessing as mp
import time
import socket
timeout = 1
def check_port(host: str, port: int, results: mp.Queue, timeout: float = 1):
    """Probe one TCP port and report it via *results* if it is open.

    Args:
        host: Hostname or IP address to probe.
        port: TCP port number to test.
        results: Queue onto which open port numbers are put.
        timeout: Connection timeout in seconds. Defaults to 1, matching
            the module-level ``timeout`` constant; made a parameter so
            the function no longer depends on a global.
    """
    # The context manager guarantees the socket is closed even if
    # connect_ex() raises (e.g. on a name-resolution failure); the
    # original only closed it on the straight-line path.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        # connect_ex() returns an errno instead of raising; 0 == success.
        if sock.connect_ex((host, port)) == 0:
            results.put(port)
if __name__ == '__main__':
    # Wall-clock timing for the whole scan.
    start = time.time()
    processes = []
    scan_range = range(80, 100)
    host = "localhost" # replace with a host you own
    # 'spawn' starts each worker in a fresh interpreter (portable
    # across OSes, required on Windows/macOS defaults).
    mp.set_start_method('spawn')
    pool_manager = mp.Manager()
    # One pool worker per port so all probes run concurrently; results
    # come back through a Manager queue shared with the workers.
    with mp.Pool(len(scan_range)) as pool:
        outputs = pool_manager.Queue()
        for port in scan_range:
            processes.append(pool.apply_async(check_port,
                             (host, port, outputs)))
        # .get() blocks until each async task finishes and re-raises
        # any exception the worker hit.
        for process in processes:
            process.get()
        while not outputs.empty():
            print("Port {0} is open".format(outputs.get()))
    print("Completed scan in {0} seconds".format(time.time() - start))
acfbdeb86296cf2828dad9f1c3adac81bc9cfb34 | 538 | py | Python | tests/components/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 6 | 2016-11-25T06:36:27.000Z | 2021-11-16T11:20:23.000Z | tests/components/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | tests/components/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 2 | 2019-08-04T13:39:43.000Z | 2020-02-07T23:01:23.000Z | """Fixtures for component testing."""
import pytest
from homeassistant.components import zeroconf
from tests.async_mock import patch
# Stash the real implementation under a new attribute, then replace it
# with a no-op so that importing components during tests never installs
# real zeroconf catchers. Tests that need the original behavior can
# restore it from orig_install_multiple_zeroconf_catcher.
zeroconf.orig_install_multiple_zeroconf_catcher = (
    zeroconf.install_multiple_zeroconf_catcher
)
zeroconf.install_multiple_zeroconf_catcher = lambda zc: None
@pytest.fixture(autouse=True)
def prevent_io():
    """Fixture to prevent certain I/O from happening.

    autouse=True applies it to every test in this package. The IP-ban
    config loader is stubbed to return an empty list (no bans) so tests
    never read the ban file from disk.
    """
    with patch(
        "homeassistant.components.http.ban.async_load_ip_bans_config",
        return_value=[],
    ):
        yield
| 24.454545 | 70 | 0.754647 |
acfbe0b8fde8219a63f23d93b5b1571a297dc7df | 3,288 | py | Python | grzegorz_clients/api.py | Programvareverkstedet/grzegroz_clients | 70a707aa97b8b544185c51d34d62dc3351641020 | [
"BSD-3-Clause"
] | 1 | 2019-11-25T21:14:00.000Z | 2019-11-25T21:14:00.000Z | grzegorz_clients/api.py | Programvareverkstedet/grzegorz_clients | 70a707aa97b8b544185c51d34d62dc3351641020 | [
"BSD-3-Clause"
] | null | null | null | grzegorz_clients/api.py | Programvareverkstedet/grzegorz_clients | 70a707aa97b8b544185c51d34d62dc3351641020 | [
"BSD-3-Clause"
] | null | null | null | import requests, json
from urllib.parse import urlencode
from functools import wraps
# This must be set to be able to use it on remote hosts
BASE_URL = "http://localhost:8080/api"
def set_endpoint(base_url: str):
    """Point the client at a grzegorz API instance.

    Args:
        base_url: Root URL of the API, e.g. ``"http://host:8080/api"``.
            Any trailing slash is stripped, because request URLs are
            built as ``f"{BASE_URL}/{route}"`` and would otherwise
            contain ``//``.
    """
    global BASE_URL
    BASE_URL = base_url.rstrip("/")
# Exceptions:
class APIError(Exception):
    """Raised when the grzegorz API returns an error response."""
# decorator:
# (TODO): Add logging
def request_delete(func):
    """Decorator: turn *func*'s ``(url, data)`` into an HTTP DELETE.

    The wrapped function must return a ``(url, data)`` tuple; *data*
    may be ``None``, a pre-encoded string, or a dict (JSON-encoded
    here). Returns the ``"success"`` field of the JSON response, or
    raises :class:`APIError` when the server reports an error.
    """
    @wraps(func)
    def new_func(*args, **kwargs):
        url, data = func(*args, **kwargs)
        if isinstance(data, dict):
            data = json.dumps(data)
        response = requests.delete(f"{BASE_URL}/{url}", data=data)
        payload = json.loads(response.text)
        # The API signals success with an explicit `"error": false`;
        # anything else (including a missing field) is an error.
        if payload.get("error", True) != False:
            # .get() here fixes the original, which did payload["error"]
            # and raised a KeyError instead of APIError whenever the
            # response carried no "error" field. (Stray debug print of
            # the payload removed as well.)
            raise APIError(payload.get("error", payload))
        return payload["success"]
    return new_func
def request_post(func):
    """Decorator: turn *func*'s ``(url, data)`` into an HTTP POST.

    The wrapped function must return a ``(url, data)`` tuple; *data*
    may be ``None``, a pre-encoded string, or a dict (JSON-encoded
    here). Returns the ``"success"`` field of the JSON response, or
    raises :class:`APIError` when the server reports an error.
    """
    @wraps(func)
    def new_func(*args, **kwargs):
        url, data = func(*args, **kwargs)
        if isinstance(data, dict):
            data = json.dumps(data)
        response = requests.post(f"{BASE_URL}/{url}", data=data)
        payload = json.loads(response.text)
        # Success is an explicit `"error": false`; a missing field
        # counts as an error (same semantics as the original check).
        if payload.get("error", True) != False:
            # .get() avoids a KeyError masking the intended APIError
            # when the response has no "error" field; the stray debug
            # print(data) is removed.
            raise APIError(payload.get("error", payload))
        return payload["success"]
    return new_func
def request_get(func):
    """Decorator: turn *func*'s returned route into an HTTP GET.

    The wrapped function must return a URL path relative to
    ``BASE_URL``. Returns the ``"value"`` field of the JSON response,
    or raises :class:`APIError` when the server reports an error.
    """
    @wraps(func)
    def new_func(*args, **kwargs):
        url = func(*args, **kwargs)
        response = requests.get(f"{BASE_URL}/{url}")
        payload = json.loads(response.text)
        if payload.get("error", True) != False:
            # .get() keeps a malformed error response (no "errortext"
            # field) from raising a KeyError instead of APIError.
            raise APIError(payload.get("errortext", payload))
        return payload["value"]
    return new_func
# methods:
# Each function below only builds a route (and optional body); the
# request_* decorators perform the actual HTTP call against BASE_URL.
@request_post
def load_path(path:str, data: dict = None):
    """Load a media path/URL into the player; *data* is sent as the POST body."""
    # NOTE(review): urlencode(locals()) also encodes `data` into the
    # query string (e.g. "data=None") -- verify the server ignores it.
    args = urlencode(locals())
    return f"load?{args}", data
@request_get
def is_playing():
    """Return whether playback is currently active."""
    return "play"
@request_post
def set_playing(play: bool):
    """Start (True) or pause (False) playback."""
    args = urlencode(locals())
    return f"play?{args}", None
@request_get
def get_volume():
    """Return the current playback volume."""
    return "volume"
@request_post
def set_volume(volume: int): # between 0 and 100 (you may also exceed 100)
    """Set the playback volume."""
    args = urlencode(locals())
    return f"volume?{args}", None
@request_get
def get_playlist():
    """Return the current playlist."""
    return "playlist"
@request_post
def playlist_next():
    """Skip to the next playlist entry."""
    return "playlist/next", None
@request_post
def playlist_goto(index: int):
    """Jump to the playlist entry at *index*."""
    args = urlencode(locals())
    return f"playlist/goto?{args}", None
@request_post
def playlist_previous():
    """Go back to the previous playlist entry."""
    return "playlist/previous", None
@request_post
def playlist_shuffle():
    """Shuffle the playlist order."""
    return "playlist/shuffle", None
@request_delete
def playlist_clear():
    """Remove all entries from the playlist."""
    return "playlist", None
@request_delete
def playlist_remove(index: int):
    """Remove the playlist entry at *index*."""
    args = urlencode(locals())
    return f"playlist?{args}", None
@request_post
def playlist_move(index1: int, index2: int):
    """Move the entry at *index1* to position *index2*."""
    args = urlencode(locals())
    return f"playlist/move?{args}", None
@request_get
def get_playlist_looping():
    """Return whether the playlist loops."""
    return "playlist/loop"
@request_post
def playlist_set_looping(looping: bool):
    """Enable or disable playlist looping (server expects 'true'/'false')."""
    return f"playlist/loop?loop={str(bool(looping)).lower()}", None
@request_get
def get_playback_pos():
    """Return the current playback position."""
    return "time"
@request_post
def seek_absolute(pos: float):
    """Seek to an absolute position (seconds)."""
    args = urlencode(locals())
    return f"time?{args}", None
@request_post
def seek_percent(percent: int):
    """Seek to a position given as a percentage of the track length."""
    args = urlencode(locals())
    return f"time?{args}", None
| 24.721805 | 74 | 0.66028 |
acfbe0f4e7c9f061537a4eabae51997f8c407fc3 | 9,850 | py | Python | testhub/testlib/nni_job/mnist-tfv1/mnist_npu.py | banrieen/PerfBoard | 855c7249de9075a8bc33149938782245d580d558 | [
"MIT"
] | 146 | 2021-04-15T03:32:12.000Z | 2021-09-10T06:06:42.000Z | testhub/testlib/nni_job/mnist-tfv1/mnist_npu.py | banrieen/PerfBoard | 855c7249de9075a8bc33149938782245d580d558 | [
"MIT"
] | 3 | 2021-05-26T05:20:26.000Z | 2021-08-05T02:07:14.000Z | testhub/testlib/nni_job/mnist-tfv1/mnist_npu.py | banrieen/MachineDevil | b9d8d70bc7e8d0113a9176aa16ec0e7fbda523ca | [
"MIT"
] | 29 | 2021-04-03T06:46:43.000Z | 2021-07-11T08:35:19.000Z | """A deep MNIST classifier using convolutional layers."""
import argparse
import logging
import math
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
import nni
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
'''
MnistNetwork is for initializing and building basic network for mnist.
'''
def __init__(self,
channel_1_num,
channel_2_num,
conv_size,
hidden_size,
pool_size,
learning_rate,
x_dim=784,
y_dim=10):
self.channel_1_num = channel_1_num
self.channel_2_num = channel_2_num
self.conv_size = conv_size
self.hidden_size = hidden_size
self.pool_size = pool_size
self.learning_rate = learning_rate
self.x_dim = x_dim
self.y_dim = y_dim
self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.train_step = None
self.accuracy = None
def build_network(self):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
try:
input_dim = int(math.sqrt(self.x_dim))
except:
print(
'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
logger.debug(
'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
raise
x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
w_conv1 = weight_variable(
[self.conv_size, self.conv_size, 1, self.channel_1_num])
b_conv1 = bias_variable([self.channel_1_num])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool(h_conv1, self.pool_size)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
w_conv2 = weight_variable([self.conv_size, self.conv_size,
self.channel_1_num, self.channel_2_num])
b_conv2 = bias_variable([self.channel_2_num])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool(h_conv2, self.pool_size)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim = int(input_dim / (self.pool_size * self.pool_size))
with tf.name_scope('fc1'):
w_fc1 = weight_variable(
[last_dim * last_dim * self.channel_2_num, self.hidden_size])
b_fc1 = bias_variable([self.hidden_size])
h_pool2_flat = tf.reshape(
h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
w_fc2 = weight_variable([self.hidden_size, self.y_dim])
b_fc2 = bias_variable([self.y_dim])
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
with tf.name_scope('loss'):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
with tf.name_scope('adam_optimizer'):
self.train_step = tf.train.AdamOptimizer(
self.learning_rate).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(
tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x_input, pool_size):
"""max_pool downsamples a feature map by 2X."""
return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def download_mnist_retry(data_dir, max_num_retries=20):
"""Try to download mnist dataset and avoid errors"""
for _ in range(max_num_retries):
try:
return input_data.read_data_sets(data_dir, one_hot=True)
except tf.errors.AlreadyExistsError:
time.sleep(1)
raise Exception("Failed to download MNIST.")
def main(params):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist = download_mnist_retry(params['data_dir'])
print('Mnist download data done.')
logger.debug('Mnist download data done.')
# Create the model
# Build the graph for the deep net
mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
channel_2_num=params['channel_2_num'],
conv_size=params['conv_size'],
hidden_size=params['hidden_size'],
pool_size=params['pool_size'],
learning_rate=params['learning_rate'])
mnist_network.build_network()
logger.debug('Mnist build network done.')
# Write log
graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
#npu config
config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["use_off_line"].b = True
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
test_acc = 0.0
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for i in range(params['batch_num']):
batch = mnist.train.next_batch(params['batch_size'])
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: 1 - params['dropout_rate']}
)
if i % 100 == 0:
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
nni.report_intermediate_result(test_acc)
logger.debug('test accuracy %g', test_acc)
logger.debug('Pipe send intermediate result done.')
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
nni.report_final_result(test_acc)
logger.debug('Final result is %g', test_acc)
logger.debug('Send final result done.')
def get_params():
''' Get parameters from command line '''
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='/data/mnist-tfv1', help="data directory")
parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate")
parser.add_argument("--channel_1_num", type=int, default=32)
parser.add_argument("--channel_2_num", type=int, default=64)
parser.add_argument("--conv_size", type=int, default=5)
parser.add_argument("--pool_size", type=int, default=2)
parser.add_argument("--hidden_size", type=int, default=1024)
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--batch_num", type=int, default=2000)
parser.add_argument("--batch_size", type=int, default=32)
args, _ = parser.parse_known_args()
return args
if __name__ == '__main__':
try:
# get parameters form tuner
tuner_params = nni.get_next_parameter()
logger.debug(tuner_params)
params = vars(get_params())
params.update(tuner_params)
main(params)
except Exception as exception:
logger.exception(exception)
raise
| 39.717742 | 104 | 0.620914 |
acfbe2f61a8d19e29de16fc24324d77c1e4301aa | 6,834 | py | Python | espnet2/enh/layers/complex_utils.py | YoshikiMas/espnet | 793b999a50af484a5eaf6227ef7556b48514ef15 | [
"Apache-2.0"
] | 1 | 2022-03-25T14:41:05.000Z | 2022-03-25T14:41:05.000Z | espnet2/enh/layers/complex_utils.py | YoshikiMas/espnet | 793b999a50af484a5eaf6227ef7556b48514ef15 | [
"Apache-2.0"
] | 2 | 2019-04-23T04:43:33.000Z | 2019-05-13T13:06:52.000Z | espnet2/enh/layers/complex_utils.py | YoshikiMas/espnet | 793b999a50af484a5eaf6227ef7556b48514ef15 | [
"Apache-2.0"
] | null | null | null | """Beamformer module."""
from distutils.version import LooseVersion
from typing import Sequence
from typing import Tuple
from typing import Union
import torch
from torch_complex import functional as FC
from torch_complex.tensor import ComplexTensor
EPS = torch.finfo(torch.double).eps
is_torch_1_8_plus = LooseVersion(torch.__version__) >= LooseVersion("1.8.0")
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
def new_complex_like(
ref: Union[torch.Tensor, ComplexTensor],
real_imag: Tuple[torch.Tensor, torch.Tensor],
):
if isinstance(ref, ComplexTensor):
return ComplexTensor(*real_imag)
elif is_torch_complex_tensor(ref):
return torch.complex(*real_imag)
else:
raise ValueError(
"Please update your PyTorch version to 1.9+ for complex support."
)
def is_torch_complex_tensor(c):
return (
not isinstance(c, ComplexTensor) and is_torch_1_9_plus and torch.is_complex(c)
)
def is_complex(c):
return isinstance(c, ComplexTensor) or is_torch_complex_tensor(c)
def to_double(c):
if not isinstance(c, ComplexTensor) and is_torch_1_9_plus and torch.is_complex(c):
return c.to(dtype=torch.complex128)
else:
return c.double()
def to_float(c):
if not isinstance(c, ComplexTensor) and is_torch_1_9_plus and torch.is_complex(c):
return c.to(dtype=torch.complex64)
else:
return c.float()
def cat(seq: Sequence[Union[ComplexTensor, torch.Tensor]], *args, **kwargs):
if not isinstance(seq, (list, tuple)):
raise TypeError(
"cat(): argument 'tensors' (position 1) must be tuple of Tensors, "
"not Tensor"
)
if isinstance(seq[0], ComplexTensor):
return FC.cat(seq, *args, **kwargs)
else:
return torch.cat(seq, *args, **kwargs)
def complex_norm(
c: Union[torch.Tensor, ComplexTensor], dim=-1, keepdim=False
) -> torch.Tensor:
if not is_complex(c):
raise TypeError("Input is not a complex tensor.")
if is_torch_complex_tensor(c):
return torch.norm(c, dim=dim, keepdim=keepdim)
else:
if dim is None:
return torch.sqrt((c.real**2 + c.imag**2).sum() + EPS)
else:
return torch.sqrt(
(c.real**2 + c.imag**2).sum(dim=dim, keepdim=keepdim) + EPS
)
def einsum(equation, *operands):
# NOTE: Do not mix ComplexTensor and torch.complex in the input!
# NOTE (wangyou): Until PyTorch 1.9.0, torch.einsum does not support
# mixed input with complex and real tensors.
if len(operands) == 1:
if isinstance(operands[0], (tuple, list)):
operands = operands[0]
complex_module = FC if isinstance(operands[0], ComplexTensor) else torch
return complex_module.einsum(equation, *operands)
elif len(operands) != 2:
op0 = operands[0]
same_type = all(op.dtype == op0.dtype for op in operands[1:])
if same_type:
_einsum = FC.einsum if isinstance(op0, ComplexTensor) else torch.einsum
return _einsum(equation, *operands)
else:
raise ValueError("0 or More than 2 operands are not supported.")
a, b = operands
if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
return FC.einsum(equation, a, b)
elif is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
if not torch.is_complex(a):
o_real = torch.einsum(equation, a, b.real)
o_imag = torch.einsum(equation, a, b.imag)
return torch.complex(o_real, o_imag)
elif not torch.is_complex(b):
o_real = torch.einsum(equation, a.real, b)
o_imag = torch.einsum(equation, a.imag, b)
return torch.complex(o_real, o_imag)
else:
return torch.einsum(equation, a, b)
else:
return torch.einsum(equation, a, b)
def inverse(
c: Union[torch.Tensor, ComplexTensor]
) -> Union[torch.Tensor, ComplexTensor]:
if isinstance(c, ComplexTensor):
return c.inverse2()
else:
return c.inverse()
def matmul(
a: Union[torch.Tensor, ComplexTensor], b: Union[torch.Tensor, ComplexTensor]
) -> Union[torch.Tensor, ComplexTensor]:
# NOTE: Do not mix ComplexTensor and torch.complex in the input!
# NOTE (wangyou): Until PyTorch 1.9.0, torch.matmul does not support
# multiplication between complex and real tensors.
if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
return FC.matmul(a, b)
elif is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
if not torch.is_complex(a):
o_real = torch.matmul(a, b.real)
o_imag = torch.matmul(a, b.imag)
return torch.complex(o_real, o_imag)
elif not torch.is_complex(b):
o_real = torch.matmul(a.real, b)
o_imag = torch.matmul(a.imag, b)
return torch.complex(o_real, o_imag)
else:
return torch.matmul(a, b)
else:
return torch.matmul(a, b)
def trace(a: Union[torch.Tensor, ComplexTensor]):
# NOTE (wangyou): until PyTorch 1.9.0, torch.trace does not
# support bacth processing. Use FC.trace() as fallback.
return FC.trace(a)
def reverse(a: Union[torch.Tensor, ComplexTensor], dim=0):
if isinstance(a, ComplexTensor):
return FC.reverse(a, dim=dim)
else:
return torch.flip(a, dims=(dim,))
def solve(b: Union[torch.Tensor, ComplexTensor], a: Union[torch.Tensor, ComplexTensor]):
"""Solve the linear equation ax = b."""
# NOTE: Do not mix ComplexTensor and torch.complex in the input!
# NOTE (wangyou): Until PyTorch 1.9.0, torch.solve does not support
# mixed input with complex and real tensors.
if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
if isinstance(a, ComplexTensor) and isinstance(b, ComplexTensor):
return FC.solve(b, a, return_LU=False)
else:
return matmul(inverse(a), b)
elif is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
if torch.is_complex(a) and torch.is_complex(b):
return torch.linalg.solve(a, b)
else:
return matmul(inverse(a), b)
else:
if is_torch_1_8_plus:
return torch.linalg.solve(a, b)
else:
return torch.solve(b, a)[0]
def stack(seq: Sequence[Union[ComplexTensor, torch.Tensor]], *args, **kwargs):
if not isinstance(seq, (list, tuple)):
raise TypeError(
"stack(): argument 'tensors' (position 1) must be tuple of Tensors, "
"not Tensor"
)
if isinstance(seq[0], ComplexTensor):
return FC.stack(seq, *args, **kwargs)
else:
return torch.stack(seq, *args, **kwargs)
| 35.046154 | 88 | 0.643254 |
acfbe31403a256cbcf7bb0e868e7c10cefef6cc7 | 1,395 | py | Python | Array/Contains Duplicate III.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 87 | 2015-07-15T20:41:09.000Z | 2022-03-08T13:55:38.000Z | Array/Contains Duplicate III.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 59 | 2015-03-19T22:26:41.000Z | 2015-07-25T17:58:08.000Z | Array/Contains Duplicate III.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 45 | 2015-07-15T20:41:12.000Z | 2022-02-01T20:18:07.000Z | '''
Given an array of integers, find out whether there are two distinct indices i and j in the array such that the difference between
nums[i] and nums[j] is at most t and the difference between i and j is at most k.
'''
class Solution:
    # @param {integer[]} nums
    # @param {integer} k
    # @param {integer} t
    # @return {boolean}
    def containsNearbyAlmostDuplicate(self, nums, k, t):
        """Return True iff there exist i != j with abs(i - j) <= k and
        abs(nums[i] - nums[j]) <= t.

        Bucketing approach: two numbers within t of each other fall
        into the same or an adjacent bucket of width t, so only three
        buckets need checking per element. An OrderedDict keeps the
        most recent k elements, enforcing the index-window constraint.
        """
        # Fix: the original file referenced collections.OrderedDict
        # without ever importing collections, so the first call raised
        # NameError. Import locally to keep the method self-contained.
        from collections import OrderedDict
        # k <= 0 or t < 0 can never be satisfied
        # (e.g. nums=[0], k=0, t=0 must return False).
        if k <= 0 or t < 0: return False
        idict = OrderedDict()  # insertion order tracks the sliding index window
        for num in nums:
            bucket = num if t == 0 else num // t  # t == 0: exact-value buckets
            for m in (idict.get(bucket-1), idict.get(bucket), idict.get(bucket+1)):
                # Must compare against None explicitly: a stored value
                # of 0 is valid, so a bare `if m:` would be wrong.
                if m != None and abs(num - m) <= t:
                    return True
            if len(idict) == k:
                # Evict the oldest entry *before* inserting the new one,
                # so stored elements are always within k indices of num.
                idict.popitem(False)
            idict[bucket] = num
        return False
# testcase:
# Input: [3,6,0,2], 2, 2
# Output: false
# Expected: true
# https://leetcode.com/discuss/38176/python-ordereddict
# https://leetcode.com/discuss/38206/ac-solution-in-java-using-o-n-bucket-with-explanation
| 38.75 | 130 | 0.58638 |
acfbe425a0b9e68f85284f59e4ff8616d889ebdf | 1,501 | py | Python | rubix_admin/command_line.py | qubole/rubix-admin | 0419d0cd80c72debf7d1c113cc6c377f4f4739dd | [
"Apache-2.0"
] | 5 | 2017-12-04T08:48:41.000Z | 2020-06-02T06:57:43.000Z | rubix_admin/command_line.py | qubole/rubix-admin | 0419d0cd80c72debf7d1c113cc6c377f4f4739dd | [
"Apache-2.0"
] | 13 | 2017-08-28T12:15:54.000Z | 2020-08-18T22:58:29.000Z | rubix_admin/command_line.py | qubole/rubix-admin | 0419d0cd80c72debf7d1c113cc6c377f4f4739dd | [
"Apache-2.0"
] | 4 | 2017-10-04T22:43:46.000Z | 2019-01-09T23:44:20.000Z | #!/bin/env python
import os
import sys
import traceback
import logging
from parsers import setup_parsers
from rubix_admin.admin_config import AdminConfig
def main():
    """Entry point for the rubix-admin CLI.

    Configures root logging to stderr, parses config and command-line
    arguments, optionally raises verbosity / mirrors logs to a file,
    then dispatches to the sub-command handler bound to ``args.func``.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(module)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    root.addHandler(ch)
    # I am using this slightly complicated trick to pass config in the constructor of
    # other packages. Better way to do this ?
    config_parser, argparser = setup_parsers()
    config_args, remaining_argv = config_parser.parse_known_args()
    config = AdminConfig.load_config(config_args)
    # Fix: was a stray Python-2 `print config` statement -- a syntax
    # error under Python 3 and an unconditional write to stdout. Log it
    # at DEBUG with lazy %-formatting instead.
    logging.debug("Loaded config: %s", config)
    args, remaining_argv = argparser.parse_known_args(remaining_argv)
    args.remaining_argv = remaining_argv
    if args.debug:
        ch.setLevel(logging.DEBUG)
        root.setLevel(logging.DEBUG)
        logging.debug("Debug is ON!")
    if args.log_file is not None:
        # Also mirror everything (DEBUG and up) into the requested file.
        fh = logging.FileHandler(args.log_file, mode='w')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        root.setLevel(logging.DEBUG)
        root.addHandler(fh)
    try:
        args.config = config
        args.func(args)
    finally:
        logging.debug("Cleaning up")
if __name__ == '__main__':
    try:
        # main() returns None on success; sys.exit(None) exits with 0.
        sys.exit(main())
    except Exception:
        # Print the full traceback for diagnostics, then exit with a
        # distinct non-zero status so callers can detect the crash.
        traceback.print_exc(file=sys.stderr)
        sys.exit(3)
| 26.333333 | 91 | 0.681546 |
acfbe435ad5d09f692c2371c5bbe07936864f4e5 | 6,666 | py | Python | activeClassifier/visualisation/visualise_ActCl.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | activeClassifier/visualisation/visualise_ActCl.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | activeClassifier/visualisation/visualise_ActCl.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from activeClassifier.visualisation.base import Visualiser
def softmax(x):
    """Numerically stable softmax of *x*, normalized along axis 0."""
    shifted = x - np.max(x)        # subtract the global max to avoid overflow
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)
class Visualization_ActCl(Visualiser):
    """Visualiser for the active-classifier model.

    Collects the tensors to evaluate into ``self.fetch`` and writes
    glimpse-overview, reconstruction and (optionally) planning figures
    to disk under ``FLAGS.path``.

    NOTE(review): relies on attributes provided by the Visualiser base
    class (visualisation_level, planner, pre_train_epochs, num_glimpses,
    num_classes, num_scales, num_policies, batch_size_eff, path,
    scale_sizes, img_shape_squeezed, _eval_feed, _glimpse_reshp,
    _plot_img_plus_locs, plot_overview) -- confirm against
    visualisation.base.
    """
    def __init__(self, model, FLAGS):
        super().__init__(model, FLAGS)
        # Tensors fetched from the session each time visualise() runs.
        self.fetch = {'step'            : model.global_step,
                      'epoch'           : model.epoch_num,
                      'phase'           : model.phase,
                      'x'               : model.x_MC,
                      'y'               : model.y_MC,
                      'locs'            : model.actions,
                      'gl_composed'     : model.glimpses_composed,
                      'clf'             : model.classification,
                      'decisions'       : model.decisions,
                      'state_believes'  : model.state_believes,
                      'G'               : model.G,
                      'glimpse_nll'     : model.glimpse_nlls_posterior,
                      'glimpse'         : model.obs,
                      'glimpse_reconstr': model.glimpse_reconstr,
                      }
        if self.visualisation_level > 0:
            folders = ['glimpses', 'reconstr']
            if self.visualisation_level > 1:
                # Planning plots only exist for non-RL planners, which
                # expose the expected-observation tensors below.
                if (self.planner != 'RL'):
                    folders.append('planning')
                    self.fetch.update({'exp_exp_obs'       : model.exp_exp_obs,
                                       'exp_obs': model.exp_obs,
                                       'H_exp_exp_obs'     : model.H_exp_exp_obs,
                                       'exp_H'             : model.exp_H,
                                       'potential_actions' : model.potential_actions,})
            # One output sub-folder per figure type.
            for f in folders:
                os.makedirs(os.path.join(FLAGS.path, f), exist_ok=True)

    def visualise(self, sess, feed, suffix=None, nr_obs_overview=8, nr_obs_reconstr=5):
        """Evaluate self.fetch on *sess*/*feed* and write all enabled plots."""
        if self.visualisation_level > 0:
            d = self._eval_feed(sess, feed)
            # Filenames are prefixed with epoch/phase (and optional suffix).
            prefix = 'epoch{}_phase{}'.format(d['epoch'], d['phase'])
            if suffix:
                prefix += '_' + suffix
            self.plot_overview(d, prefix, nr_obs_overview)
            self.plot_reconstr(d, prefix, nr_obs_reconstr)
            if self.visualisation_level > 1:
                if (self.planner != 'RL') & (d['epoch'] >= self.pre_train_epochs):
                    self.plot_planning(d, prefix, nr_examples=1)  # one plot for each policy

    def plot_reconstr(self, d, prefix, nr_examples, folder_name='reconstr'):
        """Per example: true glimpses next to each hypothesis' reconstruction,
        ranked by negative log-likelihood."""
        nr_examples = min(nr_examples, self.batch_size_eff)
        nax = 1 + self.num_classes  # 1 column for the true glimpse + 1 per class
        gl = self._glimpse_reshp(d['glimpse'])  # [T, B, scale[0], scales*scale[0]]
        gl_preds = self._glimpse_reshp(d['glimpse_reconstr'])  # [T, B, hyp, scale[0], scales*scale[0]]
        for i in range(nr_examples):
            f, axes = plt.subplots(self.num_glimpses + 1, nax, figsize=(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1)))
            axes = axes.reshape([self.num_glimpses + 1, nax])
            # Top row: the full image with the taken glimpse locations.
            self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
            for t in range(self.num_glimpses):
                axes[t+1, 0].imshow(gl[t, i], cmap='gray')
                axes[t+1, 0].set_title('Label: {}, clf: {}'.format(d['y'][i], d['clf'][i]))
                # Order hypotheses best-first by their glimpse NLL.
                ranked_nll = np.argsort(d['glimpse_nll'][t, i, :])
                ps = softmax(-d['glimpse_nll'][t, i, :])
                for j, hyp in enumerate(ranked_nll):
                    axes[t+1, j+1].imshow(gl_preds[t, i, hyp], cmap='gray')
                    axes[t+1, j+1].set_title('{}, post-c: {:.2f}, nll: {:.2f}, p: {:.2f}'.format(hyp, d['state_believes'][t, i, hyp], d['glimpse_nll'][t, i, hyp], ps[hyp]))
            # plt.setp(axes, xticks=[], yticks=[])
            [ax.set_axis_off() for ax in axes.ravel()]
            f.tight_layout()
            f.savefig('{}/{}/{}_{}_n{}.png'.format(self.path, folder_name, d['step'], prefix, i), bbox_inches='tight')
            plt.close(f)

    def plot_planning(self, d, prefix, nr_examples, folder_name='planning'):
        """One figure per (example, policy): candidate location, expected
        observation, and per-hypothesis expected observations over time."""
        # T x [True glimpse, exp_exp_obs, exp_obs...]
        nax_x = 2 + self.num_classes
        nax_y = self.num_glimpses + 1
        exp_exp_obs = self._glimpse_reshp(d['exp_exp_obs'])  # [T, B, n_policies, scale[0], scales*scale[0]]
        exp_obs = self._glimpse_reshp(d['exp_obs'])  # [T, B, n_policies, num_classes, scale[0], scales*scale[0]]
        for i in range(nr_examples):
            for k in range(self.num_policies):
                f, axes = plt.subplots(nax_y, nax_x, figsize=(5 * self.num_scales * nax_x, 5 * nax_y))
                axes = axes.reshape([nax_y, nax_x])
                self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
                for t in range(self.num_glimpses):
                    # potential location under evaluation
                    locs = d['potential_actions'][t, i, k]
                    # locs is (row, col); scatter/Rectangle take (x, y),
                    # hence the index swaps below.
                    axes[t + 1, 0].imshow(d['x'][i].reshape(self.img_shape_squeezed), cmap='gray')
                    axes[t + 1, 0].scatter(locs[1], locs[0], marker='x', facecolors='cyan', linewidth=2.5, s=0.25 * (5 * 8 * 24))
                    axes[t + 1, 0].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2, width=self.scale_sizes[0], height=self.scale_sizes[0], edgecolor='cyan', facecolor='none', linewidth=2.5))
                    axes[t + 1, 0].set_title('Label: {}, clf: {}'.format(d['y'][i], d['clf'][i]))
                    axes[t + 1, 1].imshow(exp_exp_obs[t, i, k], cmap='gray')
                    axes[t + 1, 1].set_title('G: {:.2f}, H_: {:.2f}, exp_H: {:.2f}, extr: {:.2f}'.format(d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1]))
                    # Show per-hypothesis expectations, most-believed first.
                    ranked_hyp = np.argsort(d['state_believes'][t, i, :])
                    for j, hyp in enumerate(ranked_hyp[::-1]):
                        axes[t + 1, j + 2].imshow(exp_obs[t, i, k, hyp], cmap='gray')
                        axes[t + 1, j + 2].set_title('Hyp: {}, prob: {:.2f}'.format(hyp, d['state_believes'][t, i, hyp]))
                [ax.set_axis_off() for ax in axes.ravel()]
                f.tight_layout()
                f.savefig('{}/{}/{}_{}_n{}_k{}.png'.format(self.path, folder_name, d['step'], prefix, i, k), bbox_inches='tight')
                plt.close(f)
| 51.276923 | 199 | 0.512751 |
acfbe611a8b7a542be0531c537e455a861071c58 | 8,208 | py | Python | dagmm/dagmm.py | gentaman/DAGMM | a9159cf253005119a1f1c5f17c53c2fbfcf9bd4d | [
"MIT"
] | null | null | null | dagmm/dagmm.py | gentaman/DAGMM | a9159cf253005119a1f1c5f17c53c2fbfcf9bd4d | [
"MIT"
] | null | null | null | dagmm/dagmm.py | gentaman/DAGMM | a9159cf253005119a1f1c5f17c53c2fbfcf9bd4d | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
from dagmm.compression_net import CompressionNet
from dagmm.estimation_net import EstimationNet
from dagmm.gmm import GMM
from os import makedirs
from os.path import exists, join
class DAGMM:
    """ Deep Autoencoding Gaussian Mixture Model.
    This implementation is based on the paper:
    Bo Zong+ (2018) Deep Autoencoding Gaussian Mixture Model
    for Unsupervised Anomaly Detection, ICLR 2018
    (this is UNOFFICIAL implementation)
    """
    # Base filenames used when the trained model / fitted scaler are
    # saved to or restored from a directory.
    MODEL_FILENAME = "DAGMM_model"
    SCALER_FILENAME = "DAGMM_scaler"
    def __init__(self, comp_hiddens, comp_activation,
            est_hiddens, est_activation, est_dropout_ratio=0.5,
            minibatch_size=1024, epoch_size=100,
            learning_rate=0.0001, lambda1=0.1, lambda2=0.0001,
            normalize=True, random_seed=123):
        """
        Parameters
        ----------
        comp_hiddens : list of int
            sizes of hidden layers of compression network
            For example, if the sizes are [n1, n2],
            structure of compression network is:
            input_size -> n1 -> n2 -> n1 -> input_sizes
        comp_activation : function
            activation function of compression network
        est_hiddens : list of int
            sizes of hidden layers of estimation network.
            The last element of this list is assigned as n_comp.
            For example, if the sizes are [n1, n2],
            structure of estimation network is:
            input_size -> n1 -> n2 (= n_comp)
        est_activation : function
            activation function of estimation network
        est_dropout_ratio : float (optional)
            dropout ratio of estimation network applied during training
            if 0 or None, dropout is not applied.
        minibatch_size: int (optional)
            mini batch size during training
        epoch_size : int (optional)
            epoch size during training
        learning_rate : float (optional)
            learning rate during training
        lambda1 : float (optional)
            a parameter of loss function (for energy term)
        lambda2 : float (optional)
            a parameter of loss function
            (for sum of diagonal elements of covariance)
        normalize : bool (optional)
            specify whether input data need to be normalized.
            by default, input data is normalized.
        random_seed : int (optional)
            random seed used when fit() is called.
        """
        self.comp_net = CompressionNet(comp_hiddens, comp_activation)
        self.est_net = EstimationNet(est_hiddens, est_activation)
        self.est_dropout_ratio = est_dropout_ratio
        # The estimation net's last layer width doubles as the number
        # of Gaussian mixture components.
        n_comp = est_hiddens[-1]
        self.gmm = GMM(n_comp)
        self.minibatch_size = minibatch_size
        self.epoch_size = epoch_size
        self.learning_rate = learning_rate
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.normalize = normalize
        # Set lazily: scaler in fit() (when normalize=True); graph and
        # session are created in fit() as well.
        self.scaler = None
        self.seed = random_seed
        self.graph = None
        self.sess = None
def __del__(self):
    """Close the TensorFlow session (if one exists) on garbage collection."""
    session = self.sess
    if session is not None:
        session.close()
def fit(self, x):
    """ Fit the DAGMM model according to the given data.
    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        Training data.
    """
    n_samples, n_features = x.shape

    if self.normalize:
        # Standardize features; the fitted scaler is kept so predict()
        # can apply the same transformation.
        self.scaler = scaler = StandardScaler()
        x = scaler.fit_transform(x)

    # Build a fresh graph for this training run.
    with tf.Graph().as_default() as graph:
        self.graph = graph
        tf.set_random_seed(self.seed)
        np.random.seed(seed=self.seed)

        # Create Placeholder
        self.input = input = tf.placeholder(
            dtype=tf.float32, shape=[None, n_features])
        self.drop = drop = tf.placeholder(dtype=tf.float32, shape=[])

        # Build graph: compression net -> latent z, estimation net -> gamma,
        # then GMM parameters and per-sample energy.
        z, x_dash = self.comp_net.inference(input)
        gamma = self.est_net.inference(z, drop)
        self.gmm.fit(z, gamma)
        energy = self.gmm.energy(z)

        self.x_dash = x_dash

        # Loss function: reconstruction error + lambda1 * mean energy
        # + lambda2 * covariance-diagonal regularizer.
        loss = (self.comp_net.reconstruction_error(input, x_dash) +
                self.lambda1 * tf.reduce_mean(energy) +
                self.lambda2 * self.gmm.cov_diag_loss())

        # Minimizer
        minimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

        # Number of batch
        n_batch = (n_samples - 1) // self.minibatch_size + 1

        # Create tensorflow session and initilize
        init = tf.global_variables_initializer()

        self.sess = tf.Session(graph=graph)
        self.sess.run(init)

        # Training
        # NOTE: the sample order is shuffled once, before the epoch loop,
        # not re-shuffled per epoch.
        idx = np.arange(x.shape[0])
        np.random.shuffle(idx)

        for epoch in range(self.epoch_size):
            for batch in range(n_batch):
                i_start = batch * self.minibatch_size
                i_end = (batch + 1) * self.minibatch_size
                x_batch = x[idx[i_start:i_end]]
                self.sess.run(minimizer, feed_dict={
                    input: x_batch, drop: self.est_dropout_ratio})
            # Report full-dataset loss every 100 epochs (dropout disabled).
            if (epoch + 1) % 100 == 0:
                loss_val = self.sess.run(loss, feed_dict={input: x, drop: 0})
                print(f" epoch {epoch+1}/{self.epoch_size} : loss = {loss_val:.3f}")

        # Fix GMM parameter: freeze the mixture parameters learned during
        # training so energy can be evaluated on new data.
        fix = self.gmm.fix_op()
        self.sess.run(fix, feed_dict={input: x, drop: 0})
        self.energy = self.gmm.energy(z)

        # Register tensors needed by predict() after restore().
        tf.add_to_collection("save", self.input)
        tf.add_to_collection("save", self.energy)

        self.saver = tf.train.Saver()
def predict(self, x):
    """Return the anomaly score (sample energy) of each sample in ``x``.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        Samples to score; n_features must match the fitted data.

    Returns
    -------
    array-like, shape (n_samples)
        Sample energies (higher means more anomalous).

    Raises
    ------
    Exception
        If neither fit() nor restore() has been called yet.
    """
    if self.sess is None:
        raise Exception("Trained model does not exist.")

    samples = self.scaler.transform(x) if self.normalize else x
    return self.sess.run(self.energy, feed_dict={self.input: samples})
def save(self, fdir):
    """ Save trained model to designated directory.
    This method have to be called after training.
    (If not, throw an exception)
    Parameters
    ----------
    fdir : str
        Path of directory trained model is saved.
        If not exists, it is created automatically.
    """
    if self.sess is None:
        raise Exception("Trained model does not exist.")

    if not exists(fdir):
        makedirs(fdir)

    # TensorFlow checkpoint files share the MODEL_FILENAME prefix.
    model_path = join(fdir, self.MODEL_FILENAME)
    self.saver.save(self.sess, model_path)

    if self.normalize:
        # Persist the fitted scaler next to the checkpoint so that
        # predict() applies the same normalization after restore().
        scaler_path = join(fdir, self.SCALER_FILENAME)
        joblib.dump(self.scaler, scaler_path)
def restore(self, fdir):
    """ Restore trained model from designated directory.
    Parameters
    ----------
    fdir : str
        Path of directory trained model is saved.
    """
    if not exists(fdir):
        raise Exception("Model directory does not exist.")

    model_path = join(fdir, self.MODEL_FILENAME)
    meta_path = model_path + ".meta"

    # Rebuild the graph from the checkpoint's meta file in a fresh graph,
    # then pull the input/energy tensors registered by fit() under "save".
    with tf.Graph().as_default() as graph:
        self.graph = graph
        self.sess = tf.Session(graph=graph)
        self.saver = tf.train.import_meta_graph(meta_path)
        self.saver.restore(self.sess, model_path)

        self.input, self.energy = tf.get_collection("save")

    if self.normalize:
        # Reload the scaler saved by save(); assumes it exists whenever
        # the model was trained with normalize=True.
        scaler_path = join(fdir, self.SCALER_FILENAME)
        self.scaler = joblib.load(scaler_path)
| 34.2 | 88 | 0.588816 |
acfbe701601bd2b15edaa92de2c78c0a24498a5b | 3,128 | py | Python | learning_log/settings.py | albertomendess/django-learning-log | 53edc1932406da6585361f49e0662080eac7fdd8 | [
"MIT"
] | null | null | null | learning_log/settings.py | albertomendess/django-learning-log | 53edc1932406da6585361f49e0662080eac7fdd8 | [
"MIT"
] | null | null | null | learning_log/settings.py | albertomendess/django-learning-log | 53edc1932406da6585361f49e0662080eac7fdd8 | [
"MIT"
] | null | null | null | """
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^+&_-ih7tm)eu^bb@+$xxo7c@-+7laz(1hv3d*-d%+j5w41ziw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My_aplications
'learning_logs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.024 | 91 | 0.694373 |
acfbe721397195d5826ccec5e0e748fa6af2fee8 | 1,804 | py | Python | google-cloud-vision-v1p3beta1/synth.py | trambui09/google-cloud-ruby | 9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa | [
"Apache-2.0"
] | 8 | 2021-04-24T02:35:09.000Z | 2022-01-29T03:05:45.000Z | google-cloud-vision-v1p3beta1/synth.py | trambui09/google-cloud-ruby | 9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa | [
"Apache-2.0"
] | 1 | 2021-02-24T07:42:47.000Z | 2021-02-24T07:42:47.000Z | google-cloud-vision-v1p3beta1/synth.py | trambui09/google-cloud-ruby | 9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa | [
"Apache-2.0"
] | 2 | 2021-06-02T18:47:14.000Z | 2021-09-18T07:08:17.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
from subprocess import call

logging.basicConfig(level=logging.DEBUG)

# Generate the v1p3beta1 Ruby client for the Cloud Vision API from the
# googleapis protos using the microgenerator.
gapic = gcp.GAPICMicrogenerator()
library = gapic.ruby_library(
    "vision", "v1p3beta1",
    extra_proto_files=["google/cloud/common_resources.proto"],
    generator_args={
        "ruby-cloud-gem-name": "google-cloud-vision-v1p3beta1",
        "ruby-cloud-title": "Cloud Vision V1p3beta1",
        "ruby-cloud-description": "Cloud Vision API allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content.",
        "ruby-cloud-env-prefix": "VISION",
        "ruby-cloud-grpc-service-config": "google/cloud/vision/v1p3beta1/vision_grpc_service_config.json",
        "ruby-cloud-product-url": "https://cloud.google.com/vision",
        "ruby-cloud-api-id": "vision.googleapis.com",
        "ruby-cloud-api-shortname": "vision",
    }
)

# Merge the generated library into the repository layout.
s.copy(library, merge=ruby.global_merge)

# Post-generation step: add hand-written helper methods to the surface.
call('ruby synth/generate_helpers.rb', shell=True)
acfbe7b2db89055a10f369832dc177e88793364e | 54,671 | py | Python | tests/unit/test_tuner.py | AhmadShayeq/sagemaker-python-sdk | def1416ee3d5bdba3007dcebf9c1b58e10330c39 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_tuner.py | AhmadShayeq/sagemaker-python-sdk | def1416ee3d5bdba3007dcebf9c1b58e10330c39 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_tuner.py | AhmadShayeq/sagemaker-python-sdk | def1416ee3d5bdba3007dcebf9c1b58e10330c39 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import copy
import os
import re
import pytest
from mock import Mock, patch
from sagemaker import Predictor, TrainingInput, utils
from sagemaker.amazon.amazon_estimator import RecordSet
from sagemaker.estimator import Framework
from sagemaker.mxnet import MXNet
from sagemaker.parameter import ParameterRange
from sagemaker.tuner import (
_TuningJob,
create_identical_dataset_and_algorithm_tuner,
create_transfer_learning_tuner,
HyperparameterTuner,
)
from sagemaker.workflow.functions import JsonGet, Join
from sagemaker.workflow.parameters import ParameterString, ParameterInteger
from .tuner_test_utils import * # noqa: F403
@pytest.fixture()
def sagemaker_session():
    """Return a fully mocked SageMaker session with canned endpoint responses."""
    boto_mock = Mock(name="boto_session", region_name=REGION)
    sms = Mock(name="sagemaker_session", boto_session=boto_mock, s3_client=None, s3_resource=None)
    sms.boto_region_name = REGION
    sms.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
    sms.config = None
    # Canned API responses used by the deploy/attach code paths.
    sms.sagemaker_client.describe_endpoint = Mock(return_value=ENDPOINT_DESC)
    sms.sagemaker_client.describe_endpoint_config = Mock(return_value=ENDPOINT_CONFIG_DESC)

    return sms
@pytest.fixture()
def estimator(sagemaker_session):
    """Return a generic Estimator bound to the mocked session."""
    return Estimator(
        IMAGE_NAME,
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        output_path="s3://bucket/prefix",
        sagemaker_session=sagemaker_session,
    )
@pytest.fixture()
def tuner(estimator):
    """Return a HyperparameterTuner wrapping the shared estimator fixture."""
    return HyperparameterTuner(
        estimator, OBJECTIVE_METRIC_NAME, HYPERPARAMETER_RANGES, METRIC_DEFINITIONS
    )
def test_prepare_for_training(tuner):
    """Pipeline variables survive _prepare_for_tuning as structured expressions."""
    # JsonGet/Join are SageMaker Pipelines variables: they must not be
    # stringified like plain hyperparameters.
    hp1 = JsonGet(step_name="stepname", property_file="pf", json_path="jp")
    hp2 = Join(on="/", values=["1", "2", ParameterString(name="ps", default_value="3")])

    static_hyperparameters = {
        "validated": 1,
        "another_one": 0,
        "hp1": hp1,
        "hp2": hp2,
    }

    tuner.estimator.set_hyperparameters(**static_hyperparameters)
    tuner._prepare_for_tuning()

    assert tuner._current_job_name.startswith(IMAGE_NAME)

    # "validated" is consumed by the tuner's hyperparameter ranges, so only
    # three static hyperparameters remain.
    assert len(tuner.static_hyperparameters) == 3
    assert tuner.static_hyperparameters["another_one"] == "0"

    assert tuner.static_hyperparameters["hp1"].expr == {
        "Std:Join": {
            "On": "",
            "Values": [
                {
                    "Std:JsonGet": {
                        "PropertyFile": {"Get": "Steps.stepname.PropertyFiles.pf"},
                        "Path": "jp",
                    },
                },
            ],
        }
    }
    assert tuner.static_hyperparameters["hp2"] == hp2
def test_prepare_for_tuning_with_amazon_estimator(tuner, sagemaker_session):
    """First-party (Amazon) algorithms get no estimator class metadata injected."""
    tuner.estimator = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        sagemaker_session=sagemaker_session,
    )

    tuner._prepare_for_tuning()
    assert "sagemaker_estimator_class_name" not in tuner.static_hyperparameters
    assert "sagemaker_estimator_module" not in tuner.static_hyperparameters
def test_prepare_for_tuning_include_estimator_cls(tuner):
    """include_cls_metadata=True injects estimator class/module hyperparameters."""
    tuner._prepare_for_tuning(include_cls_metadata=True)
    for expected_key in ("sagemaker_estimator_class_name", "sagemaker_estimator_module"):
        assert expected_key in tuner.static_hyperparameters
def test_prepare_for_tuning_with_job_name(tuner):
    """An explicitly supplied job name overrides the auto-generated one."""
    hyperparams = {"validated": 1, "another_one": 0}
    tuner.estimator.set_hyperparameters(**hyperparams)

    custom_name = "some-other-job-name"
    tuner._prepare_for_tuning(job_name=custom_name)
    assert tuner._current_job_name == custom_name
def test_validate_parameter_ranges_number_validation_error(sagemaker_session):
    """Constructing a tuner with a non-positive integer range fails validation."""
    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
    )

    # num_components must be a positive integer; a lower bound of -1 is invalid.
    invalid_hyperparameter_ranges = {"num_components": IntegerParameter(-1, 2)}

    with pytest.raises(ValueError) as e:
        HyperparameterTuner(
            estimator=pca,
            objective_metric_name=OBJECTIVE_METRIC_NAME,
            hyperparameter_ranges=invalid_hyperparameter_ranges,
            metric_definitions=METRIC_DEFINITIONS,
        )

    # Inspect the raised exception itself: str(e) stringifies the pytest
    # ExceptionInfo wrapper, not the ValueError message.
    assert "Value must be an integer greater than zero" in str(e.value)
def test_validate_parameter_ranges_string_value_validation_error(sagemaker_session):
    """Constructing a tuner with invalid categorical values fails validation."""
    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
    )

    # algorithm_mode only accepts "regular" or "randomized".
    invalid_hyperparameter_ranges = {"algorithm_mode": CategoricalParameter([0, 5])}

    with pytest.raises(ValueError) as e:
        HyperparameterTuner(
            estimator=pca,
            objective_metric_name=OBJECTIVE_METRIC_NAME,
            hyperparameter_ranges=invalid_hyperparameter_ranges,
            metric_definitions=METRIC_DEFINITIONS,
        )

    # Inspect the raised exception itself: str(e) stringifies the pytest
    # ExceptionInfo wrapper, not the ValueError message.
    assert 'Value must be one of "regular" and "randomized"' in str(e.value)
def test_fit_pca(sagemaker_session, tuner):
pca = PCA(
ROLE,
INSTANCE_COUNT,
INSTANCE_TYPE,
NUM_COMPONENTS,
base_job_name="pca",
sagemaker_session=sagemaker_session,
)
pca.algorithm_mode = "randomized"
pca.subtract_mean = True
pca.extra_components = 5
tuner.estimator = pca
tags = [{"Name": "some-tag-without-a-value"}]
tuner.tags = tags
tuner._hyperparameter_ranges = HYPERPARAMETER_RANGES_TWO
records = RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)
tuner.fit(records, mini_batch_size=9999)
_, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]
assert tuner.estimator.mini_batch_size == 9999
assert tune_kwargs["job_name"].startswith("pca")
assert tune_kwargs["tags"] == tags
assert len(tune_kwargs["tuning_config"]["parameter_ranges"]["IntegerParameterRanges"]) == 1
assert tune_kwargs["tuning_config"]["early_stopping_type"] == "Off"
assert tuner.estimator.mini_batch_size == 9999
assert "training_config" in tune_kwargs
assert "training_config_list" not in tune_kwargs
assert len(tune_kwargs["training_config"]["static_hyperparameters"]) == 4
assert tune_kwargs["training_config"]["static_hyperparameters"]["extra_components"] == "5"
assert "estimator_name" not in tune_kwargs["training_config"]
assert "objective_type" not in tune_kwargs["training_config"]
assert "objective_metric_name" not in tune_kwargs["training_config"]
assert "parameter_ranges" not in tune_kwargs["training_config"]
def test_fit_pca_with_early_stopping(sagemaker_session, tuner):
    """The tuner's early_stopping_type is propagated into the tuning config."""
    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
    )

    tuner.estimator = pca
    tuner.early_stopping_type = "Auto"

    records = RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)
    tuner.fit(records, mini_batch_size=9999)

    _, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]

    assert tune_kwargs["job_name"].startswith("pca")
    assert tune_kwargs["tuning_config"]["early_stopping_type"] == "Auto"
def test_fit_pca_with_vpc_config(sagemaker_session, tuner):
    """The estimator's VPC settings end up in the training config's vpc_config."""
    subnets = ["foo"]
    security_group_ids = ["bar"]

    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
        subnets=subnets,
        security_group_ids=security_group_ids,
    )

    tuner.estimator = pca

    records = RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)
    tuner.fit(records, mini_batch_size=9999)

    _, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]
    assert tune_kwargs["training_config"]["vpc_config"] == {
        "Subnets": subnets,
        "SecurityGroupIds": security_group_ids,
    }
def test_training_input_mode(sagemaker_session, tuner):
    """The input_mode of a TrainingInput overrides the estimator's default."""
    expected_input_mode = "Pipe"

    script_path = os.path.join(DATA_DIR, "mxnet_mnist", "failure_script.py")
    mxnet = MXNet(
        entry_point=script_path,
        framework_version=FRAMEWORK_VERSION,
        py_version=PY_VERSION,
        role=ROLE,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        sagemaker_session=sagemaker_session,
    )
    tuner.estimator = mxnet

    tags = [{"Name": "some-tag-without-a-value"}]
    tuner.tags = tags

    hyperparameter_ranges = {
        "num_components": IntegerParameter(2, 4),
        "algorithm_mode": CategoricalParameter(["regular", "randomized"]),
    }
    tuner._hyperparameter_ranges = hyperparameter_ranges

    tuner.fit(inputs=TrainingInput("s3://mybucket/train_manifest", input_mode=expected_input_mode))

    # NOTE(review): positionally indexing method_calls is brittle — this picks
    # the kwargs of the second recorded session call (create_tuning_job).
    actual_input_mode = sagemaker_session.method_calls[1][2]["training_config"]["input_mode"]
    assert actual_input_mode == expected_input_mode
def test_fit_pca_with_inter_container_traffic_encryption_flag(sagemaker_session, tuner):
    """encrypt_inter_container_traffic on the estimator reaches the training config."""
    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
        encrypt_inter_container_traffic=True,
    )

    tuner.estimator = pca

    records = RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)
    tuner.fit(records, mini_batch_size=9999)

    _, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]

    assert tune_kwargs["job_name"].startswith("pca")
    assert tune_kwargs["training_config"]["encrypt_inter_container_traffic"] is True
@pytest.mark.parametrize(
    "inputs,include_cls_metadata,estimator_kwargs,error_message",
    [
        # inputs must be a dict keyed by estimator name for multi-algo tuners.
        (
            RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1),
            {ESTIMATOR_NAME_TWO: True},
            {},
            re.compile(
                "Argument 'inputs' must be a dictionary using \\['estimator_name', 'estimator_name_two'\\] as keys"
            ),
        ),
        # include_cls_metadata must be a dict keyed by estimator name.
        (
            {ESTIMATOR_NAME: RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)},
            False,
            {},
            re.compile(
                "Argument 'include_cls_metadata' must be a dictionary using \\['estimator_name', "
                "'estimator_name_two'\\] as keys"
            ),
        ),
        # estimator_kwargs must be a dict keyed by estimator name.
        (
            {ESTIMATOR_NAME: RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)},
            {ESTIMATOR_NAME_TWO: True},
            False,
            re.compile(
                "Argument 'estimator_kwargs' must be a dictionary using \\['estimator_name', "
                "'estimator_name_two'\\] as keys"
            ),
        ),
        # Keys of inputs must all be known estimator names.
        (
            {
                ESTIMATOR_NAME: RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1),
                "Invalid estimator": RecordSet(s3_data=INPUTS, num_records=10, feature_dim=5),
            },
            {ESTIMATOR_NAME_TWO: True},
            None,
            re.compile(
                "The keys of argument 'inputs' must be a subset of \\['estimator_name', 'estimator_name_two'\\]"
            ),
        ),
    ],
)
def test_fit_multi_estimators_invalid_inputs(
    sagemaker_session, inputs, include_cls_metadata, estimator_kwargs, error_message
):
    """fit() on a multi-algorithm tuner validates the shape of its dict arguments."""
    (tuner, estimator_one, estimator_two) = _create_multi_estimator_tuner(sagemaker_session)

    with pytest.raises(ValueError, match=error_message):
        tuner.fit(
            inputs=inputs,
            include_cls_metadata=include_cls_metadata,
            estimator_kwargs=estimator_kwargs,
        )
def test_fit_multi_estimators(sagemaker_session):
    """fit() with two estimators produces a training_config_list request."""
    (tuner, estimator_one, estimator_two) = _create_multi_estimator_tuner(sagemaker_session)

    records = {ESTIMATOR_NAME_TWO: RecordSet(s3_data=INPUTS, num_records=1, feature_dim=1)}

    estimator_kwargs = {ESTIMATOR_NAME_TWO: {"mini_batch_size": 4000}}

    tuner.fit(inputs=records, include_cls_metadata={}, estimator_kwargs=estimator_kwargs)

    _, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]

    assert tune_kwargs["job_name"].startswith(BASE_JOB_NAME)
    assert tune_kwargs["tags"] == TAGS

    assert tune_kwargs["tuning_config"]["strategy"] == STRATEGY
    assert tune_kwargs["tuning_config"]["max_jobs"] == MAX_JOBS
    assert tune_kwargs["tuning_config"]["max_parallel_jobs"] == MAX_PARALLEL_JOBS
    assert tune_kwargs["tuning_config"]["early_stopping_type"] == EARLY_STOPPING_TYPE

    # For multi-algorithm jobs, objective/ranges live per training config.
    assert "tuning_objective" not in tune_kwargs["tuning_config"]
    assert "parameter_ranges" not in tune_kwargs["tuning_config"]

    assert "training_config" not in tune_kwargs
    assert "training_config_list" in tune_kwargs

    assert len(tune_kwargs["training_config_list"]) == 2

    training_config_one = tune_kwargs["training_config_list"][0]
    training_config_two = tune_kwargs["training_config_list"][1]

    assert training_config_one["estimator_name"] == ESTIMATOR_NAME
    assert training_config_one["objective_type"] == "Minimize"
    assert training_config_one["objective_metric_name"] == OBJECTIVE_METRIC_NAME
    # No inputs were supplied for the first estimator.
    assert training_config_one["input_config"] is None
    assert training_config_one["image_uri"] == estimator_one.training_image_uri()
    assert training_config_one["metric_definitions"] == METRIC_DEFINITIONS
    assert (
        training_config_one["static_hyperparameters"]["sagemaker_estimator_module"]
        == '"sagemaker.mxnet.estimator"'
    )
    _assert_parameter_ranges(
        HYPERPARAMETER_RANGES,
        training_config_one["parameter_ranges"],
        isinstance(estimator_one, Framework),
    )

    assert training_config_two["estimator_name"] == ESTIMATOR_NAME_TWO
    assert training_config_two["objective_type"] == "Minimize"
    assert training_config_two["objective_metric_name"] == OBJECTIVE_METRIC_NAME_TWO
    assert len(training_config_two["input_config"]) == 1
    assert training_config_two["input_config"][0]["DataSource"]["S3DataSource"]["S3Uri"] == INPUTS
    assert training_config_two["image_uri"] == estimator_two.training_image_uri()
    assert training_config_two["metric_definitions"] is None
    # estimator_kwargs override is reflected as a static hyperparameter.
    assert training_config_two["static_hyperparameters"]["mini_batch_size"] == "4000"
    _assert_parameter_ranges(
        HYPERPARAMETER_RANGES_TWO,
        training_config_two["parameter_ranges"],
        isinstance(estimator_two, Framework),
    )
def _create_multi_estimator_tuner(sagemaker_session):
    """Build a two-algorithm tuner (MXNet + PCA) plus its two estimators.

    Returns
    -------
    tuple
        (tuner, mxnet_estimator, pca_estimator)
    """
    mxnet_script_path = os.path.join(DATA_DIR, "mxnet_mnist", "failure_script.py")
    mxnet = MXNet(
        entry_point=mxnet_script_path,
        framework_version=FRAMEWORK_VERSION,
        py_version=PY_VERSION,
        role=ROLE,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        sagemaker_session=sagemaker_session,
    )

    pca = PCA(
        ROLE,
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        NUM_COMPONENTS,
        base_job_name="pca",
        sagemaker_session=sagemaker_session,
    )
    pca.algorithm_mode = "randomized"
    pca.subtract_mean = True
    pca.extra_components = 5

    tuner = HyperparameterTuner.create(
        base_tuning_job_name=BASE_JOB_NAME,
        estimator_dict={ESTIMATOR_NAME: mxnet, ESTIMATOR_NAME_TWO: pca},
        objective_metric_name_dict={
            ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME,
            ESTIMATOR_NAME_TWO: OBJECTIVE_METRIC_NAME_TWO,
        },
        hyperparameter_ranges_dict={
            ESTIMATOR_NAME: HYPERPARAMETER_RANGES,
            ESTIMATOR_NAME_TWO: HYPERPARAMETER_RANGES_TWO,
        },
        metric_definitions_dict={ESTIMATOR_NAME: METRIC_DEFINITIONS},
        strategy=STRATEGY,
        objective_type=OBJECTIVE_TYPE,
        max_jobs=MAX_JOBS,
        max_parallel_jobs=MAX_PARALLEL_JOBS,
        tags=TAGS,
        warm_start_config=WARM_START_CONFIG,
        early_stopping_type=EARLY_STOPPING_TYPE,
    )

    return tuner, mxnet, pca
def _assert_parameter_ranges(expected, actual, is_framework_estimator):
    """Assert that ``actual`` is the serialized form of the ``expected`` ranges.

    Framework estimators serialize categorical values as JSON strings, so
    ``is_framework_estimator`` selects between as_json_range/as_tuning_range.
    """
    buckets = {"Continuous": [], "Integer": [], "Categorical": []}

    for name, param_range in expected.items():
        if isinstance(param_range, ContinuousParameter):
            buckets["Continuous"].append(param_range.as_tuning_range(name))
        elif isinstance(param_range, IntegerParameter):
            buckets["Integer"].append(param_range.as_tuning_range(name))
        elif is_framework_estimator:
            buckets["Categorical"].append(param_range.as_json_range(name))
        else:
            buckets["Categorical"].append(param_range.as_tuning_range(name))

    for kind, ranges in buckets.items():
        assert ranges == actual[kind + "ParameterRanges"]
def test_attach_tuning_job_with_estimator_from_hyperparameters(sagemaker_session):
    """attach() reconstructs the tuner and PCA estimator from the job description."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)

    assert tuner.latest_tuning_job.name == JOB_NAME
    assert tuner.base_tuning_job_name == JOB_NAME
    assert tuner._current_job_name == JOB_NAME
    assert tuner.objective_metric_name == OBJECTIVE_METRIC_NAME
    assert tuner.max_jobs == 1
    assert tuner.max_parallel_jobs == 1
    assert tuner.metric_definitions == METRIC_DEFINITIONS
    assert tuner.strategy == "Bayesian"
    assert tuner.objective_type == "Minimize"
    assert tuner.early_stopping_type == "Off"

    # Estimator class is recovered from the sagemaker_estimator_* hyperparameters.
    assert isinstance(tuner.estimator, PCA)
    assert tuner.estimator.role == ROLE
    assert tuner.estimator.instance_count == 1
    assert tuner.estimator.max_run == 24 * 60 * 60
    assert tuner.estimator.input_mode == "File"
    assert tuner.estimator.output_path == BUCKET_NAME
    assert tuner.estimator.output_kms_key == ""

    # Internal tuning metadata must not leak into user-visible hyperparameters.
    assert "_tuning_objective_metric" not in tuner.estimator.hyperparameters()
    assert tuner.estimator.hyperparameters()["num_components"] == "10"
def test_attach_tuning_job_with_estimator_from_hyperparameters_with_early_stopping(
    sagemaker_session,
):
    """attach() picks up TrainingJobEarlyStoppingType from the job description."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    job_details["HyperParameterTuningJobConfig"]["TrainingJobEarlyStoppingType"] = "Auto"
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)

    assert tuner.latest_tuning_job.name == JOB_NAME
    assert tuner.early_stopping_type == "Auto"

    assert isinstance(tuner.estimator, PCA)
def test_attach_tuning_job_with_job_details(sagemaker_session):
    """When job_details is supplied, attach() must not call the Describe API."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    HyperparameterTuner.attach(
        JOB_NAME, sagemaker_session=sagemaker_session, job_details=job_details
    )
    # BUG FIX: the original referenced ``assert_not_called`` without calling
    # it — a bare attribute access on a Mock is a no-op, so the assertion
    # never actually ran.
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job.assert_not_called()
def test_attach_tuning_job_with_estimator_from_image(sagemaker_session):
    """attach() can infer the estimator class (PCA) from the training image URI."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    job_details["TrainingJobDefinition"]["AlgorithmSpecification"][
        "TrainingImage"
    ] = "1111.amazonaws.com/pca:1"
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )

    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    assert isinstance(tuner.estimator, PCA)
def test_attach_tuning_job_with_estimator_from_kwarg(sagemaker_session):
    """An explicit estimator_cls argument overrides estimator-class detection."""
    details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=details
    )

    attached = HyperparameterTuner.attach(
        JOB_NAME, sagemaker_session=sagemaker_session, estimator_cls="sagemaker.estimator.Estimator"
    )
    assert isinstance(attached.estimator, Estimator)
def test_attach_with_no_specified_estimator(sagemaker_session):
    """Without class metadata hyperparameters, attach() falls back to Estimator."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    # Remove the metadata attach() would normally use to pick the class.
    del job_details["TrainingJobDefinition"]["StaticHyperParameters"]["sagemaker_estimator_module"]
    del job_details["TrainingJobDefinition"]["StaticHyperParameters"][
        "sagemaker_estimator_class_name"
    ]
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )

    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    assert isinstance(tuner.estimator, Estimator)
def test_attach_with_generated_job_name(sagemaker_session):
    """attach() recovers the base job name from an auto-generated job name."""
    job_name = utils.name_from_base(BASE_JOB_NAME, max_length=32, short=True)

    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    job_details["HyperParameterTuningJobName"] = job_name

    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )

    tuner = HyperparameterTuner.attach(job_name, sagemaker_session=sagemaker_session)
    assert BASE_JOB_NAME == tuner.base_tuning_job_name
def test_attach_with_warm_start_config(sagemaker_session):
    """attach() round-trips the warm start configuration from the description."""
    warm_start_config = WarmStartConfig(
        warm_start_type=WarmStartTypes.TRANSFER_LEARNING, parents={"p1", "p2"}
    )
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    job_details["WarmStartConfig"] = warm_start_config.to_input_req()

    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )

    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    assert tuner.warm_start_config.type == warm_start_config.type
    assert tuner.warm_start_config.parents == warm_start_config.parents
def test_attach_tuning_job_with_multi_estimators(sagemaker_session):
    """attach() on a multi-algorithm job populates the *_dict attributes only."""
    job_details = copy.deepcopy(MULTI_ALGO_TUNING_JOB_DETAILS)
    tuner = HyperparameterTuner.attach(
        JOB_NAME,
        sagemaker_session=sagemaker_session,
        estimator_cls={ESTIMATOR_NAME_TWO: "sagemaker.estimator.Estimator"},
        job_details=job_details,
    )

    assert tuner.latest_tuning_job.name == JOB_NAME
    assert tuner.strategy == "Bayesian"
    assert tuner.objective_type == "Minimize"
    assert tuner.max_jobs == 4
    assert tuner.max_parallel_jobs == 2
    assert tuner.early_stopping_type == "Off"
    assert tuner.warm_start_config is None

    # Single-algorithm attributes stay unset for multi-algorithm tuners ...
    assert tuner.estimator is None
    assert tuner.objective_metric_name is None
    assert tuner._hyperparameter_ranges is None
    assert tuner.metric_definitions is None

    # ... and the per-estimator dictionaries carry the configuration instead.
    assert tuner.estimator_dict is not None
    assert tuner.objective_metric_name_dict is not None
    assert tuner._hyperparameter_ranges_dict is not None
    assert tuner.metric_definitions_dict is not None

    assert len(tuner.estimator_dict) == 2

    estimator_names = tuner.estimator_dict.keys()
    assert tuner.objective_metric_name_dict.keys() == estimator_names
    assert tuner._hyperparameter_ranges_dict.keys() == estimator_names
    assert set(tuner.metric_definitions_dict.keys()).issubset(set(estimator_names))

    assert isinstance(tuner.estimator_dict[ESTIMATOR_NAME], PCA)
    assert isinstance(tuner.estimator_dict[ESTIMATOR_NAME_TWO], Estimator)

    assert tuner.objective_metric_name_dict[ESTIMATOR_NAME] == OBJECTIVE_METRIC_NAME
    assert tuner.objective_metric_name_dict[ESTIMATOR_NAME_TWO] == OBJECTIVE_METRIC_NAME_TWO

    parameter_ranges_one = tuner._hyperparameter_ranges_dict[ESTIMATOR_NAME]
    assert len(parameter_ranges_one) == 1
    assert isinstance(parameter_ranges_one.get("mini_batch_size", None), IntegerParameter)

    parameter_ranges_two = tuner._hyperparameter_ranges_dict[ESTIMATOR_NAME_TWO]
    assert len(parameter_ranges_two) == 2
    assert isinstance(parameter_ranges_two.get("kernel", None), CategoricalParameter)
    assert isinstance(parameter_ranges_two.get("tree_count", None), IntegerParameter)

    assert len(tuner.metric_definitions_dict) == 1
    assert tuner.metric_definitions_dict[ESTIMATOR_NAME_TWO] == METRIC_DEFINITIONS
def test_serialize_parameter_ranges(tuner):
    """hyperparameter_ranges() groups each range under '<Type>ParameterRanges'."""
    hyperparameter_ranges = tuner.hyperparameter_ranges()

    for key, value in HYPERPARAMETER_RANGES.items():
        # ParameterRange subclasses expose their range type via the
        # ``__name__`` class attribute, which keys the serialized dict.
        assert hyperparameter_ranges[value.__name__ + "ParameterRanges"][0]["Name"] == key
def test_analytics(tuner):
    """analytics() should return an analytics object named after the latest tuning job."""
    tuner.latest_tuning_job = _TuningJob(tuner.sagemaker_session, "testjob")
    tuner_analytics = tuner.analytics()
    assert tuner_analytics is not None
    assert tuner_analytics.name.find("testjob") > -1
def test_serialize_categorical_ranges_for_frameworks(sagemaker_session, tuner):
    """Framework estimators should serialize categorical values as JSON-quoted strings."""
    tuner.estimator = MXNet(
        entry_point=SCRIPT_NAME,
        framework_version=FRAMEWORK_VERSION,
        py_version=PY_VERSION,
        role=ROLE,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        sagemaker_session=sagemaker_session,
    )
    hyperparameter_ranges = tuner.hyperparameter_ranges()
    assert hyperparameter_ranges["CategoricalParameterRanges"][0]["Name"] == "blank"
    # Note the extra quoting: values are JSON-encoded strings for framework estimators.
    assert hyperparameter_ranges["CategoricalParameterRanges"][0]["Values"] == ['"0"', '"5"']
def test_serialize_nonexistent_parameter_ranges(tuner):
    """A range entry set to None should serialize as an empty list for its type key."""
    temp_hyperparameter_ranges = HYPERPARAMETER_RANGES.copy()
    parameter_type = temp_hyperparameter_ranges["validated"].__name__
    temp_hyperparameter_ranges["validated"] = None
    tuner._hyperparameter_ranges = temp_hyperparameter_ranges
    ranges = tuner.hyperparameter_ranges()
    # All three parameter-type keys are present even when one has no entries.
    assert len(ranges.keys()) == 3
    assert not ranges[parameter_type + "ParameterRanges"]
def test_stop_tuning_job(sagemaker_session, tuner):
    """stop_tuning_job() should stop the latest tuning job by name via the session."""
    sagemaker_session.stop_tuning_job = Mock(name="stop_hyper_parameter_tuning_job")
    tuner.latest_tuning_job = _TuningJob(sagemaker_session, JOB_NAME)
    tuner.stop_tuning_job()
    sagemaker_session.stop_tuning_job.assert_called_once_with(name=JOB_NAME)
def test_stop_tuning_job_no_tuning_job(tuner):
    """Stopping before any tuning job has started should raise a descriptive ValueError."""
    with pytest.raises(ValueError) as e:
        tuner.stop_tuning_job()
    # Inspect the raised exception itself, not the ExceptionInfo wrapper's repr.
    assert "No tuning job available" in str(e.value)
def test_best_tuning_job(tuner):
    """best_training_job() should return the name reported by the describe call."""
    tuning_job_description = {"BestTrainingJob": {"TrainingJobName": JOB_NAME}}
    tuner.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_hyper_parameter_tuning_job", return_value=tuning_job_description
    )
    tuner.latest_tuning_job = _TuningJob(tuner.estimator.sagemaker_session, JOB_NAME)
    best_training_job = tuner.best_training_job()
    assert best_training_job == JOB_NAME
    tuner.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job.assert_called_once_with(
        HyperParameterTuningJobName=JOB_NAME
    )
def test_best_tuning_job_no_latest_job(tuner):
    """best_training_job() without a started tuning job should raise with a clear message."""
    with pytest.raises(Exception) as e:
        tuner.best_training_job()
    # Inspect the raised exception itself, not the ExceptionInfo wrapper's repr.
    assert "No tuning job available" in str(e.value)
def test_best_tuning_job_no_best_job(tuner):
    """A describe response without 'BestTrainingJob' should raise a descriptive error."""
    tuning_job_description = {"TuningJobName": "a_job"}
    tuner.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_hyper_parameter_tuning_job", return_value=tuning_job_description
    )
    tuner.latest_tuning_job = _TuningJob(tuner.estimator.sagemaker_session, JOB_NAME)
    with pytest.raises(Exception) as e:
        tuner.best_training_job()
    tuner.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job.assert_called_once_with(
        HyperParameterTuningJobName=JOB_NAME
    )
    # Inspect the raised exception itself, not the ExceptionInfo wrapper's repr.
    assert "Best training job not available for tuning job:" in str(e.value)
def test_best_estimator(tuner):
    """best_estimator() should return an estimator attached to the best training job."""
    tuner.sagemaker_session.sagemaker_client.describe_training_job = Mock(
        name="describe_training_job", return_value=TRAINING_JOB_DESCRIPTION
    )
    tuner.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_hyper_parameter_tuning_job",
        return_value={"BestTrainingJob": {"TrainingJobName": TRAINING_JOB_NAME}},
    )
    tuner.sagemaker_session.sagemaker_client.list_tags = Mock(
        name="list_tags", return_value=LIST_TAGS_RESULT
    )
    tuner.sagemaker_session.log_for_jobs = Mock(name="log_for_jobs")
    tuner.latest_tuning_job = _TuningJob(tuner.sagemaker_session, JOB_NAME)
    best_estimator = tuner.best_estimator()
    assert best_estimator is not None
    assert best_estimator.latest_training_job is not None
    assert best_estimator.latest_training_job.job_name == TRAINING_JOB_NAME
    assert best_estimator.sagemaker_session == tuner.sagemaker_session
    tuner.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job.assert_called_once_with(
        HyperParameterTuningJobName=JOB_NAME
    )
    tuner.sagemaker_session.sagemaker_client.describe_training_job.assert_called_once_with(
        TrainingJobName=TRAINING_JOB_NAME
    )
def test_deploy_default(tuner):
    """deploy() should create a model from the best training job and return a Predictor."""
    tuner.sagemaker_session.sagemaker_client.describe_training_job = Mock(
        name="describe_training_job", return_value=TRAINING_JOB_DESCRIPTION
    )
    tuner.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_hyper_parameter_tuning_job",
        return_value={"BestTrainingJob": {"TrainingJobName": TRAINING_JOB_NAME}},
    )
    tuner.sagemaker_session.sagemaker_client.list_tags = Mock(
        name="list_tags", return_value=LIST_TAGS_RESULT
    )
    tuner.sagemaker_session.log_for_jobs = Mock(name="log_for_jobs")
    tuner.latest_tuning_job = _TuningJob(tuner.sagemaker_session, JOB_NAME)
    predictor = tuner.deploy(INSTANCE_COUNT, INSTANCE_TYPE)
    tuner.sagemaker_session.create_model.assert_called_once()
    # create_model positional args: (name, role, container_def)
    args = tuner.sagemaker_session.create_model.call_args[0]
    assert args[0].startswith(TRAINING_JOB_NAME)
    assert args[1] == ROLE
    assert args[2]["Image"] == IMAGE_NAME
    assert args[2]["ModelDataUrl"] == MODEL_DATA
    assert isinstance(predictor, Predictor)
    assert predictor.endpoint_name.startswith(TRAINING_JOB_NAME)
    assert predictor.sagemaker_session == tuner.sagemaker_session
def test_deploy_estimator_dict(tuner):
    """deploy() should resolve the estimator via 'TrainingJobDefinitionName' for multi-estimator tuners."""
    tuner.estimator_dict = {ESTIMATOR_NAME: tuner.estimator}
    tuner.estimator = None
    tuner.sagemaker_session.sagemaker_client.describe_training_job = Mock(
        name="describe_training_job", return_value=TRAINING_JOB_DESCRIPTION
    )
    tuner.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_hyper_parameter_tuning_job",
        return_value={
            "BestTrainingJob": {
                "TrainingJobName": TRAINING_JOB_NAME,
                "TrainingJobDefinitionName": ESTIMATOR_NAME,
            }
        },
    )
    tuner.sagemaker_session.sagemaker_client.list_tags = Mock(
        name="list_tags", return_value=LIST_TAGS_RESULT
    )
    tuner.sagemaker_session.log_for_jobs = Mock(name="log_for_jobs")
    tuner.latest_tuning_job = _TuningJob(tuner.sagemaker_session, JOB_NAME)
    predictor = tuner.deploy(INSTANCE_COUNT, INSTANCE_TYPE)
    tuner.sagemaker_session.create_model.assert_called_once()
    # create_model positional args: (name, role, container_def)
    args = tuner.sagemaker_session.create_model.call_args[0]
    assert args[0].startswith(TRAINING_JOB_NAME)
    assert args[1] == ROLE
    assert args[2]["Image"] == IMAGE_NAME
    assert args[2]["ModelDataUrl"] == MODEL_DATA
    assert isinstance(predictor, Predictor)
    assert predictor.endpoint_name.startswith(TRAINING_JOB_NAME)
    assert predictor.sagemaker_session == tuner.sagemaker_session
@patch("sagemaker.tuner.HyperparameterTuner.best_estimator")
@patch("sagemaker.tuner.HyperparameterTuner._get_best_training_job")
def test_deploy_optional_params(_get_best_training_job, best_estimator, tuner):
    """deploy() should forward all optional arguments to the estimator's deploy()."""
    tuner.fit()
    estimator = Mock()
    best_estimator.return_value = estimator
    training_job = "best-job-ever"
    _get_best_training_job.return_value = training_job
    accelerator = "ml.eia1.medium"
    endpoint_name = "foo"
    model_name = "bar"
    kms_key = "key"
    # Arbitrary extra kwargs must be passed through untouched.
    kwargs = {"some_arg": "some_value"}
    tuner.deploy(
        INSTANCE_COUNT,
        INSTANCE_TYPE,
        accelerator_type=accelerator,
        endpoint_name=endpoint_name,
        wait=False,
        model_name=model_name,
        kms_key=kms_key,
        **kwargs,
    )
    best_estimator.assert_called_with(training_job)
    estimator.deploy.assert_called_with(
        initial_instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        serializer=None,
        deserializer=None,
        accelerator_type=accelerator,
        endpoint_name=endpoint_name,
        wait=False,
        model_name=model_name,
        kms_key=kms_key,
        data_capture_config=None,
        **kwargs,
    )
def test_wait(tuner):
    """wait() should delegate to the session's wait_for_tuning_job with the job name."""
    session = tuner.estimator.sagemaker_session
    tuner.latest_tuning_job = _TuningJob(session, JOB_NAME)
    session.wait_for_tuning_job = Mock(name="wait_for_tuning_job")
    tuner.wait()
    session.wait_for_tuning_job.assert_called_once_with(JOB_NAME)
def test_fit_no_inputs(tuner, sagemaker_session):
    """fit() without inputs should pass a None input_config to the tuning job request."""
    script_path = os.path.join(DATA_DIR, "mxnet_mnist", "failure_script.py")
    tuner.estimator = MXNet(
        entry_point=script_path,
        framework_version=FRAMEWORK_VERSION,
        py_version=PY_VERSION,
        role=ROLE,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        sagemaker_session=sagemaker_session,
    )
    tuner.fit()
    _, _, tune_kwargs = sagemaker_session.create_tuning_job.mock_calls[0]
    assert tune_kwargs["training_config"]["input_config"] is None
def test_identical_dataset_and_algorithm_tuner(sagemaker_session):
    """The derived tuner should warm-start from the attached job plus additional parents."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    parent_tuner = tuner.identical_dataset_and_algorithm_tuner(additional_parents={"p1", "p2"})
    assert parent_tuner.warm_start_config.type == WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM
    assert parent_tuner.warm_start_config.parents == {tuner.latest_tuning_job.name, "p1", "p2"}
def test_transfer_learning_tuner_with_estimator(sagemaker_session, estimator):
    """transfer_learning_tuner() with an explicit estimator should use it, not the attached one."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    parent_tuner = tuner.transfer_learning_tuner(
        additional_parents={"p1", "p2"}, estimator=estimator
    )
    assert parent_tuner.warm_start_config.type == WarmStartTypes.TRANSFER_LEARNING
    assert parent_tuner.warm_start_config.parents == {tuner.latest_tuning_job.name, "p1", "p2"}
    assert parent_tuner.estimator == estimator and parent_tuner.estimator != tuner.estimator
def test_transfer_learning_tuner(sagemaker_session):
    """transfer_learning_tuner() without an estimator should reuse the attached estimator."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = HyperparameterTuner.attach(JOB_NAME, sagemaker_session=sagemaker_session)
    parent_tuner = tuner.transfer_learning_tuner(additional_parents={"p1", "p2"})
    assert parent_tuner.warm_start_config.type == WarmStartTypes.TRANSFER_LEARNING
    assert parent_tuner.warm_start_config.parents == {tuner.latest_tuning_job.name, "p1", "p2"}
    assert parent_tuner.estimator == tuner.estimator
@pytest.mark.parametrize(
    "estimator_dict,obj_metric_name_dict,param_ranges_dict,metric_def_dict",
    [
        (
            {ESTIMATOR_NAME: ESTIMATOR},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
        ),
        (
            {ESTIMATOR_NAME: ESTIMATOR, ESTIMATOR_NAME_TWO: ESTIMATOR_TWO},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME, ESTIMATOR_NAME_TWO: OBJECTIVE_METRIC_NAME_TWO},
            {
                ESTIMATOR_NAME: HYPERPARAMETER_RANGES,
                ESTIMATOR_NAME_TWO: {"gamma": ContinuousParameter(0, 1.5)},
            },
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
        ),
    ],
)
def test_create_tuner(estimator_dict, obj_metric_name_dict, param_ranges_dict, metric_def_dict):
    """HyperparameterTuner.create() should accept single- and multi-estimator dictionaries."""
    tuner = HyperparameterTuner.create(
        base_tuning_job_name=BASE_JOB_NAME,
        estimator_dict=estimator_dict,
        objective_metric_name_dict=obj_metric_name_dict,
        hyperparameter_ranges_dict=param_ranges_dict,
        metric_definitions_dict=metric_def_dict,
        strategy="Bayesian",
        objective_type="Minimize",
        max_jobs=MAX_JOBS,
        max_parallel_jobs=MAX_PARALLEL_JOBS,
        tags=TAGS,
        warm_start_config=WARM_START_CONFIG,
        early_stopping_type="Auto",
    )
    assert tuner is not None
    assert tuner.estimator_dict == estimator_dict
    assert tuner.objective_metric_name_dict == obj_metric_name_dict
    assert tuner._hyperparameter_ranges_dict == param_ranges_dict
    assert tuner.metric_definitions_dict == metric_def_dict
    assert tuner.base_tuning_job_name == BASE_JOB_NAME
    assert tuner.strategy == "Bayesian"
    assert tuner.objective_type == "Minimize"
    assert tuner.max_jobs == MAX_JOBS
    assert tuner.max_parallel_jobs == MAX_PARALLEL_JOBS
    assert tuner.tags == TAGS
    assert tuner.warm_start_config == WARM_START_CONFIG
    assert tuner.early_stopping_type == "Auto"
    assert tuner.sagemaker_session == SAGEMAKER_SESSION
@pytest.mark.parametrize(
    "estimator_dict,obj_metric_name_dict,param_ranges_dict,metric_def_dict,error_message",
    [
        (
            {},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            re.compile("At least one estimator should be provided"),
        ),
        (
            None,
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            re.compile("At least one estimator should be provided"),
        ),
        (
            {None: ESTIMATOR},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            "Estimator names cannot be None",
        ),
        (
            {ESTIMATOR_NAME: ESTIMATOR},
            OBJECTIVE_METRIC_NAME,
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            re.compile(
                "Argument 'objective_metric_name_dict' must be a dictionary using \\['estimator_name'\\] as keys"
            ),
        ),
        (
            {ESTIMATOR_NAME: ESTIMATOR},
            {ESTIMATOR_NAME + "1": OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            re.compile(
                "The keys of argument 'objective_metric_name_dict' must be the same as \\['estimator_name'\\]"
            ),
        ),
        (
            {ESTIMATOR_NAME: ESTIMATOR},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME + "1": HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME: METRIC_DEFINITIONS},
            re.compile(
                "The keys of argument 'hyperparameter_ranges_dict' must be the same as \\['estimator_name'\\]"
            ),
        ),
        (
            {ESTIMATOR_NAME: ESTIMATOR},
            {ESTIMATOR_NAME: OBJECTIVE_METRIC_NAME},
            {ESTIMATOR_NAME: HYPERPARAMETER_RANGES},
            {ESTIMATOR_NAME + "1": METRIC_DEFINITIONS},
            re.compile(
                "The keys of argument 'metric_definitions_dict' must be a subset of \\['estimator_name'\\]"
            ),
        ),
    ],
)
def test_create_tuner_negative(
    estimator_dict, obj_metric_name_dict, param_ranges_dict, metric_def_dict, error_message
):
    """Invalid dictionary arguments to create() should raise ValueError with a specific message."""
    with pytest.raises(ValueError, match=error_message):
        HyperparameterTuner.create(
            base_tuning_job_name=BASE_JOB_NAME,
            estimator_dict=estimator_dict,
            objective_metric_name_dict=obj_metric_name_dict,
            hyperparameter_ranges_dict=param_ranges_dict,
            metric_definitions_dict=metric_def_dict,
            strategy="Bayesian",
            objective_type="Minimize",
            max_jobs=MAX_JOBS,
            max_parallel_jobs=MAX_PARALLEL_JOBS,
            tags=TAGS,
        )
#################################################################################
# _ParameterRange Tests
def test_continuous_parameter():
    """ContinuousParameter is a ParameterRange whose type name is 'Continuous'."""
    cont_param = ContinuousParameter(0.1, 1e-2)
    assert isinstance(cont_param, ParameterRange)
    assert cont_param.__name__ == "Continuous"
def test_continuous_parameter_ranges():
    """A ContinuousParameter serializes min/max as strings with an 'Auto' scaling type."""
    tuning_range = ContinuousParameter(0.1, 1e-2).as_tuning_range("some")
    assert tuning_range == {
        "Name": "some",
        "MinValue": "0.1",
        "MaxValue": "0.01",
        "ScalingType": "Auto",
    }
def test_continuous_parameter_scaling_type():
    """An explicit scaling_type should be carried through into the serialized range."""
    cont_param = ContinuousParameter(0.1, 2, scaling_type="ReverseLogarithmic")
    cont_range = cont_param.as_tuning_range("range")
    assert cont_range["ScalingType"] == "ReverseLogarithmic"
def test_integer_parameter():
    """IntegerParameter is a ParameterRange whose type name is 'Integer'."""
    int_param = IntegerParameter(1, 2)
    assert isinstance(int_param, ParameterRange)
    assert int_param.__name__ == "Integer"
def test_integer_parameter_ranges():
    """An IntegerParameter serializes min/max as strings with an 'Auto' scaling type."""
    tuning_range = IntegerParameter(1, 2).as_tuning_range("some")
    assert tuning_range == {
        "Name": "some",
        "MinValue": "1",
        "MaxValue": "2",
        "ScalingType": "Auto",
    }
def test_integer_parameter_ranges_with_pipeline_parameter():
    """Pipeline variables used as range bounds should serialize to expression dicts."""
    # Renamed from 'min'/'max' so the builtins are not shadowed.
    min_value = ParameterInteger(name="p", default_value=2)
    max_value = JsonGet(step_name="sn", property_file="pf", json_path="jp")
    scale = ParameterString(name="scale", default_value="Auto")
    int_param = IntegerParameter(min_value, max_value)
    ranges = int_param.as_tuning_range("some")
    assert len(ranges.keys()) == 4
    assert ranges["Name"] == "some"
    # Pipeline variables are rendered as Std:Join expressions, not plain strings.
    assert ranges["MinValue"].expr == {
        "Std:Join": {
            "On": "",
            "Values": [
                {"Get": "Parameters.p"},
            ],
        }
    }
    assert ranges["MaxValue"].expr == {
        "Std:Join": {
            "On": "",
            "Values": [
                {
                    "Std:JsonGet": {
                        "PropertyFile": {"Get": "Steps.sn.PropertyFiles.pf"},
                        "Path": "jp",
                    }
                }
            ],
        }
    }
    assert ranges["ScalingType"] == scale
def test_integer_parameter_scaling_type():
    """An explicit scaling_type should be carried through into the serialized range."""
    int_param = IntegerParameter(2, 3, scaling_type="Linear")
    int_range = int_param.as_tuning_range("range")
    assert int_range["ScalingType"] == "Linear"
def test_categorical_parameter_list():
    """CategoricalParameter built from a list is a ParameterRange named 'Categorical'."""
    cat_param = CategoricalParameter(["a", "z"])
    assert isinstance(cat_param, ParameterRange)
    assert cat_param.__name__ == "Categorical"
def test_categorical_parameter_list_ranges():
    """Categorical values serialize to their string representations."""
    tuning_range = CategoricalParameter([1, 10]).as_tuning_range("some")
    assert tuning_range == {"Name": "some", "Values": ["1", "10"]}
def test_categorical_parameter_value():
    """CategoricalParameter also accepts a single scalar value."""
    cat_param = CategoricalParameter("a")
    assert isinstance(cat_param, ParameterRange)
def test_categorical_parameter_value_ranges():
    """A single scalar value becomes a one-element Values list."""
    tuning_range = CategoricalParameter("a").as_tuning_range("some")
    assert tuning_range == {"Name": "some", "Values": ["a"]}
#################################################################################
# _TuningJob Tests
def test_start_new(tuner, sagemaker_session):
    """_TuningJob.start_new() should create a tuning job on the session."""
    tuning_job = _TuningJob(sagemaker_session, JOB_NAME)
    tuner.static_hyperparameters = {}
    started_tuning_job = tuning_job.start_new(tuner, INPUTS)
    assert started_tuning_job.sagemaker_session == sagemaker_session
    sagemaker_session.create_tuning_job.assert_called_once()
def test_stop(sagemaker_session):
    """_TuningJob.stop() should forward the job name to the session."""
    job = _TuningJob(sagemaker_session, JOB_NAME)
    job.stop()
    sagemaker_session.stop_tuning_job.assert_called_once_with(name=JOB_NAME)
def test_tuning_job_wait(sagemaker_session):
    """_TuningJob.wait() should delegate to the session's wait_for_tuning_job."""
    sagemaker_session.wait_for_tuning_job = Mock(name="wait_for_tuning_job")
    tuning_job = _TuningJob(sagemaker_session, JOB_NAME)
    tuning_job.wait()
    sagemaker_session.wait_for_tuning_job.assert_called_once_with(JOB_NAME)
#################################################################################
# WarmStartConfig Tests
@pytest.mark.parametrize(
    "warm_start_type, parents",
    [
        (WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM, {"p1", "p2", "p3"}),
        (WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM, {"p1", "p3", "p3"}),
        (WarmStartTypes.TRANSFER_LEARNING, {"p3"}),
    ],
)
def test_warm_start_config_init(warm_start_type, parents):
    """WarmStartConfig should retain type/parents and round-trip via to_input_req().

    The parameter was renamed from 'type' to avoid shadowing the builtin.
    """
    warm_start_config = WarmStartConfig(warm_start_type=warm_start_type, parents=parents)
    assert warm_start_config.type == warm_start_type, "Warm start type initialization failed."
    assert warm_start_config.parents == set(
        parents
    ), "Warm start parents config initialization failed."
    warm_start_config_req = warm_start_config.to_input_req()
    assert warm_start_config.type == WarmStartTypes(warm_start_config_req["WarmStartType"])
    for parent in warm_start_config_req["ParentHyperParameterTuningJobs"]:
        assert parent["HyperParameterTuningJobName"] in parents
@pytest.mark.parametrize(
    "warm_start_type, parents",
    [
        ("InvalidType", {"p1", "p2", "p3"}),
        (None, {"p1", "p2", "p3"}),
        ("", {"p1", "p2", "p3"}),
        (WarmStartTypes.TRANSFER_LEARNING, None),
        (WarmStartTypes.TRANSFER_LEARNING, {}),
    ],
)
def test_warm_start_config_init_negative(warm_start_type, parents):
    """Invalid warm-start type or missing parents should raise ValueError.

    The parameter was renamed from 'type' to avoid shadowing the builtin.
    """
    with pytest.raises(ValueError):
        WarmStartConfig(warm_start_type=warm_start_type, parents=parents)
@pytest.mark.parametrize(
    "warm_start_config_req",
    [
        ({}),
        (None),
        ({"WarmStartType": "TransferLearning"}),
        ({"ParentHyperParameterTuningJobs": []}),
    ],
)
def test_prepare_warm_start_config_cls_negative(warm_start_config_req):
    """from_job_desc() should return None for incomplete warm-start descriptions."""
    warm_start_config = WarmStartConfig.from_job_desc(warm_start_config_req)
    assert warm_start_config is None, "Warm start config should be None for invalid type/parents"
@pytest.mark.parametrize(
    "warm_start_config_req",
    [
        (
            {
                "WarmStartType": "TransferLearning",
                "ParentHyperParameterTuningJobs": [
                    {"HyperParameterTuningJobName": "p1"},
                    {"HyperParameterTuningJobName": "p2"},
                ],
            }
        ),
        (
            {
                "WarmStartType": "IdenticalDataAndAlgorithm",
                "ParentHyperParameterTuningJobs": [
                    {"HyperParameterTuningJobName": "p1"},
                    {"HyperParameterTuningJobName": "p1"},
                ],
            }
        ),
    ],
)
def test_prepare_warm_start_config_cls(warm_start_config_req):
    """from_job_desc() should reconstruct type and parents from a valid description."""
    warm_start_config = WarmStartConfig.from_job_desc(warm_start_config_req)
    assert warm_start_config.type == WarmStartTypes(
        warm_start_config_req["WarmStartType"]
    ), "Warm start type initialization failed."
    for p in warm_start_config_req["ParentHyperParameterTuningJobs"]:
        assert (
            p["HyperParameterTuningJobName"] in warm_start_config.parents
        ), "Warm start parents config initialization failed."
@pytest.mark.parametrize("additional_parents", [{"p1", "p2"}, {}, None])
def test_create_identical_dataset_and_algorithm_tuner(sagemaker_session, additional_parents):
    """The factory should warm-start with the parent job plus any additional parents."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = create_identical_dataset_and_algorithm_tuner(
        parent=JOB_NAME, additional_parents=additional_parents, sagemaker_session=sagemaker_session
    )
    assert tuner.warm_start_config.type == WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM
    if additional_parents:
        # Empty set / None fall through to the else branch (parent job only).
        additional_parents.add(JOB_NAME)
        assert tuner.warm_start_config.parents == additional_parents
    else:
        assert tuner.warm_start_config.parents == {JOB_NAME}
@pytest.mark.parametrize("additional_parents", [{"p1", "p2"}, {}, None])
def test_create_transfer_learning_tuner(sagemaker_session, estimator, additional_parents):
    """The transfer-learning factory should attach the estimator and merge parent jobs."""
    job_details = copy.deepcopy(TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    tuner = create_transfer_learning_tuner(
        parent=JOB_NAME,
        additional_parents=additional_parents,
        sagemaker_session=sagemaker_session,
        estimator=estimator,
    )
    assert tuner.warm_start_config.type == WarmStartTypes.TRANSFER_LEARNING
    assert tuner.estimator == estimator
    if additional_parents:
        # Empty set / None fall through to the else branch (parent job only).
        additional_parents.add(JOB_NAME)
        assert tuner.warm_start_config.parents == additional_parents
    else:
        assert tuner.warm_start_config.parents == {JOB_NAME}
@pytest.mark.parametrize(
    "warm_start_type",
    [WarmStartTypes.TRANSFER_LEARNING, WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM],
)
def test_create_warm_start_tuner_with_multi_estimator_dict(
    sagemaker_session, estimator, warm_start_type
):
    """Warm-start factories should reject tuning jobs that define multiple estimators."""
    job_details = copy.deepcopy(MULTI_ALGO_TUNING_JOB_DETAILS)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    additional_parents = {"p1", "p2"}
    with pytest.raises(
        ValueError,
        match="Warm start is not supported currently for tuners with multiple estimators",
    ):
        if warm_start_type == WarmStartTypes.TRANSFER_LEARNING:
            create_transfer_learning_tuner(
                parent=JOB_NAME,
                additional_parents=additional_parents,
                sagemaker_session=sagemaker_session,
                estimator=estimator,
            )
        else:
            create_identical_dataset_and_algorithm_tuner(
                parent=JOB_NAME,
                additional_parents=additional_parents,
                sagemaker_session=sagemaker_session,
            )
@pytest.mark.parametrize(
    "warm_start_type",
    [WarmStartTypes.TRANSFER_LEARNING, WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM],
)
def test_create_warm_start_tuner_with_single_estimator_dict(
    sagemaker_session, estimator, warm_start_type
):
    """Warm-start factories should handle jobs described via a one-element 'TrainingJobDefinitions' list."""
    job_details = _convert_tuning_job_details(TUNING_JOB_DETAILS, ESTIMATOR_NAME)
    sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job = Mock(
        name="describe_tuning_job", return_value=job_details
    )
    additional_parents = {"p1", "p2"}
    if warm_start_type == WarmStartTypes.TRANSFER_LEARNING:
        tuner = create_transfer_learning_tuner(
            parent=JOB_NAME,
            additional_parents=additional_parents,
            sagemaker_session=sagemaker_session,
            estimator=estimator,
        )
    else:
        tuner = create_identical_dataset_and_algorithm_tuner(
            parent=JOB_NAME,
            additional_parents=additional_parents,
            sagemaker_session=sagemaker_session,
        )
    assert tuner.warm_start_config.type == warm_start_type
    # Single-estimator-dict tuners keep the estimator in estimator_dict, not .estimator.
    assert tuner.estimator is None
    assert tuner.estimator_dict is not None
    assert len(tuner.estimator_dict) == 1
    if warm_start_type == WarmStartTypes.TRANSFER_LEARNING:
        assert tuner.estimator_dict[ESTIMATOR_NAME] == estimator
    else:
        assert isinstance(tuner.estimator_dict[ESTIMATOR_NAME], PCA)
    additional_parents.add(JOB_NAME)
    assert tuner.warm_start_config.parents == additional_parents
def test_describe(tuner):
    """describe() should delegate to the session's describe_tuning_job."""
    tuner.describe()
    tuner.sagemaker_session.describe_tuning_job.assert_called_once()
def _convert_tuning_job_details(job_details, estimator_name):
    """Rewrite a tuning job description that uses the single 'TrainingJobDefinition'
    field into the equivalent one-element 'TrainingJobDefinitions' list form.
    """
    assert "TrainingJobDefinition" in job_details

    converted = copy.deepcopy(job_details)
    definition = converted.pop("TrainingJobDefinition")
    tuning_config = converted["HyperParameterTuningJobConfig"]

    # Each entry in 'TrainingJobDefinitions' requires a 'DefinitionName' and carries
    # its own objective and parameter ranges instead of the tuning-job-level ones.
    definition["DefinitionName"] = estimator_name
    definition["TuningObjective"] = tuning_config.pop("HyperParameterTuningJobObjective")
    definition["HyperParameterRanges"] = tuning_config.pop("ParameterRanges")

    converted["TrainingJobDefinitions"] = [definition]
    return converted
| 35.944116 | 117 | 0.714126 |
acfbe88d5f80eca5919e0cef66813205d3741535 | 469 | py | Python | tests/test_mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 134 | 2015-04-17T15:05:13.000Z | 2022-01-06T20:51:37.000Z | tests/test_mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 316 | 2015-04-22T07:40:46.000Z | 2021-11-08T12:09:02.000Z | tests/test_mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 80 | 2015-04-30T19:25:24.000Z | 2021-11-09T10:32:54.000Z | class TestMySQLCLi:
def test_create_mysql_client_command(self, return_bck_obj):
result = '/usr/bin/mysql --defaults-file= -uroot --password=12345 --socket=/var/run/mysqld/mysqld.sock -e "select 1"'
sql = "select 1"
assert return_bck_obj.mysql_cli.create_mysql_client_command(sql) == result
def test_mysql_run_command(self, return_bck_obj):
sql = "select 1"
assert return_bck_obj.mysql_cli.mysql_run_command(sql) is True
| 46.9 | 125 | 0.716418 |
acfbe8f0258fcf5103918eb85f861c5a1bf1b02c | 360 | py | Python | query-system-prototype/backend/initDB.py | jortner0210/ORNL-SULI-AI4HDR | e544e4ce242d362a2d3151855fefbb7253eca99e | [
"Unlicense"
] | null | null | null | query-system-prototype/backend/initDB.py | jortner0210/ORNL-SULI-AI4HDR | e544e4ce242d362a2d3151855fefbb7253eca99e | [
"Unlicense"
] | null | null | null | query-system-prototype/backend/initDB.py | jortner0210/ORNL-SULI-AI4HDR | e544e4ce242d362a2d3151855fefbb7253eca99e | [
"Unlicense"
] | null | null | null | import argparse
from Database import initDB
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--db_loc', default="./", help="Desired location for database.")
parser.add_argument('--db_name', default="newDatabase", help="Name of database.")
args = parser.parse_args()
initDB(args.db_loc, args.db_name)
| 24 | 88 | 0.702778 |
acfbe96103694ae6dc2ebf64f26064c855952da9 | 5,715 | py | Python | gamifiededucation/settings.py | wallysalami/gamified-education | 720a96c40f2cbd061979f86b8541b2f0b4b7cddd | [
"MIT"
] | 10 | 2018-07-04T18:21:00.000Z | 2021-12-25T07:41:12.000Z | gamifiededucation/settings.py | wallysalami/gamified-education | 720a96c40f2cbd061979f86b8541b2f0b4b7cddd | [
"MIT"
] | null | null | null | gamifiededucation/settings.py | wallysalami/gamified-education | 720a96c40f2cbd061979f86b8541b2f0b4b7cddd | [
"MIT"
] | 1 | 2021-09-06T17:57:53.000Z | 2021-09-06T17:57:53.000Z | """
Django settings for gamifiededucation project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import json
import dj_database_url
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Define folder and URL for uploads
MEDIA_ROOT = BASE_DIR + '/static/images/uploads/'
MEDIA_URL = '/media/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'DONT_USE_THIS_IN_PRODUCTION')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'True') == 'True'
ALLOWED_HOSTS = json.loads(os.environ.get('ALLOWED_HOSTS', '["*"]'))
# SECURITY WARNING: set those to True if website uses HTTPS
CSRF_COOKIE_SECURE = os.environ.get('CSRF_COOKIE_SECURE', 'False') == 'True'
SESSION_COOKIE_SECURE = os.environ.get('SESSION_COOKIE_SECURE', 'False') == 'True'
SECURE_SSL_REDIRECT = os.environ.get('SECURE_SSL_REDIRECT', 'False') == 'True'
# SECURITY WARNING: recommended Django settings
SECURE_CONTENT_TYPE_NOSNIFF = os.environ.get('SECURE_CONTENT_TYPE_NOSNIFF', 'True') == 'True'
SECURE_BROWSER_XSS_FILTER = os.environ.get('SECURE_BROWSER_XSS_FILTER', 'True') == 'True'
X_FRAME_OPTIONS = os.environ.get('X_FRAME_OPTIONS', 'DENY')
# Application definition
INSTALLED_APPS = [
'material.theme.amber',
'material',
'jquery',
'captcha',
'course.apps.CourseConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gamifiededucation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), './templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gamifiededucation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DATABASE_NAME', 'gamified_education'),
'USER': os.environ.get('DATABASE_USER', 'postgres'),
'PASSWORD': os.environ.get('DATABASE_PASSWORD', ''),
'HOST': os.environ.get('DATABASE_HOST', '127.0.0.1'),
'PORT': os.environ.get('DATABASE_PORT', '5432'),
}
}
DATABASES['default'].update(dj_database_url.config(conn_max_age=500))
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = ['course.backend.UsernameOrEmailBackend']
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = os.environ.get('LANGUAGE_CODE', 'en')
LANGUAGES = [
('pt-br', _('Portuguese (Brazil)')),
('en', _('English')),
]
LOCALE_PATHS = (os.path.join(BASE_DIR, 'course/locale'), os.path.join(BASE_DIR, 'gamifiededucation/locale'))
TIME_ZONE = os.environ.get('TIME_ZONE', 'UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Email settings
EMAIL_HOST = os.environ.get('EMAIL_HOST', '')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 25)
EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS', False)
EMAIL_REPLY_TO = json.loads(os.environ.get('EMAIL_REPLY_TO', "[]"))
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', '')
RECAPTCHA_PUBLIC_KEY = os.environ.get('RECAPTCHA_PUBLIC_KEY', '')
RECAPTCHA_PRIVATE_KEY = os.environ.get('RECAPTCHA_PRIVATE_KEY', '')
NOCAPTCHA = True
PASSWORD_RESET_TIMEOUT_DAYS = 30
| 32.288136 | 108 | 0.714086 |
acfbea74e67420975b7f4dce10862ebdc3684e17 | 3,353 | py | Python | siliconcompiler/tools/ghdl/ghdl.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | siliconcompiler/tools/ghdl/ghdl.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | siliconcompiler/tools/ghdl/ghdl.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
import siliconcompiler
#####################################################################
# Make Docs
#####################################################################
def make_docs():
    '''
    GHDL is an open-source analyzer, compiler, simulator and
    (experimental) synthesizer for VHDL. It allows you to analyse
    and elaborate sources for generating machine code from your design.
    Native program execution is the only way for high speed simulation.

    Documentation: https://ghdl.readthedocs.io/en/latest

    Sources: https://github.com/ghdl/ghdl

    Installation: https://github.com/ghdl/ghdl

    '''
    # Build a throwaway Chip with placeholder step/index args so setup()
    # can resolve its per-step configuration for documentation purposes.
    chip = siliconcompiler.Chip()
    for key, value in (('step', 'import'), ('index', '<index>')):
        chip.set('arg', key, value)
    setup(chip)
    return chip
################################
# Setup Tool (pre executable)
################################
def setup(chip):
    '''Per-tool function that fills in the ghdl tool settings in the
    chip's schema based on the current step/index.
    '''
    tool = 'ghdl'
    clobber = False
    step = chip.get('arg', 'step')
    index = chip.get('arg', 'index')

    # Tool-global settings (identical for every step/index).
    global_settings = (
        ('copy', 'false'),
        ('exe', 'ghdl'),
        ('vswitch', '--version'),
        ('version', '2.0.0-dev'),
    )
    for key, value in global_settings:
        chip.set('eda', tool, key, value, clobber=clobber)

    # Per-step settings.
    for key, value in (('threads', '4'), ('option', '')):
        chip.set('eda', tool, key, step, index, value, clobber=clobber)

    # Schema requirements: ghdl needs at least one source file.
    chip.add('eda', tool, 'require', step, index, 'source')
################################
# Custom runtime options
################################
def runtime_options(chip):
    '''Custom runtime options; returns the list of command line options.
    '''
    step = chip.get('arg', 'step')
    index = chip.get('arg', 'index')

    # Synthesize the inputs and emit a Verilog netlist.
    cmdline = ['--synth', '--out=verilog']

    # Input VHDL sources.
    for src in chip.find_files('source'):
        cmdline.append(src)

    # Elaborate the design's top-level entity.
    cmdline += ['-e', chip.get('design')]
    return cmdline
################################
# Version Check
################################
def parse_version(stdout):
    """Extract the version token from `ghdl --version` output.

    The first line looks like:
    GHDL 2.0.0-dev (1.0.0.r827.ge49cb7b9) [Dunoon edition]
    so the version is the second whitespace-separated token.
    """
    tokens = stdout.split()
    return tokens[1]
################################
# Post_process (post executable)
################################
def post_process(chip):
    '''Tool-specific function to run after step execution.
    '''
    # Hack: ghdl writes its netlist to stdout, so the captured step log
    # *is* the Verilog output; produce outputs/<design>.v by copying it.
    design = chip.get('design')
    step = chip.get('arg', 'step')
    src = f'{step}.log'
    dst = os.path.join('outputs', f'{design}.v')
    shutil.copyfile(src, dst)
    return 0
##################################################
if __name__ == "__main__":
    # File being executed
    prefix = os.path.splitext(os.path.basename(__file__))[0]
    output = prefix + '.json'

    # Create a chip instance.
    chip = siliconcompiler.Chip()

    # Load the configuration. setup() takes only the chip and reads
    # step/index from the chip's 'arg' keys; the previous call
    # `setup(chip, step='import', index='0')` raised TypeError because
    # setup() accepts no such keyword arguments.
    chip.set('arg', 'step', 'import')
    chip.set('arg', 'index', '0')
    setup(chip)

    # Write out results.
    chip.writecfg(output)
| 26.611111 | 79 | 0.560692 |
acfbea924a512dd49450278b437542a2f92cf0e4 | 1,225 | py | Python | lib/db/__init__.py | dmuth/twitter-sentiment-analysis | e8d5960f6cacfb2db0e4b30956e7cae422da8ffd | [
"Apache-2.0"
] | 2 | 2018-08-08T00:29:31.000Z | 2020-03-02T13:50:13.000Z | lib/db/__init__.py | dmuth/twitter-sentiment-analysis | e8d5960f6cacfb2db0e4b30956e7cae422da8ffd | [
"Apache-2.0"
] | 1 | 2021-06-01T22:23:37.000Z | 2021-06-01T22:23:37.000Z | lib/db/__init__.py | dmuth/twitter-sentiment-analysis | e8d5960f6cacfb2db0e4b30956e7cae422da8ffd | [
"Apache-2.0"
] | 1 | 2021-08-01T19:24:28.000Z | 2021-08-01T19:24:28.000Z | #
# This class is used as our database wrapper.
#
import sqlite3
class db():
conn = ""
def __init__(self):
#self.conn = sqlite3.connect("tweets.db")
self.conn = sqlite3.connect("tweets.db", 10)
# Autocommit
self.conn.isolation_level = None
def execute(self, query, args = None):
#
# Doing some type checking, as I got caught by this too many times.
#
if type(args) != type(None) and type(args) != type([]) and type(args) != type(()):
raise("Second argument, if present, must be a list!")
if (args):
retval = self.conn.execute(query, args)
else:
retval = self.conn.execute(query)
return(retval)
#
# Wrapper to create a table if it does not exist
#
# @param string table The name of the table to create
# @param string settings A string with the schema for the table.
#
def createTable(self, table, settings):
#
# Query to see if the table already exists. If it does, stop.
#
query = "SELECT name from sqlite_master WHERE name = '%s'" % table
results = self.conn.execute(query)
for row in results:
return True
#
# Table doesn't exist? Go forth and create it!
#
query = ("CREATE TABLE %s (%s)" % (table, settings))
self.conn.execute(query)
| 20.081967 | 84 | 0.656327 |
acfbeb183e8792ee7c057d2f7200e273f0a15bda | 348 | py | Python | symspellpy/__init__.py | clinia/symspellpy | 892f821a0f16220a21ca7adb695fe7b61db6e240 | [
"MIT"
] | 562 | 2018-08-14T06:41:00.000Z | 2022-03-31T08:20:37.000Z | symspellpy/__init__.py | clinia/symspellpy | 892f821a0f16220a21ca7adb695fe7b61db6e240 | [
"MIT"
] | 107 | 2018-08-17T03:16:20.000Z | 2022-03-09T16:17:56.000Z | symspellpy/__init__.py | clinia/symspellpy | 892f821a0f16220a21ca7adb695fe7b61db6e240 | [
"MIT"
] | 111 | 2018-08-15T07:48:33.000Z | 2022-01-31T17:42:01.000Z | """symspellpy
.. moduleauthor:: mmb L <mammothb@hotmail.com>
.. moduleauthor:: Wolf Garbe <wolf.garbe@faroo.com>
"""
from .__version__ import (__title__, __description__, __version__,
__author__, __author_email__, __license__)
from . import editdistance
from . import helpers
from .symspellpy import SymSpell, Verbosity
| 29 | 68 | 0.721264 |
acfbec73f26a2e82cc148183cf9db60d40afa3f1 | 34,264 | py | Python | reagent/types.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | reagent/types.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | reagent/types.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
import logging
# The dataclasses in this file should be vanilla dataclass to have minimal overhead
from dataclasses import dataclass, field
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
# Triggering registration to registries
import reagent.core.result_types # noqa
import torch
import torch.nn.functional as F
from reagent.base_dataclass import BaseDataClass
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass as pydantic_dataclass
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.model_utils.seq2slate_utils import DECODER_START_SYMBOL, subsequent_mask
from reagent.preprocessing.types import InputColumn
from reagent.torch_utils import gather
if IS_FB_ENVIRONMENT:
import reagent.core.fb.fb_result_types # noqa
class NoDuplicatedWarningLogger:
    """Wraps a logger so each distinct warning message is emitted only once."""

    def __init__(self, logger):
        self.logger = logger
        self.msg = set()

    def warning(self, msg):
        """Log `msg` at WARNING level unless it was already logged before."""
        if msg in self.msg:
            return
        self.logger.warning(msg)
        self.msg.add(msg)
# Module-level logger, plus a deduplicating wrapper so repeated shape
# warnings (see FeatureData.__post_init__) are only printed once.
logger = logging.getLogger(__name__)
no_dup_logger = NoDuplicatedWarningLogger(logger)
def isinstance_namedtuple(x):
    """Heuristically detect namedtuple instances: tuples carrying `_fields`."""
    if not isinstance(x, tuple):
        return False
    return hasattr(x, "_fields")
@dataclass
class TensorDataClass(BaseDataClass):
    """
    Base dataclass whose fields may be tensors (possibly nested in dicts,
    tuples, or other TensorDataClass instances).

    Any torch.Tensor method called on an instance (e.g. ``.float()``,
    ``.detach()``) is broadcast to every tensor-valued field and returns a
    new instance of the same type; ``cuda()`` is specialized below.
    """

    def __getattr__(self, attr):
        # Never intercept dunder lookups (pickling, copying, etc.).
        if attr.startswith("__") and attr.endswith("__"):
            raise AttributeError
        tensor_attr = getattr(torch.Tensor, attr, None)
        if tensor_attr is None or not callable(tensor_attr):
            logger.error(
                f"Attempting to call {self.__class__.__name__}.{attr} on "
                f"{type(self)} (instance of TensorDataClass)."
            )
            if tensor_attr is None:
                raise AttributeError(
                    f"{self.__class__.__name__}doesn't have {attr} attribute."
                )
            else:
                raise RuntimeError(f"{self.__class__.__name__}.{attr} is not callable.")

        def continuation(*args, **kwargs):
            def f(v):
                # Apply v.attr(*args, **kwargs) where possible, recursing
                # into dicts and tuples; other values pass through unchanged.
                if isinstance(v, (torch.Tensor, TensorDataClass)):
                    return getattr(v, attr)(*args, **kwargs)
                elif isinstance(v, dict):
                    return {kk: f(vv) for kk, vv in v.items()}
                elif isinstance(v, tuple):
                    return tuple(f(vv) for vv in v)
                return v

            return type(self)(**f(self.__dict__))

        return continuation

    def cuda(self, *args, **kwargs):
        """Move every tensor field to CUDA; non_blocking defaults to True."""
        cuda_tensor = {}
        for k, v in self.__dict__.items():  # noqa F402
            if isinstance(v, torch.Tensor):
                kwargs["non_blocking"] = kwargs.get("non_blocking", True)
                cuda_tensor[k] = v.cuda(*args, **kwargs)
            elif isinstance(v, TensorDataClass):
                cuda_tensor[k] = v.cuda(*args, **kwargs)
            else:
                cuda_tensor[k] = v
        return type(self)(**cuda_tensor)
# Type aliases for sparse ID-list features (jagged tensors).
# (offset, value)
IdListFeatureValue = Tuple[torch.Tensor, torch.Tensor]
# (offset, key, value)
IdScoreListFeatureValue = Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
# name -> value  (training-time variants are keyed by feature name)
IdListFeature = Dict[str, IdListFeatureValue]
IdScoreListFeature = Dict[str, IdScoreListFeatureValue]
# id -> value  (serving-time variants are keyed by integer feature id)
ServingIdListFeature = Dict[int, IdListFeatureValue]
ServingIdScoreListFeature = Dict[int, IdScoreListFeatureValue]
#####
# FIXME: These config types are misplaced but we need to write FBL config adapter
# if we moved them.
######
@pydantic_dataclass
class IdListFeatureConfig(BaseDataClass):
    """Config for one sparse id-list feature (a list of ids per example)."""

    name: str
    # integer feature ID
    feature_id: int
    # name of the embedding table to use
    id_mapping_name: str
@pydantic_dataclass
class IdScoreListFeatureConfig(BaseDataClass):
    """Config for one sparse id-score-list feature (id -> score per example)."""

    name: str
    # integer feature ID
    feature_id: int
    # name of the embedding table to use
    id_mapping_name: str
@pydantic_dataclass
class FloatFeatureInfo(BaseDataClass):
    """Name/id pair describing one dense float feature."""

    name: str
    feature_id: int
@pydantic_dataclass
class IdMapping(object):
    """Maps raw feature ids to contiguous embedding-table row indices."""

    __hash__ = param_hash

    ids: List[int] = field(default_factory=list)

    def __post_init_post_parse__(self):
        """
        used in preprocessing
        ids list represents mapping from idx -> value
        we want the reverse: from feature to embedding table indices
        """
        # Inverse mapping is built lazily; see `id2index` below.
        self._id2index: Dict[int, int] = {}

    @property
    def id2index(self) -> Dict[int, int]:
        # pyre-fixme[16]: `IdMapping` has no attribute `_id2index`.
        if not self._id2index:
            self._id2index = {id: i for i, id in enumerate(self.ids)}
        return self._id2index

    @property
    def table_size(self):
        # Number of rows needed in the backing embedding table.
        return len(self.ids)
@pydantic_dataclass
class ModelFeatureConfig(BaseDataClass):
    """
    Declares the dense and sparse features a model consumes, plus the
    id-mapping tables that back the sparse features' embedding lookups.
    Builds bidirectional id<->name<->config lookups after parsing.
    """

    float_feature_infos: List[FloatFeatureInfo] = field(default_factory=list)
    # table name -> id mapping
    id_mapping_config: Dict[str, IdMapping] = field(default_factory=dict)
    # id_list_feature_configs is feature_id -> list of values
    id_list_feature_configs: List[IdListFeatureConfig] = field(default_factory=list)
    # id_score_list_feature_configs is feature_id -> (keys -> values)
    id_score_list_feature_configs: List[IdScoreListFeatureConfig] = field(
        default_factory=list
    )

    def __post_init_post_parse__(self):
        both_lists = self.id_list_feature_configs + self.id_score_list_feature_configs
        if not self.only_dense:
            # sanity check for keys in mapping config
            ids = [config.feature_id for config in both_lists]
            names = [config.name for config in both_lists]
            assert len(ids) == len(set(ids)), f"duplicates in ids: {ids}"
            assert len(names) == len(set(names)), f"duplicates in names: {names}"
            assert len(ids) == len(names), f"{len(ids)} != {len(names)}"
        # Bidirectional lookup tables between feature ids, names, and configs.
        self._id2name = {config.feature_id: config.name for config in both_lists}
        self._name2id = {config.name: config.feature_id for config in both_lists}
        self._id2config = {config.feature_id: config for config in both_lists}
        self._name2config = {config.name: config for config in both_lists}

    @property
    def only_dense(self):
        # True when no sparse (id-list / id-score-list) features are declared.
        return not (self.id_list_feature_configs or self.id_score_list_feature_configs)

    @property
    def id2name(self):
        return self._id2name

    @property
    def name2id(self):
        return self._name2id

    @property
    def id2config(self):
        return self._id2config

    @property
    def name2config(self):
        return self._name2config
######
# dataclasses for internal API
######
@dataclass
class ValuePresence(TensorDataClass):
    """A dense value tensor paired with an optional presence mask."""

    value: torch.Tensor
    presence: Optional[torch.Tensor]
@dataclass
class ActorOutput(TensorDataClass):
    """Output of an actor network: the action plus optional policy stats."""

    action: torch.Tensor
    log_prob: Optional[torch.Tensor] = None
    squashed_mean: Optional[torch.Tensor] = None
@dataclass
class DocList(TensorDataClass):
    """A batch of candidate documents with per-candidate mask and value."""

    # the shape is (batch_size, num_candidates, num_document_features)
    float_features: torch.Tensor
    # the shapes below are (batch_size, num_candidates)
    # mask indicates whether the candidate is present or not; its dtype is torch.bool
    # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`.
    mask: torch.Tensor = None
    # value is context dependent; it could be action probability or the score
    # of the document from another model
    # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`.
    value: torch.Tensor = None

    def __post_init__(self):
        # Default mask/value to all-present / all-ones when not provided.
        assert (
            len(self.float_features.shape) == 3
        ), f"Unexpected shape: {self.float_features.shape}"
        if self.mask is None:
            self.mask = self.float_features.new_ones(
                self.float_features.shape[:2], dtype=torch.bool
            )
        if self.value is None:
            self.value = self.float_features.new_ones(self.float_features.shape[:2])

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def select_slate(self, action: torch.Tensor):
        # Gather docs at the candidate indices given by `action`
        # (batch_size x slate_size), preserving per-doc mask and value.
        row_idx = torch.repeat_interleave(
            torch.arange(action.shape[0]).unsqueeze(1), action.shape[1], dim=1
        )
        mask = self.mask[row_idx, action]
        # Make sure the indices are in the right range
        assert mask.to(torch.bool).all()
        float_features = self.float_features[row_idx, action]
        value = self.value[row_idx, action]
        return DocList(float_features, mask, value)

    def as_feature_data(self):
        # Flatten (batch, slate, dim) docs into a 2D FeatureData batch.
        _batch_size, _slate_size, feature_dim = self.float_features.shape
        return FeatureData(self.float_features.view(-1, feature_dim))
@dataclass
class FeatureData(TensorDataClass):
    """Container for one batch of model inputs: dense, sparse, and docs."""

    # For dense features, shape is (batch_size, feature_dim)
    float_features: torch.Tensor
    id_list_features: IdListFeature = dataclasses.field(default_factory=dict)
    id_score_list_features: IdScoreListFeature = dataclasses.field(default_factory=dict)
    # For sequence, shape is (stack_size, batch_size, feature_dim)
    stacked_float_features: Optional[torch.Tensor] = None
    # For ranking algos,
    candidate_docs: Optional[DocList] = None
    # Experimental: sticking this here instead of putting it in float_features
    # because a lot of places derive the shape of float_features from
    # normalization parameters.
    time_since_first: Optional[torch.Tensor] = None

    def __post_init__(self):
        # Warn (once) on 3D float_features; hard-fail on any other non-2D rank.
        def usage():
            return (
                f"For sequence features, use `stacked_float_features`."
                f"For document features, use `candidate_doc_float_features`."
            )

        if self.float_features.ndim == 3:
            no_dup_logger.warning(f"`float_features` should be 2D.\n{usage()}")
        elif self.float_features.ndim != 2:
            raise ValueError(
                f"float_features should be 2D; got {self.float_features.shape}.\n{usage()}"
            )

    @property
    def has_float_features_only(self) -> bool:
        return (
            not self.id_list_features
            and self.time_since_first is None
            and self.candidate_docs is None
        )

    def get_tiled_batch(self, num_tiles: int):
        """Repeat each row `num_tiles` times along the batch dimension."""
        assert (
            self.has_float_features_only
        ), f"only works for float features now: {self}"
        """
        tiled_feature should be (batch_size * num_tiles, feature_dim)
        forall i in [batch_size],
        tiled_feature[i*num_tiles:(i+1)*num_tiles] should be feat[i]
        """
        feat = self.float_features
        assert (
            len(feat.shape) == 2
        ), f"Need feat shape to be (batch_size, feature_dim), got {feat.shape}."
        batch_size, _ = feat.shape
        tiled_feat = feat.repeat_interleave(repeats=num_tiles, dim=0)
        return FeatureData(float_features=tiled_feat)

    def concat_user_doc(self):
        """Pair each state row with every candidate doc and concat features."""
        assert not self.has_float_features_only, "only works when DocList present"
        assert self.float_features.dim() == 2  # batch_size x state_dim
        batch_size, state_dim = self.float_features.shape
        # batch_size x num_docs x candidate_dim
        assert self.candidate_docs.float_features.dim() == 3
        assert len(self.candidate_docs.float_features) == batch_size
        _, num_docs, candidate_dim = self.candidate_docs.float_features.shape
        state_tiled = (
            torch.repeat_interleave(self.float_features, num_docs, dim=0)
            .reshape(batch_size, num_docs, state_dim)
            .float()
        )
        return torch.cat((state_tiled, self.candidate_docs.float_features), dim=2)
def _embed_states(x: FeatureData) -> FeatureData:
    """
    Get a dense feature vector from the float and candidate-doc features.

    The state is concatenated with the flattened candidate docs, producing
    float_features of shape (batch_size, state_dim + num_candidates * candidate_dim).
    TODO: make this an embedder.
    """
    assert x.candidate_docs is not None
    state = x.float_features
    # pyre-fixme[16]: `Optional` has no attribute `float_features`.
    candidates = x.candidate_docs.float_features
    n = state.shape[0]
    assert len(state.shape) == 2, f"{state.shape} != (batch_size, user_dim)"
    assert (
        len(candidates.shape) == 3
    ), f"{candidates.shape} != (batch_size, num_candidates, candidate_dim)"
    assert candidates.shape[0] == n, f"{candidates.shape} 0th dim != {n}"
    # TODO: have an embedder here
    # NOTE: mean aggregation is not very effective here
    candidates_embedding = candidates.view(n, -1)
    dense = torch.cat([state, candidates_embedding], dim=1)
    return FeatureData(float_features=dense)
class TensorFeatureData(torch.nn.Module):
    """
    Primarily for using in nn.Sequential
    """

    def forward(self, input: torch.Tensor) -> FeatureData:
        # Wrap a raw tensor so modules expecting FeatureData can be chained.
        assert isinstance(input, torch.Tensor)
        return FeatureData(input)
class ServingFeatureData(NamedTuple):
    # Serving-time inputs: dense (value, presence) plus sparse features
    # keyed by integer feature id.
    float_features_with_presence: Tuple[torch.Tensor, torch.Tensor]
    id_list_features: ServingIdListFeature
    id_score_list_features: ServingIdScoreListFeature
@dataclass
class ExtraData(TensorDataClass):
    """Optional per-example metadata carried alongside training batches."""

    mdp_id: Optional[torch.Tensor] = None
    sequence_number: Optional[torch.Tensor] = None
    action_probability: Optional[torch.Tensor] = None
    max_num_actions: Optional[int] = None
    metrics: Optional[torch.Tensor] = None

    @classmethod
    def from_dict(cls, d):
        # Pull only the declared fields from `d`; missing keys become None.
        return cls(**{f.name: d.get(f.name, None) for f in dataclasses.fields(cls)})
@dataclass
class PreprocessedRankingInput(TensorDataClass):
    """
    Input batch for seq2slate-style ranking models.

    Index tensors (`*_idx`) are offset by +2 because index 0 is the padding
    symbol and index 1 is the decoder start symbol. Use `from_input` to build
    the derived index/mask fields from raw tensors; `from_tensors` wraps
    already-derived tensors. Constructing the dataclass directly with raw
    tensors in the FeatureData fields is rejected by `__post_init__`.
    """

    state: FeatureData
    src_seq: FeatureData
    src_src_mask: Optional[torch.Tensor] = None
    tgt_in_seq: Optional[FeatureData] = None
    tgt_out_seq: Optional[FeatureData] = None
    tgt_tgt_mask: Optional[torch.Tensor] = None
    slate_reward: Optional[torch.Tensor] = None
    position_reward: Optional[torch.Tensor] = None
    # all indices will be +2 to account for padding
    # symbol (0) and decoder_start_symbol (1)
    src_in_idx: Optional[torch.Tensor] = None
    tgt_in_idx: Optional[torch.Tensor] = None
    tgt_out_idx: Optional[torch.Tensor] = None
    tgt_out_probs: Optional[torch.Tensor] = None
    # store ground-truth target sequences
    optim_tgt_in_idx: Optional[torch.Tensor] = None
    optim_tgt_out_idx: Optional[torch.Tensor] = None
    optim_tgt_in_seq: Optional[FeatureData] = None
    optim_tgt_out_seq: Optional[FeatureData] = None
    extras: Optional[ExtraData] = field(default_factory=ExtraData)

    def batch_size(self) -> int:
        return self.state.float_features.size()[0]

    @classmethod
    def from_input(
        cls,
        state: torch.Tensor,
        candidates: torch.Tensor,
        device: torch.device,
        action: Optional[torch.Tensor] = None,
        optimal_action: Optional[torch.Tensor] = None,
        logged_propensities: Optional[torch.Tensor] = None,
        slate_reward: Optional[torch.Tensor] = None,
        position_reward: Optional[torch.Tensor] = None,
        extras: Optional[ExtraData] = None,
    ):
        """
        Build derived fields (indices & masks) from raw input
        """
        # Shape checking
        assert len(state.shape) == 2
        assert len(candidates.shape) == 3
        state = state.to(device)
        candidates = candidates.to(device)

        if action is not None:
            assert len(action.shape) == 2
            action = action.to(device)
        if logged_propensities is not None:
            assert (
                len(logged_propensities.shape) == 2
                and logged_propensities.shape[1] == 1
            )
            logged_propensities = logged_propensities.to(device)

        batch_size, candidate_num, candidate_dim = candidates.shape
        if slate_reward is not None:
            assert len(slate_reward.shape) == 2 and slate_reward.shape[1] == 1
            slate_reward = slate_reward.to(device)
        if position_reward is not None:
            # pyre-fixme[16]: `Optional` has no attribute `shape`.
            assert position_reward.shape == action.shape
            position_reward = position_reward.to(device)

        # Source indices are simply 0..candidate_num-1 shifted by +2.
        src_in_idx = (
            torch.arange(candidate_num, device=device).repeat(batch_size, 1) + 2
        )
        src_src_mask = (
            (torch.ones(batch_size, candidate_num, candidate_num))
            .type(torch.int8)
            .to(device)
        )

        def process_tgt_seq(action):
            # Derive the decoder input/output index sequences, the gathered
            # feature sequences, and the causal mask from a slate `action`.
            if action is not None:
                _, output_size = action.shape
                # Account for decoder starting symbol and padding symbol
                candidates_augment = torch.cat(
                    (
                        torch.zeros(batch_size, 2, candidate_dim, device=device),
                        candidates,
                    ),
                    dim=1,
                )
                tgt_out_idx = action + 2
                tgt_in_idx = torch.full(
                    (batch_size, output_size), DECODER_START_SYMBOL, device=device
                )
                # Decoder input at step t is the output of step t-1.
                tgt_in_idx[:, 1:] = tgt_out_idx[:, :-1]
                tgt_out_seq = gather(candidates_augment, tgt_out_idx)
                tgt_in_seq = torch.zeros(
                    batch_size, output_size, candidate_dim, device=device
                )
                tgt_in_seq[:, 1:] = tgt_out_seq[:, :-1]
                tgt_tgt_mask = subsequent_mask(output_size, device)
            else:
                tgt_in_idx = None
                tgt_out_idx = None
                tgt_in_seq = None
                tgt_out_seq = None
                tgt_tgt_mask = None
            return tgt_in_idx, tgt_out_idx, tgt_in_seq, tgt_out_seq, tgt_tgt_mask

        (
            tgt_in_idx,
            tgt_out_idx,
            tgt_in_seq,
            tgt_out_seq,
            tgt_tgt_mask,
        ) = process_tgt_seq(action)
        (
            optim_tgt_in_idx,
            optim_tgt_out_idx,
            optim_tgt_in_seq,
            optim_tgt_out_seq,
            _,
        ) = process_tgt_seq(optimal_action)

        return cls.from_tensors(
            state=state,
            src_seq=candidates,
            src_src_mask=src_src_mask,
            tgt_in_seq=tgt_in_seq,
            tgt_out_seq=tgt_out_seq,
            tgt_tgt_mask=tgt_tgt_mask,
            slate_reward=slate_reward,
            position_reward=position_reward,
            src_in_idx=src_in_idx,
            tgt_in_idx=tgt_in_idx,
            tgt_out_idx=tgt_out_idx,
            tgt_out_probs=logged_propensities,
            optim_tgt_in_idx=optim_tgt_in_idx,
            optim_tgt_out_idx=optim_tgt_out_idx,
            optim_tgt_in_seq=optim_tgt_in_seq,
            optim_tgt_out_seq=optim_tgt_out_seq,
            extras=extras,
        )

    @classmethod
    def from_tensors(
        cls,
        state: torch.Tensor,
        src_seq: torch.Tensor,
        src_src_mask: Optional[torch.Tensor] = None,
        tgt_in_seq: Optional[torch.Tensor] = None,
        tgt_out_seq: Optional[torch.Tensor] = None,
        tgt_tgt_mask: Optional[torch.Tensor] = None,
        slate_reward: Optional[torch.Tensor] = None,
        position_reward: Optional[torch.Tensor] = None,
        src_in_idx: Optional[torch.Tensor] = None,
        tgt_in_idx: Optional[torch.Tensor] = None,
        tgt_out_idx: Optional[torch.Tensor] = None,
        tgt_out_probs: Optional[torch.Tensor] = None,
        optim_tgt_in_idx: Optional[torch.Tensor] = None,
        optim_tgt_out_idx: Optional[torch.Tensor] = None,
        optim_tgt_in_seq: Optional[torch.Tensor] = None,
        optim_tgt_out_seq: Optional[torch.Tensor] = None,
        extras: Optional[ExtraData] = None,
        **kwargs,
    ):
        """Wrap already-derived tensors, validating types along the way."""
        assert isinstance(state, torch.Tensor)
        assert isinstance(src_seq, torch.Tensor)
        assert src_src_mask is None or isinstance(src_src_mask, torch.Tensor)
        assert tgt_in_seq is None or isinstance(tgt_in_seq, torch.Tensor)
        assert tgt_out_seq is None or isinstance(tgt_out_seq, torch.Tensor)
        assert tgt_tgt_mask is None or isinstance(tgt_tgt_mask, torch.Tensor)
        assert slate_reward is None or isinstance(slate_reward, torch.Tensor)
        assert position_reward is None or isinstance(position_reward, torch.Tensor)
        assert src_in_idx is None or isinstance(src_in_idx, torch.Tensor)
        assert tgt_in_idx is None or isinstance(tgt_in_idx, torch.Tensor)
        assert tgt_out_idx is None or isinstance(tgt_out_idx, torch.Tensor)
        assert tgt_out_probs is None or isinstance(tgt_out_probs, torch.Tensor)
        # FIX: the second of these two asserts previously re-checked
        # `optim_tgt_out_idx` (copy-paste), leaving `optim_tgt_in_idx`
        # unvalidated.
        assert optim_tgt_in_idx is None or isinstance(optim_tgt_in_idx, torch.Tensor)
        assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor)
        assert optim_tgt_in_seq is None or isinstance(optim_tgt_in_seq, torch.Tensor)
        assert optim_tgt_out_seq is None or isinstance(optim_tgt_out_seq, torch.Tensor)
        assert extras is None or isinstance(extras, ExtraData)

        return cls(
            state=FeatureData(float_features=state),
            src_seq=FeatureData(float_features=src_seq),
            src_src_mask=src_src_mask,
            tgt_in_seq=FeatureData(float_features=tgt_in_seq)
            if tgt_in_seq is not None
            else None,
            tgt_out_seq=FeatureData(float_features=tgt_out_seq)
            if tgt_out_seq is not None
            else None,
            tgt_tgt_mask=tgt_tgt_mask,
            slate_reward=slate_reward,
            position_reward=position_reward,
            src_in_idx=src_in_idx,
            tgt_in_idx=tgt_in_idx,
            tgt_out_idx=tgt_out_idx,
            tgt_out_probs=tgt_out_probs,
            optim_tgt_in_idx=optim_tgt_in_idx,
            optim_tgt_out_idx=optim_tgt_out_idx,
            optim_tgt_in_seq=FeatureData(float_features=optim_tgt_in_seq)
            if optim_tgt_in_seq is not None
            else None,
            optim_tgt_out_seq=FeatureData(float_features=optim_tgt_out_seq)
            if optim_tgt_out_seq is not None
            else None,
            extras=extras if extras is not None else None,
        )

    def __post_init__(self):
        # Reject raw tensors in the FeatureData-typed fields; callers must
        # go through from_tensors()/from_input().
        if (
            isinstance(self.state, torch.Tensor)
            or isinstance(self.src_seq, torch.Tensor)
            or isinstance(self.tgt_in_seq, torch.Tensor)
            or isinstance(self.tgt_out_seq, torch.Tensor)
            or isinstance(self.optim_tgt_in_seq, torch.Tensor)
            or isinstance(self.optim_tgt_out_seq, torch.Tensor)
        ):
            raise ValueError(
                f"Use from_tensors() {type(self.state)} {type(self.src_seq)} "
                f"{type(self.tgt_in_seq)} {type(self.tgt_out_seq)} "
                f"{type(self.optim_tgt_in_seq)} {type(self.optim_tgt_out_seq)} "
            )
@dataclass
class BaseInput(TensorDataClass):
    """
    Base class for all inputs, both raw and preprocessed
    """

    state: FeatureData
    next_state: FeatureData
    reward: torch.Tensor
    time_diff: torch.Tensor
    step: Optional[torch.Tensor]
    not_terminal: torch.Tensor

    def __len__(self):
        return self.state.float_features.size()[0]

    def batch_size(self):
        return len(self)

    def as_dict_shallow(self):
        # Shallow field view; used by subclasses to splat into their ctor.
        return {
            "state": self.state,
            "next_state": self.next_state,
            "reward": self.reward,
            "time_diff": self.time_diff,
            "step": self.step,
            "not_terminal": self.not_terminal,
        }

    @staticmethod
    def from_dict(batch):
        # Missing sparse-feature columns default to empty dicts.
        id_list_features = batch.get(InputColumn.STATE_ID_LIST_FEATURES, None) or {}
        id_score_list_features = (
            batch.get(InputColumn.STATE_ID_SCORE_LIST_FEATURES, None) or {}
        )
        next_id_list_features = (
            batch.get(InputColumn.NEXT_STATE_ID_LIST_FEATURES, None) or {}
        )
        next_id_score_list_features = (
            batch.get(InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, None) or {}
        )
        # TODO: handle value/mask of DocList
        filler_mask_val = None
        doc_list = None
        candidate_features = batch.get(InputColumn.CANDIDATE_FEATURES, None)
        if candidate_features is not None:
            # Placeholder mask/value (all zeros) until real values are wired.
            filler_mask_val = torch.zeros(
                (candidate_features.shape[0], candidate_features.shape[1])
            )
            doc_list = DocList(
                float_features=candidate_features,
                mask=filler_mask_val.clone().bool(),
                value=filler_mask_val.clone().float(),
            )
        next_doc_list = None
        next_candidate_features = batch.get(InputColumn.NEXT_CANDIDATE_FEATURES, None)
        if next_candidate_features is not None:
            assert filler_mask_val is not None
            next_doc_list = DocList(
                float_features=next_candidate_features,
                mask=filler_mask_val.clone().bool(),
                value=filler_mask_val.clone().float(),
            )
        return BaseInput(
            state=FeatureData(
                float_features=batch[InputColumn.STATE_FEATURES],
                id_list_features=id_list_features,
                id_score_list_features=id_score_list_features,
                candidate_docs=doc_list,
            ),
            next_state=FeatureData(
                float_features=batch[InputColumn.NEXT_STATE_FEATURES],
                id_list_features=next_id_list_features,
                id_score_list_features=next_id_score_list_features,
                candidate_docs=next_doc_list,
            ),
            reward=batch[InputColumn.REWARD],
            time_diff=batch[InputColumn.TIME_DIFF],
            step=batch.get(InputColumn.STEP, None),
            not_terminal=batch[InputColumn.NOT_TERMINAL],
        )
@dataclass
class DiscreteDqnInput(BaseInput):
    """Training batch for discrete-action DQN; actions are one-hot tensors."""

    action: torch.Tensor
    next_action: torch.Tensor
    possible_actions_mask: torch.Tensor
    possible_next_actions_mask: torch.Tensor
    extras: ExtraData

    @classmethod
    def from_dict(cls, batch):
        # Build the common fields via BaseInput, then add DQN-specific ones.
        base = super().from_dict(batch)
        return cls(
            state=base.state,
            next_state=base.next_state,
            reward=base.reward,
            time_diff=base.time_diff,
            step=base.step,
            not_terminal=base.not_terminal,
            action=batch[InputColumn.ACTION],
            next_action=batch[InputColumn.NEXT_ACTION],
            possible_actions_mask=batch[InputColumn.POSSIBLE_ACTIONS_MASK],
            possible_next_actions_mask=batch[InputColumn.POSSIBLE_NEXT_ACTIONS_MASK],
            extras=batch[InputColumn.EXTRAS],
        )
@dataclass
class SlateQInput(BaseInput):
    """
    The shapes of `reward`, `reward_mask`, & `next_item_mask` are
    `(batch_size, slate_size)`.

    `reward_mask` indicated whether the reward could be observed, e.g.,
    the item got into viewport or not.
    """

    action: torch.Tensor
    next_action: torch.Tensor
    reward_mask: torch.Tensor
    extras: Optional[ExtraData] = None

    @classmethod
    def from_dict(cls, d):
        action = d["action"]
        next_action = d["next_action"]
        return cls(
            state=FeatureData(
                float_features=d["state_features"],
                candidate_docs=DocList(
                    float_features=d["candidate_features"],
                    mask=d["item_mask"],
                    value=d["item_probability"],
                ),
            ),
            next_state=FeatureData(
                float_features=d["next_state_features"],
                candidate_docs=DocList(
                    float_features=d["next_candidate_features"],
                    mask=d["next_item_mask"],
                    value=d["next_item_probability"],
                ),
            ),
            action=action,
            next_action=next_action,
            # Per-position reward; the slate-level reward is not stored here.
            reward=d["position_reward"],
            reward_mask=d["reward_mask"],
            time_diff=d["time_diff"],
            not_terminal=d["not_terminal"],
            step=None,
            extras=ExtraData.from_dict(d),
        )
@dataclass
class ParametricDqnInput(BaseInput):
    """Training batch for parametric-action DQN; actions carry features."""

    action: FeatureData
    next_action: FeatureData
    possible_actions: FeatureData
    possible_actions_mask: torch.Tensor
    possible_next_actions: FeatureData
    possible_next_actions_mask: torch.Tensor
    extras: Optional[ExtraData] = None

    @classmethod
    def from_dict(cls, batch):
        return cls(
            state=FeatureData(float_features=batch["state_features"]),
            action=FeatureData(float_features=batch["action"]),
            next_state=FeatureData(float_features=batch["next_state_features"]),
            next_action=FeatureData(float_features=batch["next_action"]),
            possible_actions=FeatureData(float_features=batch["possible_actions"]),
            possible_actions_mask=batch["possible_actions_mask"],
            possible_next_actions=FeatureData(
                float_features=batch["possible_next_actions"]
            ),
            possible_next_actions_mask=batch["possible_next_actions_mask"],
            reward=batch["reward"],
            not_terminal=batch["not_terminal"],
            time_diff=batch["time_diff"],
            step=batch["step"],
            extras=batch["extras"],
        )
@dataclass
class PolicyNetworkInput(BaseInput):
    """Training batch for continuous-action policy networks (e.g. SAC/TD3)."""

    action: FeatureData
    next_action: FeatureData
    extras: Optional[ExtraData] = None

    @classmethod
    def from_dict(cls, batch):
        # Build the common fields via BaseInput, then attach actions/extras.
        base = super().from_dict(batch)
        # TODO: Implement ExtraData.from_dict
        extras = batch.get("extras", None)
        return cls(
            action=FeatureData(float_features=batch["action"]),
            next_action=FeatureData(float_features=batch["next_action"]),
            extras=extras,
            **base.as_dict_shallow(),
        )
@dataclass
class PolicyGradientInput(TensorDataClass):
    """Training batch for on-policy policy-gradient methods (REINFORCE)."""

    state: FeatureData
    action: torch.Tensor
    reward: torch.Tensor
    log_prob: torch.Tensor
    possible_actions_mask: Optional[torch.Tensor] = None

    @classmethod
    def input_prototype(cls):
        # Randomly-filled example batch, used for tracing/shape inference.
        num_classes = 5
        batch_size = 10
        state_dim = 3
        action_dim = 2
        return cls(
            state=FeatureData(float_features=torch.randn(batch_size, state_dim)),
            action=F.one_hot(torch.randint(high=num_classes, size=(batch_size,))),
            reward=torch.rand(batch_size),
            log_prob=torch.log(torch.rand(batch_size)),
            possible_actions_mask=torch.ones(batch_size, action_dim),
        )
@dataclass
class MemoryNetworkInput(BaseInput):
    """Training batch for memory (world-model) networks over sequences."""

    action: torch.Tensor
    valid_seq_len: Optional[torch.Tensor] = None
    valid_next_seq_len: Optional[torch.Tensor] = None
    extras: ExtraData = field(default_factory=ExtraData)

    def __len__(self):
        # 2D state is (batch, dim); 3D state is (seq_len, batch, dim), so
        # the batch dimension moves to axis 1.
        if len(self.state.float_features.size()) == 2:
            return self.state.float_features.size()[0]
        elif len(self.state.float_features.size()) == 3:
            return self.state.float_features.size()[1]
        else:
            raise NotImplementedError()
@dataclass
class PreprocessedTrainingBatch(TensorDataClass):
    training_input: Union[PreprocessedRankingInput]
    # TODO: deduplicate this and move into individual input types.
    extras: ExtraData = field(default_factory=ExtraData)

    def batch_size(self):
        return self.training_input.state.float_features.size()[0]
@dataclass
class SlateScoreBatch:
    """Scored slate batch: per-example ids, positions, and model scores."""

    mdp_id: torch.Tensor
    sequence_number: torch.Tensor
    scores: torch.Tensor
    training_input: PolicyGradientInput
@dataclass
class MemoryNetworkOutput(TensorDataClass):
    """Output of the world-model memory network (mixture-density + LSTM state)."""

    mus: torch.Tensor
    sigmas: torch.Tensor
    logpi: torch.Tensor
    reward: torch.Tensor
    not_terminal: torch.Tensor
    last_step_lstm_hidden: torch.Tensor
    last_step_lstm_cell: torch.Tensor
    all_steps_lstm_hidden: torch.Tensor
@dataclass
class Seq2RewardOutput(TensorDataClass):
    # accumulated reward predicted for the input sequence -- inferred from
    # the name; confirm against the seq2reward trainer
    acc_reward: torch.Tensor
@dataclass
class DqnPolicyActionSet(TensorDataClass):
    """Actions selected by a DQN policy, via greedy and softmax selection."""
    greedy: int
    softmax: Optional[int] = None
    # human-readable action names and the softmax selection probability
    greedy_act_name: Optional[str] = None
    softmax_act_name: Optional[str] = None
    softmax_act_prob: Optional[float] = None
@dataclass
class PlanningPolicyOutput(TensorDataClass):
    """Next action proposed by a planning policy; exactly one of the
    continuous/discrete representations is expected to be populated."""
    # best action to take next
    next_best_continuous_action: Optional[torch.Tensor] = None
    next_best_discrete_action_one_hot: Optional[torch.Tensor] = None
    next_best_discrete_action_idx: Optional[int] = None
@dataclass
class RankingOutput(TensorDataClass):
    """Output of a sequence-ranking (seq2slate-style) model."""
    # a tensor of integer indices w.r.t. to possible candidates
    # the values are offset by 2 to account for padding and decoder-starter symbol
    # shape: batch_size, tgt_seq_len
    # e.g., there are candidates C0, C1, C2, C3, C4, and the ranked order is
    # C4, C1, C2, C3, C0. Then the ranked_tgt_out_idx = [6, 3, 4, 5, 2]
    ranked_tgt_out_idx: Optional[torch.Tensor] = None
    # generative probability of ranked tgt sequences at each decoding step
    # shape: batch_size, tgt_seq_len, candidate_size
    ranked_per_symbol_probs: Optional[torch.Tensor] = None
    # generative probability of ranked tgt sequences
    # shape: batch_size, 1
    ranked_per_seq_probs: Optional[torch.Tensor] = None
    # log probabilities of given tgt sequences are used in REINFORCE
    # shape: batch_size, 1 if Seq2SlateMode == PER_SEQ_LOG_PROB_MODE
    # shape: batch_size, tgt_seq_len if Seq2SlateMode == PER_SYMBOL_LOG_PROB_DIST_MODE
    log_probs: Optional[torch.Tensor] = None
    # encoder scores in tgt_out_idx order
    encoder_scores: Optional[torch.Tensor] = None
@dataclass
class RewardNetworkOutput(TensorDataClass):
    """Prediction produced by a reward model."""
    predicted_reward: torch.Tensor
| 35.654527 | 91 | 0.648377 |
acfbec8fe78fa3fc2c9a168211d935ec003c2328 | 2,227 | py | Python | util/loader/pre_process.py | wm-bupt/STSN-for-oracle-recognition | f220aec0ba93d48231a228001f2c380c72304d06 | [
"Apache-2.0"
] | null | null | null | util/loader/pre_process.py | wm-bupt/STSN-for-oracle-recognition | f220aec0ba93d48231a228001f2c380c72304d06 | [
"Apache-2.0"
] | null | null | null | util/loader/pre_process.py | wm-bupt/STSN-for-oracle-recognition | f220aec0ba93d48231a228001f2c380c72304d06 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
import numbers
import torch, random, math
class ResizeImage():
def __init__(self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class PlaceCrop(object):
"""Crops the given PIL.Image at the particular index.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (w, h), a square crop (size, size) is
made.
"""
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
def image_train(resize_size=256, crop_size=224, alexnet=False, transform_our=0):
if not alexnet:
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
if not transform_our:
return transforms.Compose([
ResizeImage(resize_size),
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
def image_test(resize_size=256, crop_size=224, alexnet=False, transform_our=0):
    """Build the evaluation-time torchvision transform pipeline (resize then
    fixed near-center crop; no random augmentation).

    NOTE(review): returns None whenever ``transform_our`` is truthy, and
    ``normalize`` is only defined on the non-AlexNet branch.
    """
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
    # top-left corner of a crop taken from (approximately) the image center
    start_center = (resize_size - crop_size - 1) / 2
    if transform_our:
        return None
    return transforms.Compose([
        ResizeImage(resize_size),
        PlaceCrop(crop_size, start_center, start_center),
        transforms.ToTensor(),
        normalize
    ])
| 30.094595 | 92 | 0.570274 |
acfbecd1c918694f02dfc0740d073178fbf31db9 | 17,074 | py | Python | portal/migrations/0022_auto__add_mediafile__add_field_mediaitem_media_files.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 6 | 2015-04-05T01:28:23.000Z | 2022-02-06T17:29:47.000Z | portal/migrations/0022_auto__add_mediafile__add_field_mediaitem_media_files.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-01-05T23:07:10.000Z | 2022-03-30T17:52:45.000Z | portal/migrations/0022_auto__add_mediafile__add_field_mediaitem_media_files.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-02-06T17:29:53.000Z | 2022-02-26T17:23:09.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MediaFile'
db.create_table('portal_mediafile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('url', self.gf('django.db.models.fields.URLField')(max_length=512, blank=True)),
('file_format', self.gf('django.db.models.fields.CharField')(max_length=20)),
('size', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('media_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['portal.MediaItem'])),
('mediatype', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('portal', ['MediaFile'])
# Copying 'mp3URL' and etc in 'MediaFile' instances
if not db.dry_run:
for mediaitem_object in orm['portal.MediaItem'].objects.all():
# MP3
if not mediaitem_object.mp3URL == '':
mediaFile = orm['portal.MediaFile'].objects.create(title=mediaitem_object.title+" mp3", url=mediaitem_object.mp3URL, size=mediaitem_object.mp3Size, file_format="MP3", media_item = mediaitem_object, mediatype = 'audio')
mediaFile.save()
# MP4
if not mediaitem_object.mp4URL == '':
mediaFile = orm['portal.MediaFile'].objects.create(title=mediaitem_object.title+" mp4", url=mediaitem_object.mp4URL, size=mediaitem_object.mp4Size, file_format="MP4", media_item = mediaitem_object, mediatype = 'video')
mediaFile.save()
# OGG
if not mediaitem_object.oggURL == '':
mediaFile = orm['portal.MediaFile'].objects.create(title=mediaitem_object.title+" ogg", url=mediaitem_object.oggURL, size=mediaitem_object.oggSize, file_format="OGG", media_item = mediaitem_object, mediatype = 'audio')
mediaFile.save()
# WEBM
if not mediaitem_object.webmURL == '':
mediaFile = orm['portal.MediaFile'].objects.create(title=mediaitem_object.title+" WebM", url=mediaitem_object.webmURL, size=mediaitem_object.webmSize, file_format="WEBM", media_item = mediaitem_object, mediatype = 'video')
mediaFile.save()
def backwards(self, orm):
# Deleting model 'MediaFile'
db.delete_table('portal_mediafile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'portal.channel': {
'Meta': {'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'portal.collection': {
'Meta': {'object_name': 'Collection'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['portal.MediaItem']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'portal.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.MediaItem']"}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'timecode': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'portal.hotfolder': {
'Meta': {'object_name': 'Hotfolder'},
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'defaultName': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'folderName': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'portal.mediafile': {
'Meta': {'object_name': 'MediaFile'},
'file_format': ('django.db.models.fields.CharField', [], {'default': "'MP3'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.MediaItem']"}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'audio'", 'max_length': '20'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'portal.mediaitem': {
'Meta': {'object_name': 'MediaItem'},
'assemblyid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'audioThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encodingDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC-BY'", 'max_length': '200'}),
'linkURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mp3Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mp4Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'oggSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'originalFile': ('django.db.models.fields.files.FileField', [], {'max_length': '2048'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'torrentDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'webmSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'portal.submittal': {
'Meta': {'object_name': 'Submittal'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_audioThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'media_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'media_kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'media_license': ('django.db.models.fields.CharField', [], {'default': "'CC-BY'", 'max_length': '200'}),
'media_linkURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media_title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'media_torrentDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media_torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'media_webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['portal']
| 79.046296 | 242 | 0.565597 |
acfbed6b3d6c128a74f3aace6bcb71206adb40df | 2,623 | py | Python | pyhermes/publisher.py | romcheg/pyhermes | c4d3af0b0fd02353ecc0937a409ef860d5c5c4d2 | [
"Apache-2.0"
] | 5 | 2016-04-14T15:32:16.000Z | 2016-04-21T05:51:32.000Z | pyhermes/publisher.py | romcheg/pyhermes | c4d3af0b0fd02353ecc0937a409ef860d5c5c4d2 | [
"Apache-2.0"
] | 1 | 2016-06-06T08:54:30.000Z | 2016-06-06T09:41:15.000Z | pyhermes/publisher.py | romcheg/pyhermes | c4d3af0b0fd02353ecc0937a409ef860d5c5c4d2 | [
"Apache-2.0"
] | 1 | 2016-04-20T05:47:01.000Z | 2016-04-20T05:47:01.000Z | import json
import logging
import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout
from pyhermes.exceptions import HermesPublishException
from pyhermes.settings import HERMES_SETTINGS
logger = logging.getLogger(__name__)
# HTTP status codes Hermes returns when a published message is accepted.
HERMES_VALID_RESPONSE_CODES = {201, 202}
def _strip_topic_group(topic):
    """
    Standardize topic name (remove group name from the beginning)
    """
    group_name = HERMES_SETTINGS.PUBLISHING_GROUP['groupName']
    if topic.startswith(group_name):
        return topic[len(group_name):]
    return topic
def _get_full_topic_name(topic):
    """Return *topic* prefixed with the configured publishing group name
    (no-op when the prefix is already present)."""
    group_name = HERMES_SETTINGS.PUBLISHING_GROUP['groupName']
    if topic.startswith(group_name):
        return topic
    return '{}.{}'.format(group_name, topic)
def _handle_request_adapter(request_session):
    """
    Handle custom route-mapping.

    See http://docs.python-requests.org/en/master/user/advanced/#transport-adapters # noqa
    for details
    """
    adapter_factory = HERMES_SETTINGS.URL_ADAPTER
    if adapter_factory:
        request_session.mount(*adapter_factory())
def publish(topic, data):
    """
    Push an event to the Hermes.

    Args:
        topic: name of the topic
        data: data to push (must be JSON-serializable)

    Returns:
        message id from Hermes, or None when the integration is disabled

    Raises:
        HermesPublishException: on connection errors or when Hermes
            responds with an unexpected status code.
    """
    if not HERMES_SETTINGS.ENABLED:
        logger.debug('Hermes integration is disabled')
        return
    json_data = json.dumps(data)
    headers = {'Content-Type': 'application/json'}
    url = "{}/topics/{}".format(
        HERMES_SETTINGS.BASE_URL, _get_full_topic_name(topic)
    )
    logger.debug(
        'Pushing message to topic "{}" (url: "{}") with data: {}'.format(
            topic, url, json_data
        )
    )
    with requests.Session() as session:
        _handle_request_adapter(session)
        try:
            resp = session.post(url, headers=headers, data=json_data)
        except (ConnectionError, HTTPError, Timeout) as e:
            message = 'Error pushing event to Hermes: {}.'.format(e)
            logger.exception(message)
            raise HermesPublishException(message)
        if resp.status_code not in HERMES_VALID_RESPONSE_CODES:
            message = 'Bad response code during Hermes push: {}.'.format(
                resp.status_code
            )
            logger.error(message)
            raise HermesPublishException(message)
        hermes_event_id = resp.headers.get('Hermes-Message-Id')
        logger.info(
            # fixed log-string typo: there was a stray extra double quote
            # after the topic placeholder ('"{}""')
            'Event with topic "{}" sent to Hermes with event_id={}'.format(
                topic, hermes_event_id
            )
        )
        return hermes_event_id
| 27.904255 | 91 | 0.6565 |
acfbed85d03023bd9875d6e09c8fb1a3784cb3fa | 4,948 | py | Python | app.py | brunodoamaral/clip-search | cf03b739fe4fc1c71466571a67244f23799709b3 | [
"MIT"
] | 8 | 2021-12-30T02:18:53.000Z | 2022-02-13T18:57:59.000Z | app.py | brunodoamaral/clip-search | cf03b739fe4fc1c71466571a67244f23799709b3 | [
"MIT"
] | 1 | 2021-09-23T02:12:41.000Z | 2021-09-23T02:12:41.000Z | app.py | brunodoamaral/clip-search | cf03b739fe4fc1c71466571a67244f23799709b3 | [
"MIT"
] | null | null | null | # coding: utf-8
import argparse
from pathlib import Path, PurePosixPath
import numpy as np
from flask import Flask, Response, jsonify, request, send_from_directory
from PIL import Image
from indexer import ImagesIndexer
import requests
IMAGES_PREFIX_URL = PurePosixPath('/images')
THUMBS_PREFIX_URL = PurePosixPath('/thumb')
MAX_TOP_N = 100
ROUND_NUM = 1_000_000
############ Helper functions ############
def round_float(x):
    # TODO: make ROUND_NUM-based rounding work; intended form was
    # round(x * ROUND_NUM) / ROUND_NUM. For now just coerce to float.
    return float(x)
def emb_to_list(emb):
    """Flatten a 1-D (or single-row 2-D) embedding into a list of floats."""
    if emb.ndim == 2:
        # Only a single-row batch can be squeezed down to one dimension.
        assert emb.shape[0] == 1, 'Multidimension embedding: ' + str(emb.shape)
        emb = emb[0]
    return [round_float(value) for value in emb]
################ Flask app ###############
# Serve the pre-built frontend straight from the Flask root.
app = Flask(
    __name__,
    static_url_path='/',
    static_folder='./frontend/public/'
)
@app.route('/get-embedding', methods=['POST', 'GET'])
def get_embedding():
    """Return CLIP embeddings: for uploaded files (POST) or for a text
    prompt / an already-indexed image (GET)."""
    results = {}
    if request.method == 'POST':
        uploaded_files = request.files.getlist("fileToUpload[]")
        for file in uploaded_files:
            emb = INDEX.encode_image(Image.open(file.stream))
            results[file.filename] = emb_to_list(emb)
        # also expose the mean embedding over all uploaded files
        results['_mean_'] = emb_to_list(np.mean(list(results.values()), 0))
    else:
        if 'prompt' in request.args:
            emb = INDEX.encode_prompt(request.args['prompt'])
            results = emb_to_list(emb)
        elif 'src_image' in request.args:
            src_image = Path(request.args['src_image']).relative_to(IMAGES_PREFIX_URL)
            # naive path-traversal guard; NOTE(review): consider resolving the
            # path and verifying it stays under images_path instead
            if '..' not in str(src_image):
                path_image = images_path / src_image
                if path_image.exists():
                    emb = INDEX.encode_image(Image.open(path_image))
                    results = emb_to_list(emb)
    # Unmatched requests fall through and return an empty dict.
    return jsonify(results)
@app.route('/search', methods=['POST'])
def do_the_magic():
    """Search the index with the query embedding from the request body."""
    params = request.get_json()
    requested = int(params.get('num-results', '100'))
    top_n = min(MAX_TOP_N, requested)
    query = np.array(params['query'], dtype=np.float32)[np.newaxis]
    query_excludes = set(params.get('query_excludes', []))
    # Over-fetch so excluded entries can be dropped while still filling top_n.
    similarity, query_result, query_fnames = INDEX.search(query, top_n + len(query_excludes))
    similarity, query_result, query_fnames = similarity[0], query_result[0], query_fnames[0]
    candidates = [
        {'fname': str(IMAGES_PREFIX_URL / f), 'thumb': str(THUMBS_PREFIX_URL / f)}
        for f in query_fnames
    ]
    hits = []
    for entry, score in zip(candidates, similarity):
        if entry['fname'] in query_excludes:
            continue
        entry['similarity'] = float(score)
        hits.append(entry)
        # Limit results
        if len(hits) == top_n:
            break
    return jsonify(hits)
@app.route(str(IMAGES_PREFIX_URL / '<path:path>'))
def send_image(path):
    # Serve original images; send_from_directory guards against traversal.
    return send_from_directory(images_path, path)
@app.route(str(THUMBS_PREFIX_URL / '<path:path>'))
def send_thumb(path):
    # Stream the indexer-provided thumbnail bytes as JPEG.
    return Response(INDEX.thumbnail(path), mimetype='image/jpeg')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('images_path', type=str, help='Path to images folder')
    parser.add_argument('--rotation-invariant', help='Average embeddings of 4 rotations on image inputs', default=False, action='store_true')
    parser.add_argument('-p', '--port', type=int, help='Port to start server', default=5000)
    parser.add_argument('-s', '--host', type=str, help='Host to start server', default='0.0.0.0')
    parser.add_argument('--dev', help='Start in dev mode', default=False, action='store_true')
    args = parser.parse_args()
    images_path = Path(args.images_path)
    rotation_invariant = args.rotation_invariant
    INDEX = ImagesIndexer(images_path, do_rotate_images=rotation_invariant)
    # Dev mode: proxy '/' to the frontend dev server instead of serving the
    # pre-built static files.
    if args.dev:
        print('Go to ./frontend folder and run: npm install && npm run dev')
        @app.route('/', methods=['GET', 'POST'])
        def _proxy(*args, **kwargs):
            resp = requests.request(
                method=request.method,
                # BUG FIX: target was 'http//localhost:8000' (missing ':' and
                # the trailing '/' that request.host_url always carries), which
                # produced an invalid proxied URL.
                url=request.url.replace(request.host_url, 'http://localhost:8000/'),
                headers={key: value for (key, value) in request.headers if key != 'Host'},
                data=request.get_data(),
                cookies=request.cookies,
                allow_redirects=False)
            # Hop-by-hop headers must not be forwarded back to the client.
            excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
            headers = [(name, value) for (name, value) in resp.raw.headers.items()
                       if name.lower() not in excluded_headers]
            response = Response(resp.content, resp.status_code, headers)
            return response
    app.run(host=args.host, port=args.port, debug=args.dev)
| 32.552632 | 141 | 0.625909 |
acfbedc48edc2d837984db6481d752e16d339dd9 | 430 | py | Python | invenio_rdm_records/version.py | Pineirin/invenio-rdm-records | 629f05186645679f3ee347e418af9fe417fbfa8a | [
"MIT"
] | null | null | null | invenio_rdm_records/version.py | Pineirin/invenio-rdm-records | 629f05186645679f3ee347e418af9fe417fbfa8a | [
"MIT"
] | null | null | null | invenio_rdm_records/version.py | Pineirin/invenio-rdm-records | 629f05186645679f3ee347e418af9fe417fbfa8a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2021 CERN.
# Copyright (C) 2019-2021 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-RDM-Records.
This file is imported by ``invenio_rdm_records.__init__``,
and parsed by ``setup.py``.
"""
__version__ = '0.31.3'
| 26.875 | 77 | 0.72093 |
acfbee1a044650fd78591094253dd3ffd4bba7a7 | 13,967 | py | Python | market_maker/ws/ws_thread.py | hh2010/sample-market-maker | 7c5404c531ae51bbffff6f2b90f1e2704e89bb22 | [
"Apache-2.0"
] | null | null | null | market_maker/ws/ws_thread.py | hh2010/sample-market-maker | 7c5404c531ae51bbffff6f2b90f1e2704e89bb22 | [
"Apache-2.0"
] | null | null | null | market_maker/ws/ws_thread.py | hh2010/sample-market-maker | 7c5404c531ae51bbffff6f2b90f1e2704e89bb22 | [
"Apache-2.0"
] | null | null | null | import sys
import websocket
import threading
import traceback
import ssl
from time import sleep
import time
import json
import decimal
import datetime as dt
import logging
from market_maker.settings import settings
from market_maker.auth.APIKeyAuth import generate_expires, generate_signature
from market_maker.utils.log import setup_custom_logger
from market_maker.utils.math import toNearest
from future.utils import iteritems
from future.standard_library import hooks
with hooks(): # Python 2/3 compat
from urllib.parse import urlparse, urlunparse
# Connects to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without heavily polling the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll as often as it wants.
class BitMEXWebsocket():
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 1000
    def __init__(self):
        '''Create the wrapper; call connect() to start streaming.'''
        self.logger = logging.getLogger('root')
        self.__reset()  # initializes connection state/data stores (defined later in the class)
    def __del__(self):
        # Best-effort cleanup: close the websocket on garbage collection.
        self.exit()
    def connect(self, endpoint="", symbol="XBTUSD", shouldAuth=False):
        '''Connect to the websocket and initialize data stores.

        endpoint: HTTP(S) API base URL, rewritten below to the ws(s) realtime URL.
        symbol: instrument to subscribe to.
        shouldAuth: also subscribe to private (order/execution/margin/position) topics.
        Blocks until the initial data images for all subscriptions have arrived.
        '''
        self.logger.debug("Connecting WebSocket.")
        self.symbol = symbol
        self.shouldAuth = shouldAuth
        # We can subscribe right in the connection querystring, so let's build that.
        # Subscribe to all pertinent endpoints
        #subscriptions = [sub + ':' + symbol for sub in ["quote", "trade"]]
        #subscriptions = [sub + ':' + symbol for sub in ["orderBookL2", "trade"]]
        # NOTE: only "trade" is currently subscribed; the commented lines above
        # record previously used subscription sets.
        subscriptions = [sub + ':' + symbol for sub in ["trade"]]
        #subscriptions += ["instrument"] # We want all of them
        if self.shouldAuth:
            subscriptions += [sub + ':' + symbol for sub in ["order", "execution"]]
            subscriptions += ["margin", "position"]
        # Get WS URL and connect.
        urlParts = list(urlparse(endpoint))
        urlParts[0] = urlParts[0].replace('http', 'ws')  # http -> ws, https -> wss
        urlParts[2] = "/realtime?subscribe=" + ",".join(subscriptions)
        wsURL = urlunparse(urlParts)
        self.logger.info("Connecting to %s" % wsURL)
        self.__connect(wsURL)
        self.logger.info('Connected to WS. Waiting for data images, this may take a moment...')
        # Connected. Wait for partials
        self.__wait_for_symbol(symbol)
        if self.shouldAuth:
            self.__wait_for_account()
        self.logger.info('Got all market data. Starting.')
#
# Data methods
#
def get_instrument(self, symbol):
instruments = self.data['instrument']
matchingInstruments = [i for i in instruments if i['symbol'] == symbol]
if len(matchingInstruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matchingInstruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
return instrument
def get_ticker(self, symbol):
'''Return a ticker object. Generated from instrument.'''
instrument = self.get_instrument(symbol)
# If this is an index, we have to get the data from the last trade.
if instrument['symbol'][0] == '.':
ticker = {}
ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']
# Normal instrument
else:
bid = instrument['bidPrice'] or instrument['lastPrice']
ask = instrument['askPrice'] or instrument['lastPrice']
ticker = {
"last": instrument['lastPrice'],
"buy": bid,
"sell": ask,
"mid": (bid + ask) / 2
}
# The instrument has a tickSize. Use it to round values.
return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}
def funds(self):
return self.data['margin'][0]
def market_depth(self, symbol):
raise NotImplementedError('orderBook is not subscribed; use askPrice and bidPrice on instrument')
# return self.data['orderBook25'][0]
def open_orders(self, clOrdIDPrefix):
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
def position(self, symbol):
positions = self.data['position']
pos = [p for p in positions if p['symbol'] == symbol]
if len(pos) == 0:
# No position found; stub it
return {'avgCostPrice': 0, 'avgEntryPrice': 0, 'currentQty': 0, 'symbol': symbol}
return pos[0]
def recent_trades(self):
return self.data['trade']
#
# Lifecycle methods
#
def error(self, err):
self._error = err
self.logger.error(err)
self.exit()
def exit(self):
self.exited = True
self.ws.close()
#
# Private methods
#
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
ssl_defaults = ssl.get_default_verify_paths()
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=self.__get_auth()
)
setup_custom_logger('websocket', log_level=settings.LOG_LEVEL)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt=sslopt_ca_certs))
self.wst.daemon = True
self.wst.start()
self.logger.info("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.logger.error("Couldn't connect to WS! Exiting.")
self.exit()
sys.exit(1)
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.shouldAuth is False:
return []
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_expires()
return [
"api-expires: " + str(nonce),
"api-signature: " + generate_signature(settings.API_SECRET, 'GET', '/realtime', nonce, ''),
"api-key:" + settings.API_KEY
]
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
while not {'margin', 'position', 'order'} <= set(self.data):
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
while not {'instrument', 'trade', 'quote'} <= set(self.data):
sleep(0.1)
def __send_command(self, command, args):
'''Send a raw command.'''
self.ws.send(json.dumps({"op": command, "args": args or []}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
receive_time = dt.datetime.fromtimestamp(time.time())
receive_time = dt.datetime.strftime(receive_time, "%H:%M:%S.%f")
# not sure why this loggeradapter not working
#self.logger = logging.LoggerAdapter(self.logger, extra={'receive_time': receive_time})
print(receive_time)
message = json.loads(message)
self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
if message['success']:
self.logger.debug("Subscribed to %s." % message['subscribe'])
else:
self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
(message['request']['args'][0], message['error']))
elif 'status' in message:
if message['status'] == 400:
self.error(message['error'])
if message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
elif action:
if table not in self.data:
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][(BitMEXWebsocket.MAX_TABLE_LEN // 2):]
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
continue # No item found to update. Could happen before push
# Log executions
if table == 'order':
is_canceled = 'ordStatus' in updateData and updateData['ordStatus'] == 'Canceled'
if 'cumQty' in updateData and not is_canceled:
contExecuted = updateData['cumQty'] - item['cumQty']
if contExecuted > 0:
instrument = self.get_instrument(item['symbol'])
self.logger.info("Execution: %s %d Contracts of %s at %.*f" %
(item['side'], contExecuted, item['symbol'],
instrument['tickLog'], item['price']))
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error(traceback.format_exc())
def __on_open(self):
self.logger.debug("Websocket Opened.")
def __on_close(self):
self.logger.info('Websocket Closed')
self.exit()
def __on_error(self, ws, error):
if not self.exited:
self.error(error)
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
def findItemByKeys(keys, table, matchData):
    """Return the first row in *table* matching *matchData* on every key.

    :param keys: list of field names that uniquely identify a row
        (as communicated by the websocket 'partial' message).
    :param table: list of dict rows to search.
    :param matchData: dict holding the key values to match against.
    :return: the first matching row, or None when nothing matches.
        With an empty *keys* list every row matches, so the first row
        (if any) is returned — same as the original behavior.
    """
    for item in table:
        # all() short-circuits on the first mismatching key, unlike the
        # original loop which compared every key before deciding.
        if all(item[key] == matchData[key] for key in keys):
            return item
    return None
if __name__ == "__main__":
    # Smoke-test entry point: stream public BitMEX trades to the console.
    # create console handler and set level to debug
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    #fh = logging.FileHandler('/home/hh2010/2019-12-09-pm-trd.log')
    #fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    # create formatter
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # add formatter to ch
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    #logger.addHandler(fh)
    ws = BitMEXWebsocket()
    # Replace the default 'root' logger with the configured one.
    ws.logger = logger
    ws.connect("https://www.bitmex.com/api/v1")
    # Keep the main thread alive while the daemon websocket thread runs.
    while(ws.ws.sock.connected):
        sleep(1)
| 39.678977 | 119 | 0.567337 |
acfbee2d1238888c99502d14b79be9a64f8eb00b | 1,114 | py | Python | setup.py | Otartist/disrank2 | ff556f2c7496cae7cae303576da14c5242ef748f | [
"MIT"
] | null | null | null | setup.py | Otartist/disrank2 | ff556f2c7496cae7cae303576da14c5242ef748f | [
"MIT"
] | 1 | 2022-01-02T11:37:17.000Z | 2022-01-02T11:37:17.000Z | setup.py | Otartist/disrank2 | ff556f2c7496cae7cae303576da14c5242ef748f | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# See note below for more information about classifiers
# Trove classifiers describing the package for PyPI browsing/filtering.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Education",
    "Operating System :: POSIX :: Linux",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: MacOS",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
]
# Package metadata for the 'disrank2' Discord rank-card library.
setup(
    name="disrank2",
    version="0.0.1",
    description="Simple lib for discord rank card",
    # README.md is rendered as the PyPI project page.
    long_description=open("README.md").read(),
    long_description_content_type='text/markdown',
    url="https://github.com/Otartist/disrank2",
    author="Otartist",
    license="MIT", # note the American spelling
    classifiers=classifiers,
    keywords="discord discord-rank discord-profile discord-leveling", # used when people are searching for a module, keywords separated with a space
    packages=find_packages(),
    install_requires=[
        "Pillow"
    ], # a list of other Python modules which this module depends on. For example RPi.GPIO
    include_package_data=True
)
| 35.935484 | 149 | 0.695691 |
acfbeeab409a45670d980aa68730fc8940baa44b | 3,729 | py | Python | tutorials/Tutorial4_FAQ_style_QA.py | dertilo/haystack | b674325d28ddc0c4d0f976ed746f45c5bd51d687 | [
"Apache-2.0"
] | 3 | 2021-06-16T15:49:57.000Z | 2021-09-01T16:52:15.000Z | tutorials/Tutorial4_FAQ_style_QA.py | dertilo/haystack | b674325d28ddc0c4d0f976ed746f45c5bd51d687 | [
"Apache-2.0"
] | null | null | null | tutorials/Tutorial4_FAQ_style_QA.py | dertilo/haystack | b674325d28ddc0c4d0f976ed746f45c5bd51d687 | [
"Apache-2.0"
] | null | null | null | from haystack import Finder
from haystack.database.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.dense import EmbeddingRetriever
from haystack.utils import print_answers
import pandas as pd
import requests
import logging
import subprocess
import time
## "FAQ-Style QA": Utilizing existing FAQs for Question Answering
# While *extractive Question Answering* works on pure texts and is therefore more generalizable, there's also a common alternative that utilizes existing FAQ data.
#
# Pros:
# - Very fast at inference time
# - Utilize existing FAQ data
# - Quite good control over answers
#
# Cons:
# - Generalizability: We can only answer questions that are similar to existing ones in FAQ
#
# In some use cases, a combination of extractive QA and FAQ-style can also be an interesting option.
# Toggle: start a throwaway single-node Elasticsearch via docker, or use
# an already-running instance on localhost:9200.
LAUNCH_ELASTICSEARCH=True
if LAUNCH_ELASTICSEARCH:
    logging.info("Starting Elasticsearch ...")
    status = subprocess.run(
        ['docker run -d -p 9200:9200 -e "discovery.type=single-node" elasticsearch:7.6.2'], shell=True
    )
    if status.returncode:
        raise Exception("Failed to launch Elasticsearch. If you want to connect to an existing Elasticsearch instance"
                        "then set LAUNCH_ELASTICSEARCH in the script to False.")
    # Fixed wait for Elasticsearch to come up; no readiness probe is done.
    time.sleep(15)
### Init the DocumentStore
# In contrast to Tutorial 1 (extractive QA), we:
#
# * specify the name of our `text_field` in Elasticsearch that we want to return as an answer
# * specify the name of our `embedding_field` in Elasticsearch where we'll store the embedding of our question and that is used later for calculating our similarity to the incoming user question
# * set `excluded_meta_data=["question_emb"]` so that we don't return the huge embedding vectors in our search results
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="",
                                            index="document",
                                            text_field="answer",
                                            embedding_field="question_emb",
                                            embedding_dim=768,
                                            excluded_meta_data=["question_emb"])
### Create a Retriever using embeddings
# Instead of retrieving via Elasticsearch's plain BM25, we want to use vector similarity of the questions (user question vs. FAQ ones).
# We can use the `EmbeddingRetriever` for this purpose and specify a model that we use for the embeddings.
#
retriever = EmbeddingRetriever(document_store=document_store, embedding_model="deepset/sentence_bert", gpu=False)
# Download a csv containing some FAQ data
# Here: Some question-answer pairs related to COVID-19
temp = requests.get("https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/faqs/faq_covidbert.csv")
open('small_faq_covid.csv', 'wb').write(temp.content)
# Get dataframe with columns "question", "answer" and some custom metadata
df = pd.read_csv("small_faq_covid.csv")
# Minimal cleaning
df.fillna(value="", inplace=True)
df["question"] = df["question"].apply(lambda x: x.strip())
print(df.head())
# Get embeddings for our questions from the FAQs
questions = list(df["question"].values)
df["question_emb"] = retriever.embed_queries(texts=questions)
# Convert Dataframe to list of dicts and index them in our DocumentStore
docs_to_index = df.to_dict(orient="records")
document_store.write_documents(docs_to_index)
# Init reader & and use Finder to get answer (same as in Tutorial 1)
# reader=None: FAQ-style QA matches by question similarity only.
finder = Finder(reader=None, retriever=retriever)
prediction = finder.get_answers_via_similar_questions(question="How is the virus spreading?", top_k_retriever=10)
print_answers(prediction, details="all")
| 46.037037 | 194 | 0.727809 |
acfbef7a437d4ccfb05dfd5d204b2f8c8c4df8d0 | 3,547 | py | Python | bootstrap/bootstrap.py | annefou/the-littlest-jupyterhub | 25cfdc5fa5d8c5a567282f32372d22fa4dedb5d2 | [
"BSD-3-Clause"
] | null | null | null | bootstrap/bootstrap.py | annefou/the-littlest-jupyterhub | 25cfdc5fa5d8c5a567282f32372d22fa4dedb5d2 | [
"BSD-3-Clause"
] | null | null | null | bootstrap/bootstrap.py | annefou/the-littlest-jupyterhub | 25cfdc5fa5d8c5a567282f32372d22fa4dedb5d2 | [
"BSD-3-Clause"
] | null | null | null | """
Bootstrap an installation of TLJH.
Sets up just enough TLJH environments to invoke tljh.installer.
This script is run as:
curl <script-url> | sudo python3 -
Constraints:
- Entire script should be compatible with Python 3.6 (We run on Ubuntu 18.04+)
- Script should parse in Python 3.4 (since we exit with useful error message on Ubuntu 14.04+)
- Use stdlib modules only
"""
import os
import subprocess
import sys
import logging
def get_os_release_variable(key):
    """
    Return value for key from /etc/os-release

    /etc/os-release is a bash file, so should use bash to parse it.

    Returns empty string if key is not found.
    """
    # Delegate parsing to bash itself so quoting/expansion rules match
    # exactly what the OS intends; echo of an unset variable yields "".
    command = "source /etc/os-release && echo ${%s}" % key
    raw_output = subprocess.check_output(['/bin/bash', '-c', command])
    return raw_output.decode().strip()
def main():
    """Bootstrap a TLJH installation: verify the OS, create the hub
    virtualenv, pip-install the tljh package into it, and exec the real
    installer (tljh.installer) inside that environment.
    """
    # Support only Ubuntu 18.04+
    distro = get_os_release_variable('ID')
    version = float(get_os_release_variable('VERSION_ID'))
    if distro != 'ubuntu':
        print('The Littlest JupyterHub currently supports Ubuntu Linux only')
        sys.exit(1)
    # NOTE(review): version is already a float; the inner float() here is
    # redundant. Float comparison also mis-handles e.g. "18.10" vs "18.4"
    # style versions only by luck of Ubuntu's numbering scheme.
    elif float(version) < 18.04:
        print('The Littlest JupyterHub requires Ubuntu 18.04 or higher')
        sys.exit(1)
    install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')
    hub_prefix = os.path.join(install_prefix, 'hub')
    # Set up logging to print to a file and to stderr
    logger = logging.getLogger(__name__)
    os.makedirs(install_prefix, exist_ok=True)
    file_logger = logging.FileHandler(os.path.join(install_prefix, 'installer.log'))
    file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger.addHandler(file_logger)
    stderr_logger = logging.StreamHandler()
    stderr_logger.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(stderr_logger)
    logger.setLevel(logging.INFO)
    logger.info('Checking if TLJH is already installed...')
    # Presence of the hub venv's python3 is used as the "installed" marker.
    if os.path.exists(os.path.join(hub_prefix, 'bin', 'python3')):
        logger.info('TLJH already installed, upgrading...')
        initial_setup = False
    else:
        logger.info('Setting up hub environment')
        initial_setup = True
        subprocess.check_output(['apt-get', 'update', '--yes'], stderr=subprocess.STDOUT)
        subprocess.check_output(['apt-get', 'install', '--yes', 'python3', 'python3-venv'], stderr=subprocess.STDOUT)
        logger.info('Installed python & virtual environment')
    # venv creation is idempotent, so it also runs on upgrades.
    os.makedirs(hub_prefix, exist_ok=True)
    subprocess.check_output(['python3', '-m', 'venv', hub_prefix], stderr=subprocess.STDOUT)
    logger.info('Set up hub virtual environment')
    if initial_setup:
        logger.info('Setting up TLJH installer...')
    else:
        logger.info('Upgrading TLJH installer...')
    pip_flags = ['--upgrade']
    # TLJH_BOOTSTRAP_DEV=yes installs the repo checkout editable for dev.
    if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':
        pip_flags.append('--editable')
    tljh_repo_path = os.environ.get(
        'TLJH_BOOTSTRAP_PIP_SPEC',
        'git+https://github.com/annefou/the-littlest-jupyterhub.git'
    )
    subprocess.check_output([
        os.path.join(hub_prefix, 'bin', 'pip'),
        'install'
    ] + pip_flags + [tljh_repo_path], stderr=subprocess.STDOUT)
    logger.info('Setup tljh package')
    logger.info('Starting TLJH installer...')
    # execv replaces this bootstrap process with the installer running
    # under the hub venv's python3; no code after this line executes.
    os.execv(
        os.path.join(hub_prefix, 'bin', 'python3'),
        [
            os.path.join(hub_prefix, 'bin', 'python3'),
            '-m',
            'tljh.installer',
        ] + sys.argv[1:]
    )
if __name__ == '__main__':
    # Entry point when run as `curl <url> | sudo python3 -`.
    main()
| 32.842593 | 117 | 0.657175 |
acfbf089dd6d2819bf01bc3a4e99377029184c6c | 3,985 | py | Python | ambari-server/src/main/resources/common-services/REGISTRY/0.3.0/package/scripts/service_check.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/REGISTRY/0.3.0/package/scripts/service_check.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/REGISTRY/0.3.0/package/scripts/service_check.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.script.script import Script
from resource_management.core.logger import Logger
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.show_logs import show_logs
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
import urllib2, time, json
CURL_CONNECTION_TIMEOUT = '5'
class ServiceCheck(Script):
  """Ambari service check for the Schema Registry: verify the REST API
  answers on /api/v1/schemaregistry/schemaproviders, retrying up to
  three times, via curl (secured/SSL clusters) or urllib2 (plain HTTP).
  """
  def service_check(self, env):
    import params
    env.set_params(params)
    Logger.info("Registry check passed")
    # Build the providers endpoint URL from the cluster configuration.
    if params.registry_ssl_enabled:
      registry_api = format("https://{params.hostname}:{params.registry_ssl_port}/api/v1/schemaregistry/schemaproviders")
    else:
      registry_api = format("http://{params.hostname}:{params.registry_port}/api/v1/schemaregistry/schemaproviders")
    Logger.info(registry_api)
    max_retries = 3
    success = False
    # Kerberized cluster: obtain a ticket as the smoke user first.
    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {params.smoke_user_keytab} {params.smokeuser_principal};")
      return_code, out = shell.checked_call(kinit_cmd,
                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                                            user=params.smokeuser,
                                            )
    for num in range(0, max_retries):
      try:
        Logger.info(format("Making http requests to {registry_api}"))
        # curl path: needed for SPNEGO (--negotiate) and for skipping
        # certificate verification (-k) on SSL clusters.
        if (params.security_enabled or params.registry_ssl_enabled):
          get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + registry_api
          return_code, stdout, _ = get_user_call_output(get_app_info_cmd, user=params.smokeuser, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',)
          try:
            # Success criterion: the body parses as JSON.
            json_response = json.loads(stdout)
            success = True
            Logger.info(format("Successfully made a API request to registry. {stdout}"))
            break
          except Exception as e:
            Logger.error(format("Response from REGISTRY API was not a valid JSON. Response: {stdout}"))
        else:
          # Plain-HTTP path: success criterion is an HTTP 200.
          response = urllib2.urlopen(registry_api)
          api_response = response.read()
          response_code = response.getcode()
          Logger.info(format("registry response http status {response_code}"))
          if response.getcode() != 200:
            Logger.error(format("Failed to fetch response for {registry_api}"))
            show_logs(params.registry_log_dir, params.registry_user)
            raise
          else:
            success = True
            Logger.info(format("Successfully made a API request to registry. {api_response}"))
            break
      except (urllib2.URLError, ExecutionFailed) as e:
        Logger.error(format("Failed to make API request to Registry server at {registry_api},retrying.. {num} out {max_retries}"))
        time.sleep(num * 10) # linear back-off: 0s, 10s, 20s between attempts
        continue
    if success != True:
      raise Fail(format("Failed to make API request to Registry server at {registry_api} after {max_retries}"))
if __name__ == "__main__":
  # Ambari invokes this script directly; execute() dispatches to the
  # command method (service_check) with the agent-provided environment.
  ServiceCheck().execute()
| 44.277778 | 150 | 0.693099 |
acfbf189d841f322a497c7dcb48a93420948f031 | 169 | py | Python | imageArgu.py | openhack-Doggy/machine-learning | 75fd78b04e4d67d9fa297e8d75e532d9c96f74a0 | [
"MIT"
] | null | null | null | imageArgu.py | openhack-Doggy/machine-learning | 75fd78b04e4d67d9fa297e8d75e532d9c96f74a0 | [
"MIT"
] | null | null | null | imageArgu.py | openhack-Doggy/machine-learning | 75fd78b04e4d67d9fa297e8d75e532d9c96f74a0 | [
"MIT"
] | null | null | null | from PIL import Image
# Load a test image, force it to 300x300 (aspect ratio is NOT preserved
# by Image.resize), and save the result as a new file. Note the output is
# named .png; Pillow picks the format from the extension.
image = Image.open("/Users/hangeulbae/Desktop/test.jpeg")
newImage = image.resize((300, 300))
newImage.save("/Users/hangeulbae/Desktop/test2.png")
| 33.8 | 57 | 0.763314 |
acfbf1dd6a9363a304d204ce3680b9b3637d5df1 | 7,697 | py | Python | custom_components/uhomeuponor/climate.py | dave-code-ruiz/uhomeuponor | 610b6ff094e4d1f180a01aa4f4b9279a4ebe25e4 | [
"MIT"
] | 8 | 2020-02-10T18:57:50.000Z | 2022-01-19T11:48:14.000Z | custom_components/uhomeuponor/climate.py | dave-code-ruiz/uhomeuponor | 610b6ff094e4d1f180a01aa4f4b9279a4ebe25e4 | [
"MIT"
] | 16 | 2020-01-31T17:40:34.000Z | 2021-04-15T07:28:38.000Z | custom_components/uhomeuponor/climate.py | dave-code-ruiz/uhomeuponor | 610b6ff094e4d1f180a01aa4f4b9279a4ebe25e4 | [
"MIT"
] | 4 | 2020-06-03T14:26:51.000Z | 2021-04-26T21:45:47.000Z | """Uponor U@Home integration
Exposes Climate control entities for Uponor thermostats
- UponorThermostat
"""
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
DOMAIN,
HVAC_MODE_AUTO, HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL,
PRESET_COMFORT, PRESET_ECO,
CURRENT_HVAC_HEAT, CURRENT_HVAC_COOL, CURRENT_HVAC_IDLE,
SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_ENTITY_ID, ATTR_TEMPERATURE, ATTR_BATTERY_LEVEL,
CONF_FRIENDLY_NAME, CONF_HOST, CONF_NAME, CONF_PREFIX,
PRECISION_TENTHS,
TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
from logging import getLogger
from .uponor_api import UponorClient
from .uponor_api.const import (UHOME_MODE_HEAT, UHOME_MODE_COOL, UHOME_MODE_ECO, UHOME_MODE_COMFORT)
# Configuration keys for enabling heat/cool support per platform entry.
CONF_SUPPORTS_HEATING = "supports_heating"
CONF_SUPPORTS_COOLING = "supports_cooling"
# YAML platform schema: host is mandatory; prefix and the two capability
# flags (both default True) are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PREFIX): cv.string,
    vol.Optional(CONF_SUPPORTS_HEATING, default=True): cv.boolean,
    vol.Optional(CONF_SUPPORTS_COOLING, default=True): cv.boolean,
})
# Extra state attribute names exposed on each thermostat entity.
ATTR_TECHNICAL_ALARM = "technical_alarm"
ATTR_RF_SIGNAL_ALARM = "rf_alarm"
ATTR_BATTERY_ALARM = "battery_alarm"
_LOGGER = getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    # NOTE(review): this config-entry hook currently only logs and returns
    # None — it adds no entities. The delegation below (and the whole
    # async_setup_platform it would call) is commented out; confirm whether
    # config-entry setup is intentionally disabled in favor of YAML
    # setup_platform().
    _LOGGER.info("init setup climate platform for %s", config_entry)
    # return await async_setup_platform(
    #     hass, config_entry.data, async_add_entities, discovery_info=None
    # )
# async def async_setup_platform(
#     hass, config, async_add_entities, discovery_info=None
# ) -> bool:
#     """Set up the Alexa alarm control panel platform."""
#     """Set up climate for device."""
#     _LOGGER.info("init setup climate platform for %s", config)
#     host = config[CONF_HOST]
#     prefix = config[CONF_PREFIX]
#     supports_heating = True
#     supports_cooling = True
#     _LOGGER.info("init setup host %s", host)
#     uponor = await hass.async_add_executor_job(lambda: UponorClient(hass=hass, server=host))
#     try:
#         await uponor.rescan()
#     except (ValueError, RequestException) as err:
#         _LOGGER.error("Received error from UHOME: %s", err)
#         raise PlatformNotReady
#     async_add_entities([UponorThermostat(prefix, uponor, thermostat, supports_heating, supports_cooling)
#                         for thermostat in uponor.thermostats], True)
#     _LOGGER.info("finish setup climate platform for Uhome Uponor")
#     return True
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up Uponor thermostats from YAML platform configuration.

    Scans the U@Home controller at CONF_HOST and adds one
    UponorThermostat entity per discovered thermostat. Raises
    PlatformNotReady so HA retries later when the controller is
    unreachable or returns bad data.
    """
    # NOTE(review): name is read but never used below — confirm intent.
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    prefix = config.get(CONF_PREFIX)
    supports_heating = config.get(CONF_SUPPORTS_HEATING)
    supports_cooling = config.get(CONF_SUPPORTS_COOLING)
    uponor = UponorClient(hass=hass, server=host)
    try:
        uponor.rescan()
    except (ValueError, RequestException) as err:
        _LOGGER.error("Received error from UHOME: %s", err)
        raise PlatformNotReady
    # Add Climate / Thermostat entities
    # Second argument True asks HA to call update() before first state write.
    add_entities([UponorThermostat(prefix, uponor, thermostat, supports_heating, supports_cooling)
                  for thermostat in uponor.thermostats], True)
    _LOGGER.info("finish setup climate platform for Uhome Uponor")
class UponorThermostat(ClimateEntity):
    """HA Thermostat climate entity. Utilizes Uponor U@Home API to interact with U@Home"""
    def __init__(self, prefix, uponor_client, thermostat, supports_heating, supports_cooling):
        # Entity starts unavailable until the first successful update().
        self._available = False
        self.prefix = prefix
        self.uponor_client = uponor_client
        self.thermostat = thermostat
        self.supports_heating = supports_heating
        self.supports_cooling = supports_cooling
        # Stable unique id derived from controller/thermostat indexes.
        self.identity = f"{prefix or ''}controller{str(thermostat.controller_index)}_thermostat{str(thermostat.thermostat_index)}_thermostat"
    # ** Generic **
    @property
    def name(self):
        # Room name as reported by the thermostat, with optional prefix.
        return f"{self.prefix or ''}{self.thermostat.by_name('room_name').value}"
    @property
    def unique_id(self):
        return self.identity
    @property
    def available(self):
        return self._available
    # ** Static **
    @property
    def temperature_unit(self):
        return TEMP_CELSIUS
    @property
    def precision(self):
        return PRECISION_TENTHS
    @property
    def target_temperature_step(self):
        # NOTE(review): returns a string; HA normally expects a float step
        # (0.5) — confirm downstream handling.
        return '0.5'
    @property
    def supported_features(self):
        return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
    @property
    def hvac_modes(self):
        # Advertise only the modes enabled in the platform config.
        modes = []
        if self.supports_heating:
            modes.append(HVAC_MODE_HEAT)
        if self.supports_cooling:
            modes.append(HVAC_MODE_COOL)
        return modes
    @property
    def preset_modes(self):
        return [PRESET_ECO, PRESET_COMFORT]
    # ** State **
    @property
    def current_humidity(self):
        return self.thermostat.by_name('rh_value').value
    @property
    def current_temperature(self):
        return self.thermostat.by_name('room_temperature').value
    @property
    def target_temperature(self):
        return self.thermostat.by_name('room_setpoint').value
    @property
    def extra_state_attributes(self):
        # Surface device alarms as extra attributes for automations.
        return {
            ATTR_TECHNICAL_ALARM: self.thermostat.by_name('technical_alarm').value,
            ATTR_RF_SIGNAL_ALARM: self.thermostat.by_name('rf_alarm').value,
            ATTR_BATTERY_ALARM: self.thermostat.by_name('battery_alarm').value
        }
    @property
    def preset_mode(self):
        # Preset is a controller-wide (uhome) flag, not per-thermostat.
        if self.uponor_client.uhome.by_name('forced_eco_mode').value == 1:
            return PRESET_ECO
        return PRESET_COMFORT
    @property
    def hvac_mode(self):
        # hc_mode is controller-wide: 1 = cooling, otherwise heating.
        if self.uponor_client.uhome.by_name('hc_mode').value == 1:
            return HVAC_MODE_COOL
        return HVAC_MODE_HEAT
    @property
    def hvac_action(self):
        # room_in_demand == 0 means the zone is not calling for heat/cool.
        if self.thermostat.by_name('room_in_demand').value == 0:
            return CURRENT_HVAC_IDLE
        if self.hvac_mode == HVAC_MODE_HEAT:
            return CURRENT_HVAC_HEAT
        else:
            return CURRENT_HVAC_COOL
    # ** Actions **
    def update(self):
        # Update Uhome (to get HC mode) and thermostat
        try:
            self.uponor_client.update_devices(self.uponor_client.uhome, self.thermostat)
            valid = self.thermostat.is_valid()
            self._available = valid
            if not valid:
                _LOGGER.debug("The thermostat '%s' had invalid data, and is therefore unavailable", self.identity)
        except Exception as ex:
            # Any communication failure marks the entity unavailable
            # instead of propagating into HA's update loop.
            self._available = False
            _LOGGER.error("Uponor thermostat was unable to update: %s", ex)
    def set_hvac_mode(self, hvac_mode):
        # Map HA modes onto the U@Home heat/cool values.
        if hvac_mode == HVAC_MODE_HEAT:
            value = UHOME_MODE_HEAT
        else:
            value = UHOME_MODE_COOL
        self.thermostat.set_hvac_mode(value)
    # Support setting preset_mode
    def set_preset_mode(self, preset_mode):
        if preset_mode == PRESET_ECO:
            value = UHOME_MODE_ECO
        else:
            value = UHOME_MODE_COMFORT
        self.thermostat.set_preset_mode(value)
        self.thermostat.set_auto_mode()
    def set_temperature(self, **kwargs):
        # HA passes the new setpoint under ATTR_TEMPERATURE; ignore calls
        # without it (e.g. range-based target_temp_low/high requests).
        if kwargs.get(ATTR_TEMPERATURE) is None:
            return
        self.thermostat.set_setpoint(kwargs.get(ATTR_TEMPERATURE))
| 32.476793 | 141 | 0.692737 |
acfbf2f18f94b9d809dbc94527e683dc655f0966 | 1,447 | py | Python | pydispix/log.py | ItsDrike/pydispix | a1696295e6860c2240730405c1b93b05a7bf2564 | [
"MIT"
] | 3 | 2021-05-27T19:23:46.000Z | 2021-06-10T01:55:14.000Z | pydispix/log.py | ItsDrike/pydispix | a1696295e6860c2240730405c1b93b05a7bf2564 | [
"MIT"
] | null | null | null | pydispix/log.py | ItsDrike/pydispix | a1696295e6860c2240730405c1b93b05a7bf2564 | [
"MIT"
] | 1 | 2021-05-27T19:24:43.000Z | 2021-05-27T19:24:43.000Z | import logging
import os
import sys
import colorama
class ColoredFormatter(logging.Formatter):
    """Logging formatter that wraps the level name in ANSI color codes.

    The color is chosen from COLORS by level name and reset with
    RESET_SEQ. Coloring can be disabled per-formatter via *use_color*.
    """

    # Level name -> colorama escape sequence prefix.
    COLORS = {
        "WARNING": colorama.Fore.YELLOW,
        "INFO": colorama.Fore.WHITE,
        "DEBUG": colorama.Fore.BLUE,
        "CRITICAL": f"{colorama.Style.BRIGHT}{colorama.Fore.RED}",
        "ERROR": f"{colorama.Style.DIM}{colorama.Fore.RED}",
    }
    # Reset both style and foreground color after the level name.
    RESET_SEQ = f"{colorama.Style.RESET_ALL}{colorama.Fore.RESET}"

    def __init__(self, fmt: str, *args, use_color: bool = True, **kwargs) -> None:
        """Create the formatter.

        :param fmt: %-style logging format string.
        :param use_color: when False, behave like a plain Formatter.
        """
        super().__init__(fmt, *args, **kwargs)
        self.use_color = use_color

    def format(self, record: logging.LogRecord) -> str:
        """Format *record*, temporarily colorizing record.levelname.

        Bug fix: the original permanently mutated record.levelname, so the
        same LogRecord rendered by another handler (e.g. a file handler)
        would contain ANSI escape codes. The original name is now restored
        after formatting.
        """
        levelname = record.levelname
        if self.use_color and levelname in self.COLORS:
            record.levelname = self.COLORS[levelname] + levelname + self.RESET_SEQ
        try:
            return super().format(record)
        finally:
            # Records are shared across handlers; undo the mutation.
            record.levelname = levelname
def setup_logging():
    """Attach a colorized stdout handler to the 'pydispix' logger.

    The logger level is DEBUG when a DEBUG environment variable is set,
    INFO otherwise.
    """
    fmt = (
        f"{colorama.Fore.GREEN}%(asctime)s {colorama.Fore.RESET} | "
        f"{colorama.Style.BRIGHT} %(name)s {colorama.Style.RESET_ALL} | "
        "%(levelname)s | %(message)s"
    )
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(ColoredFormatter(fmt))
    pydispix_logger = logging.getLogger("pydispix")
    if 'DEBUG' in os.environ:
        pydispix_logger.setLevel(logging.DEBUG)
    else:
        pydispix_logger.setLevel(logging.INFO)
    pydispix_logger.addHandler(handler)
| 33.651163 | 82 | 0.666206 |
acfbf41147fca21a391f75ad1af1ffbc6f420da7 | 8,689 | py | Python | src/mem/XBar.py | mzwang25/gem5-copy | e122d15e57b8fcc153d6942bcd6518f75fe191a1 | [
"BSD-3-Clause"
] | null | null | null | src/mem/XBar.py | mzwang25/gem5-copy | e122d15e57b8fcc153d6942bcd6518f75fe191a1 | [
"BSD-3-Clause"
] | null | null | null | src/mem/XBar.py | mzwang25/gem5-copy | e122d15e57b8fcc153d6942bcd6518f75fe191a1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012, 2015, 2017, 2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects.System import System
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from m5.objects.ClockedObject import ClockedObject
class BaseXBar(ClockedObject):
    """Abstract base class for all gem5 crossbar interconnect models.

    Declares the vector ports, latency parameters and default-port
    behaviour shared by coherent and non-coherent crossbars.  The C++
    implementation lives in mem/xbar.hh.
    """
    type = 'BaseXBar'
    abstract = True
    cxx_header = "mem/xbar.hh"

    # Slave side faces masters, master side faces slaves (gem5 naming).
    slave = VectorSlavePort("Vector port for connecting masters")
    master = VectorMasterPort("Vector port for connecting slaves")

    # Latencies governing the time taken for the various paths a
    # packet has through the crossbar. Note that the crossbar itself
    # does not add the latency due to assumptions in the coherency
    # mechanism. Instead the latency is annotated on the packet and
    # left to the neighbouring modules.
    #
    # A request incurs the frontend latency, possibly snoop filter
    # lookup latency, and forward latency. A response incurs the
    # response latency. Frontend latency encompasses arbitration and
    # deciding what to do when a request arrives. the forward latency
    # is the latency involved once a decision is made to forward the
    # request. The response latency, is similar to the forward
    # latency, but for responses rather than requests.
    frontend_latency = Param.Cycles("Frontend latency")
    forward_latency = Param.Cycles("Forward latency")
    response_latency = Param.Cycles("Response latency")

    # Width governing the throughput of the crossbar
    width = Param.Unsigned("Datapath width per port (bytes)")

    # The default port can be left unconnected, or be used to connect
    # a default slave port
    default = MasterPort("Port for connecting an optional default slave")

    # The default port can be used unconditionally, or based on
    # address range, in which case it may overlap with other
    # ports. The default range is always checked first, thus creating
    # a two-level hierarchical lookup. This is useful e.g. for the PCI
    # xbar configuration.
    use_default_range = Param.Bool(False, "Perform address mapping for " \
                                       "the default port")
class NoncoherentXBar(BaseXBar):
    """Crossbar without any snooping/coherence support (e.g. below the
    point of coherency, or for I/O traffic)."""
    type = 'NoncoherentXBar'
    cxx_header = "mem/noncoherent_xbar.hh"
class CoherentXBar(BaseXBar):
    """Crossbar that participates in the coherence protocol: it forwards
    snoops, optionally through a snoop filter, and can act as the point
    of coherency and/or unification."""
    type = 'CoherentXBar'
    cxx_header = "mem/coherent_xbar.hh"

    # The coherent crossbar additionally has snoop responses that are
    # forwarded after a specific latency.
    snoop_response_latency = Param.Cycles("Snoop response latency")

    # An optional snoop filter
    snoop_filter = Param.SnoopFilter(NULL, "Selected snoop filter")

    # Maximum number of outstanding snoop requests for sanity checks
    max_outstanding_snoops = Param.Int(512, "Max. outstanding snoops allowed")

    # Maximum routing table size for sanity checks
    max_routing_table_size = Param.Int(512, "Max. routing table size")

    # Determine how this crossbar handles packets where caches have
    # already committed to responding, by establishing if the crossbar
    # is the point of coherency or not.
    point_of_coherency = Param.Bool(False, "Consider this crossbar the " \
                                        "point of coherency")

    # Specify whether this crossbar is the point of unification.
    point_of_unification = Param.Bool(False, "Consider this crossbar the " \
                                          "point of unification")

    system = Param.System(Parent.any, "System that the crossbar belongs to.")
class SnoopFilter(SimObject):
    """Tracks which upstream ports may hold a block, so a coherent
    crossbar can direct snoops instead of broadcasting them."""
    type = 'SnoopFilter'
    cxx_header = "mem/snoop_filter.hh"

    # Lookup latency of the snoop filter, added to requests that pass
    # through a coherent crossbar.
    lookup_latency = Param.Cycles(1, "Lookup latency")

    system = Param.System(Parent.any, "System that the crossbar belongs to.")

    # Sanity check on max capacity to track, adjust if needed.
    max_capacity = Param.MemorySize('8MB', "Maximum capacity of snoop filter")
# We use a coherent crossbar to connect multiple masters to the L2
# caches. Normally this crossbar would be part of the cache itself.
class L2XBar(CoherentXBar):
    """Low-latency coherent crossbar in front of a unified L2 cache."""
    # 256-bit crossbar by default
    width = 32

    # Assume that most of this is covered by the cache latencies, with
    # no more than a single pipeline stage for any packet.
    frontend_latency = 1
    forward_latency = 0
    response_latency = 1
    snoop_response_latency = 1

    # Use a snoop-filter by default, and set the latency to zero as
    # the lookup is assumed to overlap with the frontend latency of
    # the crossbar
    snoop_filter = SnoopFilter(lookup_latency = 0)

    # This specialisation of the coherent crossbar is to be considered
    # the point of unification, it connects the dcache and the icache
    # to the first level of unified cache.
    point_of_unification = True
class L3XBar(CoherentXBar):
    """Coherent crossbar in front of an L3 cache; same single-stage
    latency assumptions and zero-latency snoop filter as L2XBar, but
    without declaring itself the point of unification."""
    # 256-bit crossbar by default (width is in bytes per port)
    width = 32
    frontend_latency = 1
    forward_latency = 0
    response_latency = 1
    snoop_response_latency = 1
    # Snoop-filter lookup assumed to overlap with the frontend latency.
    snoop_filter = SnoopFilter(lookup_latency = 0)
# One of the key coherent crossbar instances is the system
# interconnect, tying together the CPU clusters, GPUs, and any I/O
# coherent masters, and DRAM controllers.
class SystemXBar(CoherentXBar):
    """The main system interconnect: point of coherency (and of
    unification for cacheless systems), with multi-stage latencies."""
    # 128-bit crossbar by default
    width = 16

    # A handful pipeline stages for each portion of the latency
    # contributions.
    frontend_latency = 3
    forward_latency = 4
    response_latency = 2
    snoop_response_latency = 4

    # Use a snoop-filter by default
    snoop_filter = SnoopFilter(lookup_latency = 1)

    # This specialisation of the coherent crossbar is to be considered
    # the point of coherency, as there are no (coherent) downstream
    # caches.
    point_of_coherency = True

    # This specialisation of the coherent crossbar is to be considered
    # the point of unification, it connects the dcache and the icache
    # to the first level of unified cache. This is needed for systems
    # without caches where the SystemXBar is also the point of
    # unification.
    point_of_unification = True
# In addition to the system interconnect, we typically also have one
# or more on-chip I/O crossbars. Note that at some point we might want
# to also define an off-chip I/O crossbar such as PCIe.
class IOXBar(NoncoherentXBar):
    """Non-coherent crossbar for on-chip I/O traffic."""
    # 128-bit crossbar by default
    width = 16

    # Assume a simpler datapath than a coherent crossbar, incurring
    # less pipeline stages for decision making and forwarding of
    # requests.
    frontend_latency = 2
    forward_latency = 1
    response_latency = 2
| 42.385366 | 78 | 0.737714 |
acfbf41dbc1527dd427b5c6406b885ca923d64a9 | 4,734 | py | Python | src/Relation_batch.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
] | null | null | null | src/Relation_batch.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
] | 1 | 2020-09-02T12:24:59.000Z | 2020-09-02T12:24:59.000Z | src/Relation_batch.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
] | null | null | null | '''
Relation modules:
Spatial Relation Module - Relation Module
Relation Unit of Spatial Relation Module for atom/class level features - RelationUnit
Temporal Relation Module - TemporalRelation
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from torch.nn import functional as F
# Device configuration
# Chosen once at import time; the relation modules below reference this
# module-level global when placing tensors.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class RelationModule(nn.Module):
    """Multi-head spatial relation module.

    Runs ``n_relations`` independent RelationUnit attention heads over the
    same appearance features, concatenates their outputs along the feature
    axis, and adds the input back (residual connection).  Each head emits
    ``appearance_feature_dim / n_relations`` features so the concatenation
    matches the input width.
    """

    def __init__(self, n_relations = 4, appearance_feature_dim=64, key_feature_dim = 8, num_parts=37):
        super(RelationModule, self).__init__()
        self.Nr = n_relations
        self.N = num_parts
        # Per-head value width; heads concatenate back to the input width.
        self.value_feature_dim = int(appearance_feature_dim / self.Nr)
        self.relation = nn.ModuleList(
            RelationUnit(appearance_feature_dim, key_feature_dim,
                         self.value_feature_dim, self.N)
            for _ in range(self.Nr))

    def forward(self, f_a):
        """Apply every head to f_a, concatenate on the last axis, add residual."""
        head_outputs = [head(f_a) for head in self.relation]
        return torch.cat(head_outputs, -1) + f_a
class RelationUnit(nn.Module):
    """One attention head of the spatial relation module.

    Projects appearance features to keys/queries/values, computes scaled
    dot-product attention between the N parts of each frame, adds a
    learned input-independent pairwise bias (w_fix), and returns the
    attention-weighted sum of values.  Self-attention weights (the
    diagonal) are forced to zero both before the softmax and after the
    bias is added.
    """

    def __init__(self, appearance_feature_dim, key_feature_dim, value_feature_dim, N):
        super(RelationUnit, self).__init__()
        self.dim_k = key_feature_dim
        # Key / query / value projections of the appearance features.
        self.WK = nn.Linear(appearance_feature_dim, key_feature_dim, bias=False)
        self.WQ = nn.Linear(appearance_feature_dim, key_feature_dim, bias=False)
        self.WV = nn.Linear(appearance_feature_dim, value_feature_dim, bias=False)
        # Learned pairwise bias added to the attention map, uniform-initialised.
        self.w_fix = nn.Parameter(nn.init.uniform_(torch.empty(N, N), -1/np.sqrt(N), 1/np.sqrt(N)))

    def forward(self, f_a):
        '''
        Input dim: [B, N, df]
            B - batch_size frames
            N - atom/class numbers
            df - relation feature dimension
        Output dim: [B, N, dv] (one head's value width)
        '''
        B, N, _ = f_a.size()
        # FIX: build a boolean mask directly on the input's device.  The
        # original `torch.eye(N).byte().to(device)` used a deprecated byte
        # mask for masked_fill_ and the module-level `device` global, which
        # can disagree with f_a's device (e.g. a CPU tensor on a CUDA host).
        mask = torch.eye(N, dtype=torch.bool, device=f_a.device)
        w_k = self.WK(f_a)  # [B, N, dim_k]
        w_k = w_k.view(B, N, 1, self.dim_k)
        w_q = self.WQ(f_a)  # [B, N, dim_k]
        w_q = w_q.view(B, 1, N, self.dim_k)
        scaled_dot = torch.sum((w_k*w_q), -1)  # dot product of Key, Query matrix
        scaled_dot = scaled_dot / np.sqrt(self.dim_k)  # [B, N, N]
        # w_mn is the attention weight of key m and query n
        w_mn = scaled_dot.view(B, N, N)
        w_mn.masked_fill_(mask, 0)  # set diagonal element w_nn to 0
        w_mn = nn.functional.softmax(w_mn, dim=-1)
        w_mn = w_mn + self.w_fix
        w_mn.masked_fill_(mask, 0)  # zero the diagonal again after the bias
        w_v = self.WV(f_a)  # [B, N, dv]
        w_mn = w_mn.view(B, N, N, 1)
        w_v = w_v.view(B, N, 1, -1)
        output = w_mn*w_v  # [B, N, N, dv]
        output = torch.sum(output, -2)
        return output  # [B, N, dv]
class TemporalRelation(nn.Module):
    """Temporal relation module.

    Linearly transforms the per-frame features with WT, then for each time
    step t averages the transformed features of its temporal neighbours
    (previous and next frame, clamped at the sequence ends; steps inside
    the initial ``time_window`` use frame 0 as 'previous') and adds the
    original features as a residual.
    """

    def __init__(self, feat_dim, time_window=1):
        super(TemporalRelation, self).__init__()
        self.time_window = time_window
        self.feat_dim = feat_dim
        self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)

    def forward(self, feats):
        # feats - [t, num_classes, df]
        att = self.WT(feats)  # [t, num_classes, df]
        steps = att.size(0)
        neighbor_sums = []
        for t in range(steps):
            # 'previous' frame: frame 0 inside the warm-up window, else t-1.
            prev_idx = 0 if t < self.time_window else t - 1
            # 'next' frame: the frame itself at the last step, else t+1.
            next_idx = t if t == steps - 1 else t + 1
            neighbor_sums.append(att[prev_idx, :, :] + att[next_idx, :, :])
        stacked = torch.stack(neighbor_sums, dim=0)  # [t, num_classes, df]
        return stacked / 2 + feats
| 40.810345 | 138 | 0.592945 |
acfbf58ab0e0b23d0983edc8e14f398dadbd51a8 | 5,527 | py | Python | 11B-124/pipeline5.0.0/EVLA_pipe_priorcals.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | 1 | 2019-04-11T00:37:56.000Z | 2019-04-11T00:37:56.000Z | 12A-304/pipeline5.0.0/EVLA_pipe_priorcals.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | null | null | null | 12A-304/pipeline5.0.0/EVLA_pipe_priorcals.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | null | null | null | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# PRIOR CALIBRATIONS
# Gain curves, opacities, antenna position corrections
# requantizer gains (will require CASA 4.1)
# NOTE(review): this is a CASA/EVLA pipeline script.  Names such as
# logprint, runtiming, default, gencal, ms_active, all_spw and tau are
# presumably injected by the surrounding pipeline environment (execfile
# style) -- confirm before running standalone.  CASA tasks here take their
# arguments from module-level globals set between default(...) and the
# task call.
logprint ("Starting EVLA_pipe_priorcals.py", logfileout='logs/priorcals.log')
time_list=runtiming('priorcals', 'start')
QA2_priorcals='Pass'

# Gain curves first:
default(gencal)
vis=ms_active
caltable='gain_curves.g'
caltype='gc'
spw=''
antenna=''
pol=''
parameter=[]
gencal()

# Opacities:
default(gencal)
vis=ms_active
caltable='opacities.g'
caltype='opac'
spw=all_spw
antenna=''
pol=''
parameter=tau
gencal()
# Apply switched power calibration (when commissioned); for now, just
# requantizer gains (needs casa4.1!), and only for data with
# sensible switched power tables (Feb 24, 2011)
# 55616.6 is MJD for 2011-02-24 -- presumably when usable switched-power
# tables became available; confirm against EVLA commissioning notes.
if startdate >= 55616.6:
    default(gencal)
    vis=ms_active
    caltable='requantizergains.g'
    caltype='rq'
    spw=''
    antenna=''
    pol=''
    parameter=[]
    gencal()

# Collect the prior calibration tables that were actually produced.
if os.path.exists('requantizergains.g'):
    priorcals=['gain_curves.g','opacities.g','requantizergains.g']
else:
    priorcals=['gain_curves.g','opacities.g']
# Correct for antenna position errors, if known
# NB: for the realtime pipeline these will not be available yet, but all
# SBs that do not have good antenna positions should be re-processed when
# they are available
try:
    default(gencal)
    vis=ms_active
    caltable='antposcal.p'
    caltype='antpos'
    spw=''
    antenna=''
    pol=''
    parameter=[]
    gencal()
    #
    # gencal only writes the table when corrections exist for this MS.
    if os.path.exists('antposcal.p'):
        priorcals.append('antposcal.p')
        antenna_offsets=correct_ant_posns(ms_active)
        logprint ("Correcting for known antenna position errors", logfileout='logs/priorcals.log')
        logprint (str(antenna_offsets), logfileout='logs/priorcals.log')
    else:
        logprint ("No antenna position corrections found/needed", logfileout='logs/priorcals.log')
except:
    # Deliberate best-effort: missing corrections must not abort the pipeline.
    logprint ("No antenna position corrections found/needed", logfileout='logs/priorcals.log')
# Lastly, make switched power table. This is not used in the
# pipeline, but may be used for QA and for flagging, especially at
# S-band for fields near the geostationary satellite belt. Only
# relevant for data taken on 24-Feb-2011 or later.
if startdate >= 55616.6:
    default(gencal)
    vis=ms_active
    caltable='switched_power.g'
    caltype='swpow'
    spw=''
    antenna=''
    pol=''
    parameter=[]
    gencal()

# NOTE(review): indentation was lost upstream; the plotting section below
# is kept at top level here -- confirm whether it originally sat inside
# the `startdate` conditional (plotcal on a missing switched_power.g would
# fail for older data otherwise).
#
# plot switched power table
#
logprint ("Plotting switched power table", logfileout='logs/priorcals.log')
#
# Three antennas per figure page.
nplots=int(numAntenna/3)
#
if ((numAntenna%3)>0):
    nplots = nplots + 1

# Switched-power gain plots, one PNG per group of three antennas.
for ii in range(nplots):
    filename='switched_power'+str(ii)+'.png'
    syscommand='rm -rf '+filename
    os.system(syscommand)
    #
    antPlot=str(ii*3)+'~'+str(ii*3+2)
    #
    default('plotcal')
    caltable='switched_power.g'
    xaxis='time'
    yaxis='spgain'
    poln=''
    field=''
    antenna=antPlot
    spw=''
    timerange=''
    subplot=311
    overplot=False
    clearpanel='Auto'
    iteration='antenna'
    plotrange=[]
    showflags=False
    plotsymbol='o'
    plotcolor='blue'
    markersize=5.0
    fontsize=10.0
    showgui=False
    figfile=filename
    plotcal()

# System temperature plots, same layout, yaxis='tsys'.
for ii in range(nplots):
    filename='Tsys'+str(ii)+'.png'
    syscommand='rm -rf '+filename
    os.system(syscommand)
    #
    antPlot=str(ii*3)+'~'+str(ii*3+2)
    #
    default('plotcal')
    caltable='switched_power.g'
    xaxis='time'
    yaxis='tsys'
    poln=''
    field=''
    antenna=antPlot
    spw=''
    timerange=''
    subplot=311
    overplot=False
    clearpanel='Auto'
    iteration='antenna'
    plotrange=[]
    showflags=False
    plotsymbol='o'
    plotcolor='blue'
    markersize=5.0
    fontsize=10.0
    showgui=False
    figfile=filename
    plotcal()
# Until we know what error messages to search for in priorcals,
# leave QA2 score set to "Pass".
logprint ("QA2 score: "+QA2_priorcals, logfileout='logs/priorcals.log')
logprint ("Finished EVLA_pipe_priorcals.py", logfileout='logs/priorcals.log')
# Record the stage end time and checkpoint pipeline state.
time_list=runtiming('priorcals', 'end')

pipeline_save()
acfbf6440baccae75c6e36288683473b1573dc65 | 917 | py | Python | examples/progress.py | timgates42/pyvips | 346e60ff3ab34ae9a36c980a6a3a139346f757ba | [
"MIT"
] | 142 | 2017-08-01T12:33:20.000Z | 2018-09-15T16:50:32.000Z | examples/progress.py | timgates42/pyvips | 346e60ff3ab34ae9a36c980a6a3a139346f757ba | [
"MIT"
] | 62 | 2017-08-01T16:22:09.000Z | 2018-09-20T08:00:40.000Z | examples/progress.py | timgates42/pyvips | 346e60ff3ab34ae9a36c980a6a3a139346f757ba | [
"MIT"
] | 15 | 2017-08-04T09:51:29.000Z | 2018-08-25T18:42:49.000Z | #!/usr/bin/env python
from __future__ import print_function
import pyvips
def progress_print(name, progress):
print('{}:'.format(name))
print(' run = {}'.format(progress.run))
print(' eta = {}'.format(progress.eta))
print(' tpels = {}'.format(progress.tpels))
print(' npels = {}'.format(progress.npels))
print(' percent = {}'.format(progress.percent))
def preeval_cb(image, progress):
progress_print('preeval', progress)
def eval_cb(image, progress):
progress_print('eval', progress)
# you can kill computation if necessary
if progress.percent > 50:
image.set_kill(True)
def posteval_cb(image, progress):
progress_print('posteval', progress)
image = pyvips.Image.black(1, 500)
image.set_progress(True)
image.signal_connect('preeval', preeval_cb)
image.signal_connect('eval', eval_cb)
image.signal_connect('posteval', posteval_cb)
image.avg()
| 23.512821 | 53 | 0.690294 |
acfbf6cd34be566d5c03af3e954e04a694f90b46 | 2,046 | py | Python | processing/reconbench/ply2npz.py | raphaelsulzer/dgnn | 08ef076e80ea38daf000ac2be6771363d6d4ea9a | [
"MIT"
] | 18 | 2021-11-22T19:22:04.000Z | 2022-03-22T03:41:04.000Z | processing/reconbench/ply2npz.py | raphaelsulzer/dgnn | 08ef076e80ea38daf000ac2be6771363d6d4ea9a | [
"MIT"
] | 4 | 2022-01-14T14:35:20.000Z | 2022-03-28T16:38:59.000Z | processing/reconbench/ply2npz.py | raphaelsulzer/dgnn | 08ef076e80ea38daf000ac2be6771363d6d4ea9a | [
"MIT"
] | 2 | 2021-12-20T05:59:37.000Z | 2022-02-16T04:50:03.000Z | import argparse
import trimesh
import os
import numpy as np
def main(args):
    """Merge one reconbench scan's sensor and normal .ply files into a .npz.

    Reads <model>_<scan_conf>.ply from scans/with_sensor (per-point sensor
    positions sx/sy/sz) and scans/with_normals (per-point normals nx/ny/nz)
    under args.user_dir/args.data_dir, and writes float64 arrays
    points/normals/sensor_position to 3_scan/<model>_<scan_conf>.npz.
    """
    scan_name = args.model + "_" + args.scan_conf
    sfile = os.path.join(args.user_dir, args.data_dir, "scans", "with_sensor", scan_name + ".ply")
    nfile = os.path.join(args.user_dir, args.data_dir, "scans", "with_normals", scan_name + ".ply")

    # Per-point sensor positions are stored as extra vertex properties.
    pc = trimesh.load(sfile)
    sx = pc.metadata['ply_raw']['vertex']['data']['sx']
    sy = pc.metadata['ply_raw']['vertex']['data']['sy']
    sz = pc.metadata['ply_raw']['vertex']['data']['sz']
    sensor_pos = np.concatenate((sx, sy, sz), axis=1)

    # Per-point normals come from the companion file.
    pc = trimesh.load(nfile)
    nx = pc.metadata['ply_raw']['vertex']['data']['nx']
    ny = pc.metadata['ply_raw']['vertex']['data']['ny']
    nz = pc.metadata['ply_raw']['vertex']['data']['nz']
    normals = np.concatenate((nx, ny, nz), axis=1)

    points = pc.vertices.astype(np.float64)
    normals = normals.astype(np.float64)
    sensor_pos = sensor_pos.astype(np.float64)

    # Both files must describe the same point set.
    assert(normals.shape[0] == sensor_pos.shape[0])
    assert(pc.vertices.shape[0] == sensor_pos.shape[0])

    out_folder = os.path.join(args.user_dir, args.data_dir, "3_scan")
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    # Reuse out_folder instead of re-joining the same path components
    # (duplicated in the original); the leftover debug statement `a=5`
    # was also removed.
    out_file = os.path.join(out_folder, scan_name + ".npz")
    np.savez(out_file, points=points, normals=normals, sensor_position=sensor_pos)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='reconbench evaluation')
    parser.add_argument('--user_dir', type=str, default="/mnt/raphael/",
                        help='the user folder, or PhD folder.')
    parser.add_argument('-d', '--data_dir', type=str, default="reconbench/",
                        help='working directory which should include the different scene folders.')
    args = parser.parse_args()

    models = ["anchor", "daratech", "dc", "lordquas", "gargoyle"]
    scan_confs = [0, 1, 2, 3, 4]

    # Process every (scan configuration, model) pair.  Note the loop
    # assigns directly onto the argparse namespace (`args.model`), which
    # main() then reads.
    for s in scan_confs:
        for args.model in models:
            args.scan_conf = str(s)
            main(args)
acfbf84e980c486e9a7f3fd7767108af479dfaa7 | 6,102 | py | Python | ogbot/config.py | winiciuscota/ogame-bot | ec788c81e8512e7f017fea033cc2567a3d04563d | [
"MIT"
] | 14 | 2016-01-26T19:19:11.000Z | 2020-04-05T19:42:39.000Z | ogbot/config.py | winiciuscota/ogame-bot | ec788c81e8512e7f017fea033cc2567a3d04563d | [
"MIT"
] | 23 | 2016-01-26T21:57:06.000Z | 2021-06-25T15:16:23.000Z | ogbot/config.py | winiciuscota/ogame-bot | ec788c81e8512e7f017fea033cc2567a3d04563d | [
"MIT"
] | 17 | 2016-01-26T19:26:36.000Z | 2021-06-16T11:10:15.000Z | import ConfigParser
import logging
import os
import re
class Config(object):
    """Bot configuration merged from ``user.cfg`` and command-line arguments.

    Reads all sections of ``user.cfg`` (located one directory above this
    file) into attributes; command-line parameters, when given, override
    the mode, attack range and target planet.
    """

    def __init__(self, args):
        """Load user.cfg and apply overrides from the parsed CLI args."""
        config = ConfigParser.ConfigParser()
        # Resolve user.cfg relative to the project root (one level up),
        # and make it the working directory for the rest of the run.
        current_file = os.path.abspath(os.path.dirname(__file__))
        path = os.path.join(current_file, '../')
        os.chdir(path)
        cfg = config.read('user.cfg')
        self.logger = logging.getLogger('OGBot')
        self.WRONG_ARGUMENTS_MESSAGE = """
You must pass at least 3 arguments:
You must have the following data on the user.cfg
[UserInfo]
username = your_username
password = your_password
universe = your_universe
[Settings]
DefaultMode = transport_resources_to_planet
DefaultOriginPlanet = origin_planet_name
AttackRange = 10
HowLongToWaitForProbes = 60
"""
        parameters = vars(args)
        if not cfg:
            # Config file is missing/empty: explain the required layout and quit.
            self.logger.error(self.WRONG_ARGUMENTS_MESSAGE)
            exit()
        else:
            # Set configuration from config file
            self.logger.info('Getting user info from config file')
            # User config options
            self.username = config.get('UserInfo', 'Username')
            self.password = config.get('UserInfo', 'Password')
            self.universe = config.get('UserInfo', 'Universe')
            self.country = config.get('UserInfo', 'Country')
            # General config options
            self.mode = self.parse_multiple_value_config(config.get('General', 'DefaultMode'))
            self.default_origin_planet_name = config.get('General', 'DefaultOriginPlanet')
            self.excluded_planets = map(lambda x: x.strip().lower(),
                                        config.get('General', 'ExcludedPlanets').split(','))
            self.log_level = config.get('General', 'LogLevel')  # Get loglevel
            # Development config options
            self.build_fusion_reactor = config.getboolean('Development', 'FusionReactor')  # build fusion reactor or not
            self.build_solar_plant = config.getboolean('Development', 'SolarPlant')  # build solar plant or not
            self.build_storage = config.getboolean('Development', 'Storage')  # build storage structures or not
            self.defense_proportion = self.parse_multiple_value_config(config.get('Development', 'DefenseProportion'))
            self.spend_excess_metal_on_rl = config.getboolean('Development', 'SpendExcessMetalOnRL')
            # Transport config options
            self.transport_metal = config.getboolean('Transport', 'TransportMetal')
            self.transport_crystal = config.getboolean('Transport', 'TransportCrystal')
            self.transport_deuterium = config.getboolean('Transport', 'TransportDeuterium')
            # Exploration config options
            self.attack_range = config.getint('Exploration', 'AttackRange')
            self.time_to_wait_for_probes = config.getint('Exploration', 'HowLongToWaitForProbes')
            self.spy_report_life = config.getint('Exploration', 'SpyReportLife')  # Time in which spy report is valid
            self.minimum_inactive_target_rank = config.getint('Exploration', 'MinimumInactiveTargetRank')
            self.maximum_inactive_target_rank = config.getint('Exploration', 'MaximumInactiveTargetRank')
            self.spy_fleet_min_delay = config.getint('Exploration',
                                                     'SpyFleetMinDelay')  # Minimum time between sending next spy
            self.spy_fleet_max_delay = config.getint('Exploration',
                                                     'SpyFleetMaxDelay')  # maximum time between sending next spy
            self.attack_fleet_min_delay = config.getint('Exploration',
                                                        'AttackFleetMinDelay')  # Minimum time between sending next attack
            self.attack_fleet_max_delay = config.getint('Exploration',
                                                        'AttackFleetMaxDelay')  # Maximum time between sending next attack
            self.expedition_fleet_min_delay = config.getint('Exploration',
                                                            'ExpeditionFleetMinDelay')  # Minimum time between sending next expedition
            self.expedition_fleet_max_delay = config.getint('Exploration',
                                                            'ExpeditionFleetMaxDelay')  # Maximum time between sending next expedition
            self.spy_probes_count = config.getint('Exploration', 'SpyProbesCount')  # Amount of spy probes to send
            self.min_res_to_attack = config.getint('Exploration', 'MinResToSendAttack')  # Min resources to send attack
            self.expedition_range = config.getint('Exploration', 'ExpeditionRange')  # range to send expeditions
            # Twilio (SMS notification) config options
            self.enable_twilio_messaging = config.get('Twilio', 'EnableTwilioMessaging')
            self.twilio_account_sid = config.get('Twilio', 'AccountSid')
            self.twilio_account_token = config.get('Twilio', 'AccountToken')
            self.twilio_from_number = config.get('Twilio', 'FromNumber')
            self.twilio_to_number = config.get('Twilio', 'ToNumber')

        # read values from parameters
        mode = parameters.get('m')
        attack_range = parameters.get('r')
        planet_name = parameters.get('p')

        # Override default mode if the user has specified a mode by parameters
        if mode is not None:
            self.mode = mode
        if attack_range is not None:
            self.attack_range = int(attack_range)
        self.planet_name = planet_name

    @staticmethod
    def parse_multiple_value_config(str):
        """
        :param str: string to parse (comma- and/or space-separated values)
        :return: parsed vector of arguments
        """
        # FIX: the original filtered with `lambda x: x is not ""`, which
        # compares by identity (relies on string interning, and emits a
        # SyntaxWarning on modern Pythons) and returns a lazy iterator on
        # Python 3.  A list comprehension with a proper equality test keeps
        # the list behaviour and drops the empty fragments produced by
        # consecutive separators.
        return [token for token in re.split(',| ', str) if token != ""]
| 50.429752 | 134 | 0.610456 |
acfbf851205d78dbd2a98d86331d92441b458904 | 4,726 | py | Python | framework/utils.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | 2 | 2021-06-19T14:59:53.000Z | 2021-09-18T14:45:59.000Z | framework/utils.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | null | null | null | framework/utils.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import calendar
import datetime
import flask
import logging
import time
import traceback
# from google.appengine.api import users
from framework import users
import settings
from django.utils import feedgenerator
def normalized_name(val):
    """Lower-case *val* and strip spaces and forward slashes."""
    result = val.lower()
    for unwanted in (' ', '/'):
        result = result.replace(unwanted, '')
    return result
def format_feature_url(feature_id):
  """Return the feature detail page URL for the specified feature."""
  return '/feature/{0:d}'.format(feature_id)
def retry(tries, delay=1, backoff=2):
  """A retry decorator with exponential backoff.

  Functions are retried when Exceptions occur.

  Args:
    tries: int Number of times to retry, set to 0 to disable retry.
    delay: float Initial sleep time in seconds.
    backoff: float Must be greater than 1, further failures would sleep
             delay*=backoff seconds.

  Returns:
    A decorator that calls the wrapped function up to ``tries + 1`` times,
    sleeping between attempts, and re-raises the last exception when all
    attempts fail.

  Raises:
    ValueError: if backoff <= 1, tries < 0, or delay <= 0.
  """
  # Local import: the file-level import block does not include functools.
  import functools

  if backoff <= 1:
    raise ValueError("backoff must be greater than 1")
  if tries < 0:
    raise ValueError("tries must be 0 or greater")
  if delay <= 0:
    raise ValueError("delay must be greater than 0")

  def decorator(func):
    # FIX: preserve the wrapped function's name/docstring for logging
    # and introspection (the original wrapper lost them).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      _tries, _delay = tries, delay
      _tries += 1  # Ensure we call func at least once.
      while _tries > 0:
        try:
          ret = func(*args, **kwargs)
          return ret
        except Exception:
          _tries -= 1
          if _tries == 0:
            # Out of attempts: surface the original exception.
            logging.error('Exceeded maximum number of retries for %s.',
                          func.__name__)
            raise
          trace_str = traceback.format_exc()
          logging.warning('Retrying %s due to Exception: %s',
                          func.__name__, trace_str)
          time.sleep(_delay)
          _delay *= backoff  # Wait longer the next time we fail.
    return wrapper
  return decorator
def strip_trailing_slash(handler):
  """Decorator for request handlers: redirect URLs that end in '/'.

  The first positional argument is the path captured by the route; when it
  carries a trailing slash, the client is redirected to the canonical,
  slash-less form of the current request path.
  """
  def remove_slash(self, *args, **kwargs):
    path = args[0]
    if path[-1] != '/':
      return handler(self, *args, **kwargs)  # Call the handler method
    canonical = self.request.path.rstrip('/')
    return self.redirect(canonical)
  return remove_slash
def render_atom_feed(request, title, data):
  """Build an Atom feed (body text and HTTP headers) for a list of features.

  Args:
    request: the incoming request; scheme/host/path are used to build links
        (the '.xml' suffix is stripped from the feed's self link).
    title: feed title suffix, combined with settings.APP_TITLE.
    data: iterable of feature dicts; each must have 'name', 'updated' and
        'category', and may have 'id' and 'summary'.

  Returns:
    A (text, headers) tuple: the UTF-8 feed body and a headers dict
    including HSTS and the Atom content type.
  """
  # NOTE(review): uses the Python 2 `unicode` builtin -- this module is
  # py2-only as written.
  features_url = '%s://%s%s' % (request.scheme,
                                request.host,
                                request.path.replace('.xml', ''))
  feature_url_prefix = '%s://%s%s' % (request.scheme,
                                      request.host,
                                      '/feature')

  feed = feedgenerator.Atom1Feed(
      title=unicode('%s - %s' % (settings.APP_TITLE, title)),
      link=features_url,
      description=u'New features exposed to web developers',
      language=u'en'
  )
  for f in data:
    # 'updated' is truncated to 'YYYY-MM-DD HH:MM:SS' before parsing,
    # dropping any fractional seconds.
    pubdate = datetime.datetime.strptime(str(f['updated'][:19]),
                                         '%Y-%m-%d %H:%M:%S')
    feed.add_item(
        title=unicode(f['name']),
        link='%s/%s' % (feature_url_prefix, f.get('id')),
        description=f.get('summary', ''),
        pubdate=pubdate,
        author_name=unicode(settings.APP_TITLE),
        categories=[f['category']]
    )
  headers = {
      'Strict-Transport-Security':
          'max-age=63072000; includeSubDomains; preload',
      'Content-Type': 'application/atom+xml;charset=utf-8'}
  text = feed.writeString('utf-8')
  return text, headers
_ZERO = datetime.timedelta(0)


class _UTCTimeZone(datetime.tzinfo):
  """UTC tzinfo implementation (pre-datetime.timezone.utc style)."""

  def utcoffset(self, _dt):
    return _ZERO

  def tzname(self, _dt):
    return "UTC"

  def dst(self, _dt):
    # UTC never observes daylight saving time.
    return _ZERO


# Shared singleton instance used when building aware datetimes.
_UTC = _UTCTimeZone()
def get_banner_time(timestamp):
  """Converts a timestamp into data so it can appear in the banner.

  Args:
    timestamp: timestamp expressed in the following format:
         [year,month,day,hour,minute,second]
         e.g. [2009,3,20,21,45,50] represents March 20 2009 9:45:50 PM
         May be None, in which case None is returned.

  Returns:
    The Unix timestamp (seconds since the epoch, int) for the given UTC
    wall-clock time, ready for display inside the banner message.
  """
  if timestamp is None:
    return None
  # Interpret the tuple as a UTC wall-clock time, then convert back to a
  # POSIX timestamp via timegm (which treats the struct_time as UTC).
  ts = datetime.datetime(*timestamp, tzinfo=_UTC)
  return calendar.timegm(ts.timetuple())
| 30.490323 | 74 | 0.634998 |
acfbf8f177a06075fceb484ac2a77f33d344d811 | 2,385 | py | Python | tf2onnx/tflite/VarHandleOptions.py | LoicDagnas/tensorflow-onnx | 6691850e79047d05d85017573170fd8240393b57 | [
"Apache-2.0"
] | 1,473 | 2018-03-16T02:47:33.000Z | 2022-03-31T03:43:52.000Z | tf2onnx/tflite/VarHandleOptions.py | LoicDagnas/tensorflow-onnx | 6691850e79047d05d85017573170fd8240393b57 | [
"Apache-2.0"
] | 1,208 | 2018-03-14T09:58:49.000Z | 2022-03-31T17:56:20.000Z | tf2onnx/tflite/VarHandleOptions.py | LoicDagnas/tensorflow-onnx | 6691850e79047d05d85017573170fd8240393b57 | [
"Apache-2.0"
] | 350 | 2018-04-03T03:48:40.000Z | 2022-03-30T11:23:55.000Z | # SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class VarHandleOptions(object):
    # Auto-generated FlatBuffers table accessor for the TFLite
    # VarHandleOptions table -- regenerate with flatc rather than editing
    # the logic by hand.
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root table offset from the buffer prefix and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = VarHandleOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsVarHandleOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # Checks for the "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # VarHandleOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # VarHandleOptions
    def Container(self):
        # Optional string field at vtable slot 4; None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # VarHandleOptions
    def SharedName(self):
        # Optional string field at vtable slot 6; None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
def Start(builder): builder.StartObject(2)
def VarHandleOptionsStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddContainer(builder, container): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(container), 0)
def VarHandleOptionsAddContainer(builder, container):
"""This method is deprecated. Please switch to AddContainer."""
return AddContainer(builder, container)
def AddSharedName(builder, sharedName): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sharedName), 0)
def VarHandleOptionsAddSharedName(builder, sharedName):
"""This method is deprecated. Please switch to AddSharedName."""
return AddSharedName(builder, sharedName)
def End(builder): return builder.EndObject()
# Deprecated alias kept for call sites generated by older flatc versions.
def VarHandleOptionsEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
acfbf91315ae8fa759e47178ec90f3b7a692cd5c | 7,881 | py | Python | warp/utils/config_parsing.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | warp/utils/config_parsing.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | warp/utils/config_parsing.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | # std
import datetime
from copy import deepcopy
from collections import deque
import yaml
# from .lazy_loader import LazyLoader as LL
# yaml = LL('yaml', globals(), 'yaml')
# json = LL('json', globals(), 'json')
# types
from typing import Dict, Any, Union, Tuple
# Public API of this module.
__all__ = [
    'load_config_file',
    'save_config']

# Scalar types that PyYAML can serialize directly; checked by typecheck_config.
BASIC_TYPES: Tuple[type, ...] = (
    type(None),
    bool,
    int,
    float,
    str,
    datetime.datetime,
    bytes,
    complex)

# Container types that typecheck_config recurses into.
ITERABLE_TYPES: Tuple[type, ...] = (
    list,
    tuple,
    set,
    dict)
class HyperParameter:
    """A sweep-able config value constructed from a `!HYPERPARAMETER` YAML node.

    Holds the list of candidate values in `self.values`. Either an explicit
    `values` list is given, or the values are generated from a (spec_type,
    spec) pair (currently only spec_type == 'int' -> range(spec)).
    """
    # Class-wide flag: when True, constructors print what they resolved.
    verbose = False

    @classmethod
    def set_verbosity(cls, value):
        cls.verbose = value

    def __init__(self, values=None, spec_type=None, spec=None):
        # Default version is to provide a list of actual values
        if values and type(values) is not list:
            raise TypeError(f'hyperparameter values must be a list not {type(values)}')
        if values:
            if not isinstance(values[0], dict) and not isinstance(values[0], list):
                # Plain scalars: de-duplicate and sort for a stable sweep order.
                values = sorted(set(values))
                if self.verbose: print('Found literal (unique) hparam values: ', values)
            elif len(values) == 1 and isinstance(values[0], dict):
                raise TypeError(f'known bug/unsupported, hparam len(values)==1 but elm is a dict')
            else:
                # dict/list elements are unhashable/unorderable: keep as given.
                # values = sorted(values)
                if self.verbose: print('Found literal hparam values: ', values)
        # Can support other value shorthands/generators
        if values is None:
            # A simple count or range(n) type
            if spec_type == 'int':
                values = [i for i in range(spec)]
            else:
                raise TypeError(f'no generator for hyperparameter spec.type: {spec_type}')
            # Could add another range type with low, high, stepsize... etc
            if self.verbose: print('Found constructable hparam values: ', values)
        self.values = values
def set_value(dictionary, keychain, value):
    """Set *value* at the nested position described by *keychain* and return *dictionary*.

    Args:
        dictionary: the (nested) dict to mutate in place.
        keychain: list of keys leading to the target slot, e.g. ['a', 'b'].
        value: the value to store.

    Returns:
        The same *dictionary* object, mutated.

    Fix: the original returned None on the single-key base case while
    returning the dict on the recursive case; callers (e.g. BFTreeExpander)
    rely on the return value, so the base case now also returns the dict.
    """
    if len(keychain) == 1:
        dictionary[keychain[0]] = value
        return dictionary
    # Recurse one level down; mutation happens in place.
    set_value(dictionary[keychain[0]], keychain[1:], value)
    return dictionary
class BFTreeExpander:
    """Breadth-first expander that replaces HyperParameter nodes in a config tree.

    Expanding a multi-valued HyperParameter spawns one new BFTreeExpander per
    value; each instance registers itself in the class-level `roots` registry.
    NOTE: state is class-level, so expansion is not re-entrant — callers must
    use reset_roots()/reset_keys() between runs (expand_config does this).
    """
    roots = {}
    # hparam_keys = set()
    # hparam_keychains = set()
    hparam_keychains = {}  # used as an insertion-ordered set: only the keys matter

    @classmethod
    def reset_roots(cls):
        cls.roots = {}

    @classmethod
    def get_roots(cls):
        # Return the raw root dicts of every still-registered expander.
        return [v.root for k, v in cls.roots.items()]

    @classmethod
    def reset_keys(cls):
        # cls.hparam_keys = set()
        # cls.hparam_keychains = set()
        cls.hparam_keychains = {}

    # @classmethod
    # def get_hparam_key_list(cls):
    #     return list(cls.hparam_keys)

    @classmethod
    def get_hparam_keychains(cls):
        return list(cls.hparam_keychains.keys())
        # return cls.hparam_keychains

    def __init__(self, root):
        self.root = root
        self.queue = deque()
        # Registry key is the instance's identity; the instance self-registers.
        self.id = id(self)
        self.roots[self.id] = self

    # recursive breadth-first traverser
    def expand(self, node = None, keychain = []):  # note: default keychain is never mutated in place
        if node is None: node = self.root
        if isinstance(node, HyperParameter):
            # Record where a hyperparameter lives; keychain[0] is the synthetic 'root' key.
            # self.hparam_keys.add(keychain[-1])
            # self.hparam_keychains.add(".".join(keychain[1:])) # drop root key
            self.hparam_keychains[".".join(keychain[1:])] = None
            if len(node.values) == 1:
                # Single candidate value: collapse in place, no new trees needed.
                set_value(self.root, keychain, node.values[0])
                return False
            else:
                # One new tree per candidate value; each registers itself in cls.roots.
                for val in node.values:
                    new_root = set_value(deepcopy(self.root), keychain, val)
                    new_tree = BFTreeExpander(new_root)
                return True  # "expansion was performed"
        if isinstance(node, dict):
            # Enqueue children for breadth-first traversal.
            for key, val in node.items():
                if val is not None:
                    new_keychain = keychain.copy()
                    new_keychain.append(key)
                    self.queue.append((val, new_keychain))
        while len(self.queue) > 0:
            next_node, next_keychain = self.queue.popleft()
            expanded = self.expand(next_node, next_keychain)
            if expanded:
                # since we had to expand this tree further,
                # we can now remove it from the working set
                # pop w/ default None, instead of del, as this can get called repeatedly on way up
                self.roots.pop(self.id, None)
                return True  # bubble up
        return False  # no expansion performed
def expand_config(orig_config):
    """Expand every HyperParameter in *orig_config* into concrete configs.

    Repeatedly runs BFTreeExpander passes until the number of root configs
    stops growing (a fixed point), i.e. no HyperParameter nodes remain.

    Returns:
        (roots, keychains): the list of fully-expanded config dicts and the
        dot-joined keychains at which hyperparameters were found.
    """
    # Wrap in a synthetic 'root' key so set_value keychains are never empty.
    old_roots = [{'root': orig_config}]
    while True:
        old_ct = len(old_roots)
        new_roots = []
        for input_root in old_roots:
            BFTreeExpander.reset_roots()
            bfte = BFTreeExpander(input_root)
            bfte.expand()
            new_roots.extend(bfte.get_roots())
        if old_ct == len(new_roots):
            # No tree grew this pass -> everything is fully expanded.
            break
        old_roots = new_roots.copy()
    # Unwrap the synthetic 'root' key and collect the recorded keychains.
    roots, keychains = [tree['root'] for tree in new_roots], BFTreeExpander.get_hparam_keychains()
    # Clear class-level state for the next call.
    BFTreeExpander.reset_roots()
    BFTreeExpander.reset_keys()
    return roots, keychains
############ PyYAML Custom obj constructors/representers ###############
def hparam_constructor(loader, node):
    """PyYAML constructor for `!HYPERPARAMETER` nodes.

    Implemented as a generator (note the `yield`): PyYAML uses two-pass
    construction for generator constructors, which tolerates recursive or
    forward references inside the mapping.
    """
    fields = loader.construct_mapping(node, deep=True)
    hparam = HyperParameter(**fields)
    yield hparam
def tuple_to_list_constructor(loader, node):
    """PyYAML constructor that materializes a sequence node as a plain list."""
    items = loader.construct_sequence(node, deep=True)
    return [item for item in items]
def hparam_representer(dumper, node):
    """PyYAML representer: serialize a HyperParameter back to a `!HYPERPARAMETER` mapping."""
    return dumper.represent_mapping(u'!HYPERPARAMETER', [("values", node.values)], flow_style=False)
def load_config_file(path: str) -> Tuple[list, list]:
    """Load a YAML config file and expand any `!HYPERPARAMETER` values.

    Extensions accepted are `{.yml, .yaml}`.

    Arguments:
        path: The relative path to the YAML file to load.

    Returns:
        A tuple `(configs, keychains)`: the list of fully-expanded config
        dicts (one per hyperparameter value combination) and the list of
        dot-joined keychains at which hyperparameters were found.
    """
    # Register the custom YAML tag handlers before parsing.
    yaml.add_constructor('!HYPERPARAMETER', hparam_constructor, yaml.FullLoader)
    yaml.add_representer(HyperParameter, hparam_representer)
    # HyperParameter.set_verbosity(args.verbose)
    file_ext = path.split('.')[-1]
    if file_ext in {'yml', 'yaml'}:
        with open(path, 'rb') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
    else:
        raise NotImplementedError('unrecognized file extension .{:s} for file {:s}'.format(file_ext, path))
    return expand_config(config)
def typecheck_config(config: Dict[str, Any]) -> None:
    """Raise TypeError unless *config* contains only YAML-serializable basic types.

    Recurses through ITERABLE_TYPES containers; any leaf outside BASIC_TYPES
    is collected and reported in the raised TypeError.
    """
    invalid_types = set()

    def recursive_typecheck(struct: Union[Dict[str, Any], Any]) -> bool:
        # Containers: every contained value must pass.
        if isinstance(struct, ITERABLE_TYPES):
            values = struct.values() if isinstance(struct, dict) else struct
            return all(map(recursive_typecheck, values))
        # Leaves: must be one of the allowed scalar types.
        if isinstance(struct, BASIC_TYPES):
            return True
        invalid_types.add(type(struct))
        return False

    if not recursive_typecheck(config):
        raise TypeError(f'config {config} contains invalid type(s) {invalid_types}')
def save_config(path: str, config: Dict[str, Any]) -> None:
    """Validate *config* and write it to *path* as YAML.

    Raises:
        RuntimeError: if the config contains values that cannot be
            serialized (chained from the underlying TypeError).

    Fix: the original raised `RuntimeError([e, RuntimeError(...)])`, passing a
    list of exception objects as the message; use proper `raise ... from e`
    exception chaining instead (same exception type for callers).
    """
    try:
        typecheck_config(config)
    except TypeError as e:
        raise RuntimeError('Cannot cache runtime parameter values due to invalid type(s).') from e
    # cache
    with open(path, 'w') as file:
        yaml.dump(config, file, default_flow_style=False)
acfbf96e3737bbd3f7b9e06148aad76db16bbd19 | 11,880 | py | Python | bot/utils/teams.py | fossabot/atlantisbot | 4acda63be226079c740595d4b9c20f2a64cb9e2a | [
"MIT"
] | null | null | null | bot/utils/teams.py | fossabot/atlantisbot | 4acda63be226079c740595d4b9c20f2a64cb9e2a | [
"MIT"
] | null | null | null | bot/utils/teams.py | fossabot/atlantisbot | 4acda63be226079c740595d4b9c20f2a64cb9e2a | [
"MIT"
] | null | null | null | from typing import Union, Tuple
import traceback
import discord
from bot.orm.models import Team, Player, BotMessage, User
from bot.utils.tools import has_any_role, separator
class TeamNotFoundError(Exception):
    """Raised when no Team row matches the requested team id."""
    pass
class WrongChannelError(Exception):
    """Raised when a team command is used outside the team's invite channel."""
    pass
def secondary_full(team: Team, session) -> Tuple[int, bool]:
    """Return (count, at_limit) for players that only meet the team's secondary role requirement."""
    count = session.query(Player).filter_by(team=team.id, secondary=True).count()
    if not team.secondary_limit:
        # No secondary limit configured: report (0, False) — the limit can never be reached.
        return 0, False
    at_limit = count >= team.secondary_limit
    return count, at_limit
def add_to_team(author: discord.Member, team: Team, substitute: bool, secondary: bool, session) -> None:
    """Insert a Player row linking *author* to *team* and commit immediately."""
    new_player = Player(
        player_id=str(author.id),
        team=team.id,
        substitute=substitute,
        secondary=secondary,
    )
    session.add(new_player)
    session.commit()
def first_substitute(team: Team, session, exclude: int) -> Union[Player, None]:
    """Return the first substitute of *team* other than *exclude* (a Discord user id), or None.

    Note: `== True` (not `is True`) is required so SQLAlchemy builds the SQL
    comparison expression — hence the noqa for E712.
    """
    return session.query(Player).filter(
        Player.substitute == True,  # noqa: E712
        Player.player_id != str(exclude),
        Player.team == team.id
    ).first()
def remove_from_team(player_id: int, team: Team, session) -> None:
    """Delete the Player row for *player_id* in *team* and commit."""
    session.query(Player).filter_by(player_id=str(player_id), team=team.id).delete()
    session.commit()
def team_count(team: Team, session) -> int:
    """Total number of Player rows in *team* (substitutes included)."""
    return session.query(Player).filter_by(team=team.id).count()
async def update_team_message(message: discord.Message, team: Team, prefix: str, session) -> None:
    """Rebuild the team roster embed from the DB and edit *message* in place.

    The embed lists non-substitute players first (numbered), then substitutes,
    and shows role requirements and the secondary-slot usage when configured.
    User-facing text is Portuguese by design.
    """
    embed_description = f"Marque presença no <#{team.invite_channel_id}>\n Criador: <@{team.author_id}>"
    requisito = ""
    requisito2 = ""
    if team.role:
        requisito = f"Requisito: <@&{team.role}>\n"
    if team.role_secondary:
        # Show "(used/limit)" only when a secondary limit is configured.
        count = session.query(Player).filter_by(team=team.id, secondary=True).count()
        limit = "" if not team.secondary_limit else f"({count}/{team.secondary_limit})"
        requisito2 = f"Requisito Secundário: <@&{team.role_secondary}> {limit}\n\n"
    embed_description = f"{requisito}{requisito2}{embed_description}"
    team_embed = discord.Embed(
        title=f"__{team.title}__ - {team_count(team, session)}/{team.size}",
        description=embed_description,
        color=discord.Color.purple()
    )
    footer = f"Digite '{prefix}del {team.team_id}' para excluir o time. (Criador do time ou Admin e acima)"
    team_embed.set_footer(text=footer)
    players = session.query(Player).filter_by(team=team.id)
    index = 0
    # First pass: active (non-substitute) players, numbered from 1.
    if players:
        for player in players:
            if not player.substitute:
                user: User = session.query(User).filter_by(discord_id=player.player_id).first()
                player_role = f"({player.role})" if player.role else ""
                if user:
                    player_ingame = f"({user.ingame_name})"
                else:
                    player_ingame = ""
                player_value = (
                    f"{index + 1}- <@{player.player_id}> {player_role} {player_ingame}"
                    f"{'***(Secundário)***' if player.secondary else ''}"
                )
                team_embed.add_field(name=separator, value=player_value, inline=False)
                index += 1
    # Second pass: substitutes, listed after the active roster.
    if players:
        for player in players:
            if player.substitute:
                user_: User = session.query(User).filter_by(discord_id=player.player_id).first()
                if user_:
                    player_ingame = f"({user_.ingame_name})"
                else:
                    player_ingame = ""
                player_role = f"({player.role})" if player.role else ""
                player_value = (f"- <@{player.player_id}> {player_role} {player_ingame} ***(Substituto)*** "
                                f"{'***(Secundário)***' if player.secondary else ''}")
                team_embed.add_field(name=separator, value=player_value, inline=False)
    await message.edit(embed=team_embed)
async def manage_team(team_id: str, client, message: discord.Message, mode: str) -> None:
    """Process a join or leave request for a Team.

    Args:
        team_id: public team identifier typed by the user.
        client: the bot client (provides db_session, channels, settings, logging).
        message: the triggering Discord message (deleted after validation).
        mode: 'join' or 'leave'.

    Raises:
        TeamNotFoundError: no Team row matches *team_id*.
        WrongChannelError: the message was sent outside the team's invite channel.
    """
    with client.db_session() as session:
        try:
            team: Team = session.query(Team).filter_by(team_id=team_id).first()
            if not team:
                raise TeamNotFoundError
            if int(team.invite_channel_id) != message.channel.id:
                raise WrongChannelError
            await message.delete()
            current_players = session.query(Player).filter_by(team=team.id)
            substitutes = session.query(Player).filter_by(team=team.id, substitute=True)
            invite_channel: discord.TextChannel = client.get_channel(int(team.invite_channel_id))
            team_channel: discord.TextChannel = client.get_channel(int(team.team_channel_id))
            # Channels gone -> the team is unrecoverable; clean it up.
            if not invite_channel or not team_channel:
                return await delete_team(session, team, client)
            try:
                team_message = await team_channel.fetch_message(int(team.team_message_id))
            except discord.errors.NotFound:
                # Roster message was deleted -> clean up the team.
                return await delete_team(session, team, client)
            text = ''
            no_perm_embed = None
            if mode == 'join':
                team_role = None if not team.role else int(team.role)
                secondary_team_role = None if not team.role_secondary else int(team.role_secondary)
                has_main = has_any_role(message.author, team_role)  # Has main role requirement
                has_secondary = has_any_role(message.author, secondary_team_role)  # Has secondary role requirement
                has_any = has_any_role(message.author, team_role, secondary_team_role)  # Has either or both
                # Has only secondary requirement
                is_secondary = True if (has_secondary and not has_main) else False
                # Secondary-only members count against the secondary limit, not the team size.
                if is_secondary:
                    _, is_team_full = secondary_full(team, session)
                else:
                    is_team_full = is_full(team, session)
                if in_team(message.author.id, team, session):
                    text = 'já está no time'
                elif has_any or not team_role:
                    # Full team -> join as substitute instead of rejecting.
                    add_to_team(message.author, team, substitute=is_team_full, secondary=is_secondary, session=session)
                    text = 'entrou ***como substituto*** no time' if is_team_full else 'entrou no time'
                else:
                    # Missing required role: build a rejection embed instead of joining.
                    description = f"{message.author.mention}, você precisa ter o cargo <@&{team.role}>"
                    if team.role_secondary:
                        description = f"{description} ou o cargo <@&{team.role_secondary}>"
                    description = (f"{description} para entrar no Time '{team.title}' "
                                   f"({current_players.count() - substitutes.count()}/{team.size})\n"
                                   f"(*`{message.content}`*)")
                    no_perm_embed = discord.Embed(
                        title=f"__Permissões insuficientes__",
                        description=description,
                        color=discord.Color.dark_red()
                    )
            elif mode == 'leave':
                if in_team(message.author.id, team, session):
                    text = 'saiu do time'
                    substitute: Player = first_substitute(team, session, message.author.id)
                    is_substitute = session.query(Player).filter_by(
                        player_id=str(message.author.id), team=team.id
                    ).first().substitute
                    # If the person leaving is not a substitute and there is one available, then
                    # make that substitute not be a substitute anymore
                    if substitute and not is_substitute:
                        # ...unless promoting them would overflow the secondary limit.
                        if substitute.secondary and secondary_full(team, session)[1]:
                            pass
                        else:
                            substitute.substitute = False
                            session.commit()
                            _text = (f"<@{substitute.player_id}> não é mais um substituto do time "
                                     f"**[{team.title}]({team_message.jump_url})** "
                                     f"({current_players.count() - substitutes.count() - 1}/{team.size})")
                            embed = discord.Embed(title='', description=_text, color=discord.Color.green())
                            msg = await invite_channel.send(content=f"<@{substitute.player_id}>", embed=embed)
                            session.add(BotMessage(message_id=msg.id, team=team.id))
                    remove_from_team(message.author.id, team, session)
                else:
                    text = 'já não estava no time'
            # Announce the outcome in the invite channel.
            if no_perm_embed:
                sent_message = await invite_channel.send(embed=no_perm_embed)
            else:
                _text = (f"{message.author.mention} {text} **[{team.title}]({team_message.jump_url})** "
                         f"({current_players.count() - substitutes.count()}/{team.size})\n\n *`{message.content}`*")
                if mode == 'leave':
                    embed_color = discord.Color.red()
                else:
                    embed_color = discord.Color.green()
                embed = discord.Embed(title='', description=_text, color=embed_color)
                embed.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
                sent_message = await invite_channel.send(embed=embed)
            # Track the bot's own message so delete_team can clean it up later.
            session.add(BotMessage(message_id=sent_message.id, team=team.id))
            session.commit()
            try:
                await update_team_message(team_message, team, client.setting.prefix, session)
            except discord.errors.NotFound:
                session.delete(team)
                session.commit()
        except TeamNotFoundError:
            raise TeamNotFoundError
        except WrongChannelError:
            raise WrongChannelError
        except Exception as e:
            # Last-resort guard: report unexpected failures to the bot's log channel.
            await client.send_logs(e, traceback.format_exc())
def is_full(team: Team, session) -> bool:
    """Return True when the team's non-substitute roster has reached its size."""
    active = session.query(Player).filter_by(team=team.id, substitute=False).count()
    return active >= team.size
def in_team(author_id: int, team: Team, session) -> bool:
    """Return True when *author_id* already belongs to *team*."""
    members = session.query(Player).filter_by(team=team.id)
    return any(int(member.player_id) == author_id for member in members)
async def delete_team(session, team: Team, client) -> None:
    """Delete *team* from the DB and best-effort remove its Discord messages.

    Each Discord cleanup step is wrapped in its own try/except so that a
    missing channel or message never prevents the database row from being
    deleted at the end.
    """
    try:
        team_channel = client.get_channel(int(team.team_channel_id))
        invite_channel = client.get_channel(int(team.invite_channel_id))
    except Exception:
        # Channels unreachable: just drop the DB row.
        session.delete(team)
        session.commit()
        return
    try:
        team_message = await team_channel.fetch_message(int(team.team_message_id))
        await team_message.delete()
    except Exception:
        pass
    try:
        invite_message = await invite_channel.fetch_message(int(team.invite_message_id))
        await invite_message.delete()
    except Exception:
        pass
    try:
        # Bulk-delete every bot announcement recorded for this team.
        messages_to_delete = []
        qs = session.query(BotMessage).filter_by(team=team.id)
        if qs:
            for message in qs:
                to_delete = await invite_channel.fetch_message(message.message_id)
                messages_to_delete.append(to_delete)
            await invite_channel.delete_messages(messages_to_delete)
    except Exception:
        pass
    session.delete(team)
    session.commit()
acfbf9fb4536bb1080f9ac954a35233f2410af53 | 6,171 | py | Python | bin/anthology/volumes.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 221 | 2017-06-05T04:44:41.000Z | 2022-02-11T20:23:31.000Z | bin/anthology/volumes.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 1,498 | 2017-08-09T13:41:49.000Z | 2022-03-31T02:56:58.000Z | bin/anthology/volumes.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 183 | 2017-10-28T00:56:49.000Z | 2022-03-14T14:55:00.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import cached_property
import re
import logging as log
from . import data
from .papers import Paper
from .venues import VenueIndex
from .sigs import SIGIndex
from .utils import (
build_anthology_id,
parse_element,
is_journal,
month_str2num,
infer_url,
infer_year,
)
class Volume:
    """A proceedings volume of the ACL Anthology: metadata plus a list of Papers."""

    def __init__(
        self,
        collection_id,
        volume_id,
        ingest_date,
        meta_data,
        venue_index: VenueIndex,
        sig_index: SIGIndex,
        formatter,
    ):
        """Instantiate a proceedings volume.

        `venue_index` and `sig_index` are used to find venues and SIGs
        associated with this proceedings volume.
        """
        self.collection_id = collection_id
        self._id = volume_id
        self.ingest_date = ingest_date
        self.formatter = formatter
        self.venue_index = venue_index
        # _set_meta_info populates self.attrib before venues/sigs are added.
        self._set_meta_info(meta_data)
        self.attrib["venues"] = venue_index.get_associated_venues(self.full_id)
        self.attrib["sigs"] = sig_index.get_associated_sigs(self.full_id)
        self.content = []
        self.has_abstracts = False
        self.has_frontmatter = False

    @staticmethod
    def from_xml(
        volume_xml, collection_id, venue_index: VenueIndex, sig_index: SIGIndex, formatter
    ):
        """Build a Volume (including optional front matter) from a <volume> XML element."""
        volume_id = volume_xml.attrib["id"]
        # The date of publication, defaulting to earlier than anything we'll encounter
        ingest_date = volume_xml.attrib.get("ingest-date", data.UNKNOWN_INGEST_DATE)
        meta_data = parse_element(volume_xml.find("meta"))
        # Though metadata uses "booktitle", switch to "title" for compatibility with downstream scripts
        meta_data["title"] = formatter(meta_data["xml_booktitle"], "plain")
        volume = Volume(
            collection_id,
            volume_id,
            ingest_date,
            meta_data,
            venue_index,
            sig_index,
            formatter,
        )
        front_matter_xml = volume_xml.find("frontmatter")
        if front_matter_xml is not None:
            front_matter = Paper.from_xml(front_matter_xml, volume, formatter)
            volume.add_frontmatter(front_matter)
        return volume

    @cached_property
    def url(self):
        # If <url> field not present, use ID.
        # But see https://github.com/acl-org/acl-anthology/issues/997.
        return infer_url(self.attrib.get("xml_url", self.full_id))

    @cached_property
    def pdf(self):
        # PDF location, or None when the volume has no <url> field.
        url = self.attrib.get("xml_url", None)
        if url is not None:
            return infer_url(url, template=data.PDF_LOCATION_TEMPLATE)
        return None

    def _set_meta_info(self, meta_data):
        """Derive journal title, volume, and issue no. used in metadata.

        This function replicates functionality that was previously hardcoded in
        'app/helpers/papers_helper.rb' of the Rails app."""
        self.attrib = meta_data
        if "author" in self.attrib:
            # Authors of the front matter are the volume's editors
            self.attrib["editor"] = self.attrib["author"]
            del self.attrib["author"]
        # Some volumes don't set this---but they should!
        if "year" not in self.attrib:
            self.attrib["year"] = infer_year(self.collection_id)
        self.attrib["meta_date"] = self.get("year")
        if "month" in self.attrib:
            month = month_str2num(self.get("month"))
            if month is not None:
                self.attrib["meta_date"] = f"{self.get('year')}/{month}"
        if is_journal(self.collection_id):
            self.attrib["meta_journal_title"] = data.get_journal_title(
                self.collection_id, self.attrib["title"]
            )
            # Parse "Volume N" and "Number/Issue N[-M]" out of the title.
            volume_no = re.search(
                r"Volume\s*(\d+)", self.attrib["title"], flags=re.IGNORECASE
            )
            if volume_no is not None:
                self.attrib["meta_volume"] = volume_no.group(1)
            issue_no = re.search(
                r"(Number|Issue)\s*(\d+-?\d*)", self.attrib["title"], flags=re.IGNORECASE
            )
            if issue_no is not None:
                self.attrib["meta_issue"] = issue_no.group(2)

    @property
    def volume_id(self):
        return self._id

    @cached_property
    def full_id(self):
        # e.g. collection id + volume id combined into the anthology identifier.
        return build_anthology_id(self.collection_id, self.volume_id)

    @property
    def paper_ids(self):
        return [paper.full_id for paper in self.content]

    def add_frontmatter(self, frontmatter):
        # Front matter is stored like a regular paper, with a flag set.
        self.has_frontmatter = True
        self.append(frontmatter)

    def append(self, paper):
        """Add a Paper to this volume, tracking whether any paper has an abstract."""
        self.content.append(paper)
        if paper.has_abstract:
            self.has_abstracts = True

    def get(self, name, default=None):
        # dict.get-style access to the volume's metadata attributes.
        try:
            return self.attrib[name]
        except KeyError:
            return default

    def get_title(self, form="xml"):
        """Returns the paper title, optionally formatting it.

        Accepted formats:
        - xml: Include any contained XML tags unchanged
        - plain: Strip all XML tags, returning only plain text
        - html: Convert XML tags into valid HTML tags
        """
        return self.formatter(self.get("xml_booktitle"), form)

    def as_dict(self):
        # Shallow metadata copy augmented with resolved URL (and PDF when present).
        value = self.attrib.copy()
        value["url"] = self.url
        if self.pdf:
            value["pdf"] = self.pdf
        return value

    def __len__(self):
        return len(self.content)

    def __iter__(self):
        return self.content.__iter__()
acfbfa2616435e31d3127058b16f20c017d6cc8d | 1,335 | py | Python | csrmesh-hue-bridge.py | gorgiea/csrmesh-hue-bridge | 173492a7135dd1bd2e255c7db6880e9fda8bed7e | [
"MIT"
] | 3 | 2017-04-19T02:42:18.000Z | 2019-01-10T05:44:22.000Z | csrmesh-hue-bridge.py | gorgiea/csrmesh-hue-bridge | 173492a7135dd1bd2e255c7db6880e9fda8bed7e | [
"MIT"
] | 2 | 2017-03-26T15:08:55.000Z | 2017-03-26T15:10:01.000Z | csrmesh-hue-bridge.py | gorgiea/csrmesh-hue-bridge | 173492a7135dd1bd2e255c7db6880e9fda8bed7e | [
"MIT"
] | 1 | 2018-02-23T04:27:48.000Z | 2018-02-23T04:27:48.000Z | from __future__ import absolute_import, division, print_function
import argparse
import csrmesh as cm
from time import sleep
from phue import Bridge
from settings import *
import traceback
def read_bulb_state():
    """Poll the Hue bridge for the monitored bulb's state, or return None on any failure.

    Returns the phue 'state' mapping (includes 'on' and 'bri') when the lookup
    succeeds. All errors are printed and swallowed so the polling loop in
    __main__ keeps running.
    """
    state = None
    try:
        b = Bridge(HUE_BRIGE_IP)
        # If the app is not registered and the button is not pressed, press the
        # button and call connect() (this only needs to be run a single time)
        b.connect()
        bulb_state = b.get_light(HUE_BULB_NAME)
        if 'name' in bulb_state:
            print(bulb_state['state']['on'])
            print(bulb_state['state']['bri'])
            state = bulb_state['state']
        else:
            # On failure phue returns a list whose first entry carries the error.
            print("Error reading bulb state: ", bulb_state[0]['error'])
    except Exception as e:
        s = traceback.format_exc()
        print("unexpected failure, {} ,{}".format(e, s))
    return state
def set_homebrite_bulb(level):
    """Push brightness *level* (0-255) to the CSRMesh bulb; RGB is fixed at (255, 255, 255)."""
    cm.lightbulb.set_light(HOMEBRITE_MAC, HOMEBRITE_PIN, level, 255, 255, 255, 0)
if __name__ == "__main__":
    # Bridge loop: mirror the Hue bulb's on/brightness state onto the
    # CSRMesh (HomeBrite) bulb, polling twice per second.
    while True:
        state = read_bulb_state()
        print(state)
        if (state is not None):
            if state['on']:
                level = state['bri']
            else:
                # Bulb is off -> drive the mesh bulb to zero brightness.
                level = 0
            print("Setting level to ", level)
            set_homebrite_bulb(level)
        sleep(0.5)
acfbfbd6159fc2307e3afb5736ecebbdf6892884 | 5,623 | py | Python | draw/draw_graph.py | kochigami/statistical_data_analysis | 86ddcca3d48bcc70de6d3bd1e133b3619084e032 | [
"MIT"
] | 1 | 2018-05-19T23:07:54.000Z | 2018-05-19T23:07:54.000Z | draw/draw_graph.py | kochigami/statistical_data_analysis | 86ddcca3d48bcc70de6d3bd1e133b3619084e032 | [
"MIT"
] | 18 | 2018-03-19T02:35:15.000Z | 2022-03-22T06:08:43.000Z | draw/draw_graph.py | kochigami/statistical_data_analysis | 86ddcca3d48bcc70de6d3bd1e133b3619084e032 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import math
import numpy as np
class DrawGraph:
    """
    Displays a bar graph with one bar per sample group (mean or total per group).

    If test_mode is paired, data is like this:
    data = {'Crispy': [65, 85, 75, 85, 75, 80, 90, 75, 85, 65, 75, 85, 80, 85, 90],
            'Normal' : [70, 70, 85, 80, 65, 75, 65, 85, 80, 60, 70, 75, 70, 80, 85]}
    Be sure that string list is like ([category1, category2]).
    title: string.
    xlabel: string.
    ylabel: string.
    tight_layout: bool. if execute tight_layout, set True.
    sample_type: string. paired, unpaired
    p: float. if conducted test is two sample test, it is required.

    NOTE: this module uses Python 2 print statements.
    """
    def draw_graph(self, data, title, xlabel, ylabel, p=None, tight_layout=False, sample_type="paired", is_scale_nominal=False):
        """
        fig: make figure instance
        """
        fig = plt.figure()
        """
        y_data: if nominal scale is not used: calculate sample_data_average as list [ave_0, ave_1, ave_2]
                if nominal scale is used : calculate sample_data_total_num as list [total_0, total_1, total_2]
        max_y_data: max y value for scale
        """
        y_data = []
        if is_scale_nominal == False:
            # Interval/ratio scale: plot per-group means.
            for i in range(len(data.keys())):
                y_data.append(np.mean(data[(data.keys())[i]]))
        else:
            # Nominal scale: plot per-group totals.
            for i in range(len(data.keys())):
                y_data.append(sum(data[(data.keys())[i]]))
        print "y_data: " + str(y_data)
        """
        y_error: calculate sample_error as list [err_0, err_1]
        if scale is nominal: it is not calculated
        left: list of x value for each bar, now it is just empty
        ddof=False means calculating Sample standard deviation
        not Unbiased standard deviation (ddof=True)
        """
        y_error = []
        if is_scale_nominal == False:
            for i in range(len(data.keys())):
                y_error.append(np.std(data[(data.keys())[i]], ddof=False))
            print "y_error: " + str(y_error)
            # Leave headroom for the error bars when choosing the y scale.
            max_y_data = math.ceil(max(y_data) + max(y_error))
        else:
            max_y_data = math.ceil(max(y_data))
        left = np.array([])
        """
        left: list of x value (the number of order) for each bar
        """
        for i in range(len(data.keys())):
            left = np.append(left, i+1)
        """
        make bar:
        left: list of x value (the number of order)
        y_data: list of y value
        (width: the width of bar, default is 0.8)
        color: bar color
        yerr: error list of y value
        align: position to x bar (default is "edge")
        ecolor: color of yerror
        capsize: umbrella size of yerror
        If scale is nominal, yerr doesn't exist.
        """
        if is_scale_nominal == False:
            plt.bar(left, y_data, color="cyan", yerr=y_error, align="center", ecolor="blue", capsize=60)
        else:
            plt.bar(left, y_data, color="cyan", align="center", ecolor="blue", capsize=60)
        """
        plt.rcParams["font.size"]: modify character size in a graph
        plt.tick_params(labelsize=28): modify character size in x, ylabel
        """
        plt.rcParams["font.size"] = 16
        plt.tick_params(labelsize=12)
        """
        add y_value in each bar
        w = 0.4 (bar width from bar center line is 0.4)
        """
        ax = plt.gca()
        """
        set y range
        """
        ax.set_ylim([0.0, max_y_data + 1.0])
        # Annotate each bar with its value rounded to 2 decimal places.
        for i in range(len(y_data)):
            ann = ax.annotate(str((round (y_data[i] * 100.0)) * 0.01), xy=(left[i] - 0.15, y_data[i] / 2.0), fontsize=28)
        """
        add x_value in each bar
        """
        plt.xticks(left, data.keys())
        """
        add title, label
        """
        if sample_type == "paired":
            new_title = title + "\n(N = " + str(len(data[(data.keys())[0]])) + " for each type, * p < 0.05, ** p < 0.01)"
        else:
            # Unpaired samples: list each group's N separately.
            new_title = title + "\n(N = " + str(len(data[(data.keys())[0]]))
            for i in range(1, len(data.keys())):
                new_title += ", " + str(len(data[(data.keys())[i]]))
            new_title += " respectively, * p < 0.05, ** p < 0.01)"
        plt.title(new_title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        # Significance marker is only drawn for two-group comparisons.
        if p and len(data.keys()) == 2:
            """
            add p value and mark
            """
            if p < 0.01:
                input_word = "**" + " (p = " + str(round (p * 100000.0) * 0.00001) + " )"
                plt.text(1.3, max_y_data * 0.75, input_word)
            elif p < 0.05:
                input_word = "*" + " (p = " + str(round (p * 100000.0) * 0.00001) + " )"
                plt.text(1.3, max_y_data * 0.75, input_word)
            else:
                input_word = " p = " + str(round (p * 100000.0) * 0.00001)
                plt.text(1.3, max_y_data * 0.75, input_word)
            plt.text(1.0, max_y_data * 0.65, "|--------------------------------------------|")
        """
        show grid
        """
        plt.grid(True)
        """
        make graph small in order not to overlap each label
        memo: especially in t-test, which sentences are very long,
        graph is made very small in order not to overlap.
        That's why I comment in this line. If necessary, please comment out it.
        """
        if tight_layout:
            fig.tight_layout()
        """
        show graph
        """
        plt.show()
        return y_data, y_error
# No standalone behavior; this module is meant to be imported.
if __name__ == '__main__':
    pass
| 36.993421 | 128 | 0.524809 |
acfbfc88f5b354836d0ed4a6139df765e32672f2 | 16,391 | py | Python | monai/networks/nets/resnet.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | null | null | null | monai/networks/nets/resnet.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | null | null | null | monai/networks/nets/resnet.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | 1 | 2021-11-14T06:54:44.000Z | 2021-11-14T06:54:44.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from monai.networks.layers.factories import Conv, Norm, Pool
from monai.networks.layers.utils import get_pool_layer
from monai.utils.module import look_up_option
__all__ = ["ResNet", "resnet10", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet200"]
from monai.utils import deprecated_arg
def get_inplanes():
    """Channel widths of the four residual stages: [64, 128, 256, 512]."""
    return [2 ** power for power in (6, 7, 8, 9)]
def get_avgpool():
    """Adaptive-average-pool kernel sizes indexed by spatial_dims (index 0 unused)."""
    placeholder = 0
    return [placeholder, 1, (1, 1), (1, 1, 1)]
def get_conv1(conv1_t_size: int, conv1_t_stride: int):
    """Return (kernel_sizes, strides, paddings) for the stem conv, indexed by spatial_dims.

    Index 0 is an unused placeholder; indices 1-3 cover 1D/2D/3D, with the
    temporal (first) dimension controlled by conv1_t_size / conv1_t_stride.
    """
    half_t = conv1_t_size // 2
    kernel_sizes = [0, conv1_t_size, (conv1_t_size, 7), (conv1_t_size, 7, 7)]
    strides = [0, conv1_t_stride, (conv1_t_stride, 2), (conv1_t_stride, 2, 2)]
    paddings = [0, half_t, (half_t, 3), (half_t, 3, 3)]
    return kernel_sizes, strides, paddings
class ResNetBlock(nn.Module):
    """Basic two-convolution residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(
        self,
        in_planes: int,
        planes: int,
        spatial_dims: int = 3,
        stride: int = 1,
        downsample: Union[nn.Module, partial, None] = None,
    ) -> None:
        """
        Args:
            in_planes: number of input channels.
            planes: number of output channels.
            spatial_dims: number of spatial dimensions of the input image.
            stride: stride to use for first conv layer.
            downsample: which downsample layer to use.
        """
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

        # First conv may reduce spatial resolution (stride); second keeps it.
        self.conv1 = conv_type(in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        self.bn1 = norm_type(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2 = norm_type(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Main path: conv-bn-relu, conv-bn.
        out: torch.Tensor = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(out + shortcut)
class ResNetBottleneck(nn.Module):
    """Bottleneck residual block (ResNet-50/101/152 style): 1x1 -> 3x3 -> 1x1 convs."""

    expansion = 4

    def __init__(
        self,
        in_planes: int,
        planes: int,
        spatial_dims: int = 3,
        stride: int = 1,
        downsample: Union[nn.Module, partial, None] = None,
    ) -> None:
        """
        Args:
            in_planes: number of input channels.
            planes: number of output channels (taking expansion into account).
            spatial_dims: number of spatial dimensions of the input image.
            stride: stride to use for second conv layer.
            downsample: which downsample layer to use.
        """
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

        # 1x1 reduce -> 3x3 (strided) -> 1x1 expand by `expansion`.
        self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_type(planes)
        self.conv2 = conv_type(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = norm_type(planes)
        self.conv3 = conv_type(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = norm_type(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Main path: two conv-bn-relu stages, then conv-bn.
        out: torch.Tensor = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """
    ResNet based on: `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`_
    and `Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet? <https://arxiv.org/pdf/1711.09577.pdf>`_.
    Adapted from `<https://github.com/kenshohara/3D-ResNets-PyTorch/tree/master/models>`_.
    Args:
        block: which ResNet block to use, either Basic or Bottleneck.
        layers: how many layers to use.
        block_inplanes: determine the size of planes at each step. Also tunable with widen_factor.
        spatial_dims: number of spatial dimensions of the input image.
        n_input_channels: number of input channels for first convolutional layer.
        conv1_t_size: size of first convolution layer, determines kernel and padding.
        conv1_t_stride: stride of first convolution layer.
        no_max_pool: bool argument to determine if to use maxpool layer.
        shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'.
            - 'A': using `self._downsample_basic_block`.
            - 'B': kernel_size 1 conv + norm.
        widen_factor: widen output for each layer.
        num_classes: number of output (classifications).
        feed_forward: whether to add the FC layer for the output, default to `True`.
    .. deprecated:: 0.6.0
        ``n_classes`` is deprecated, use ``num_classes`` instead.
    """
    @deprecated_arg("n_classes", since="0.6")
    def __init__(
        self,
        block: Type[Union[ResNetBlock, ResNetBottleneck]],
        layers: List[int],
        block_inplanes: List[int],
        spatial_dims: int = 3,
        n_input_channels: int = 3,
        conv1_t_size: int = 7,
        conv1_t_stride: int = 1,
        no_max_pool: bool = False,
        shortcut_type: str = "B",
        widen_factor: float = 1.0,
        num_classes: int = 400,
        feed_forward: bool = True,
        n_classes: Optional[int] = None,
    ) -> None:
        super().__init__()
        # in case the new num_classes is default but you still call deprecated n_classes
        if n_classes is not None and num_classes == 400:
            num_classes = n_classes
        # Factories resolve the 1D/2D/3D torch layer types for `spatial_dims`.
        conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
        norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
        pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
        avgp_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[
            Pool.ADAPTIVEAVG, spatial_dims
        ]
        # Stem settings are looked up by `spatial_dims` (index 0 of these lists is unused).
        block_avgpool = get_avgpool()
        conv1_kernel, conv1_stride, conv1_padding = get_conv1(conv1_t_size, conv1_t_stride)
        block_inplanes = [int(x * widen_factor) for x in block_inplanes]
        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool
        self.conv1 = conv_type(
            n_input_channels,
            self.in_planes,
            kernel_size=conv1_kernel[spatial_dims],
            stride=conv1_stride[spatial_dims],
            padding=conv1_padding[spatial_dims],
            bias=False,
        )
        self.bn1 = norm_type(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = pool_type(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve resolution with stride 2.
        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], spatial_dims, shortcut_type)
        self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=2)
        self.avgpool = avgp_type(block_avgpool[spatial_dims])
        # Optional classification head; `None` when `feed_forward` is False.
        self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes) if feed_forward else None
        # He init for convs; unit scale / zero shift for norms; zero bias for the FC head.
        for m in self.modules():
            if isinstance(m, conv_type):
                nn.init.kaiming_normal_(torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu")
            elif isinstance(m, norm_type):
                nn.init.constant_(torch.as_tensor(m.weight), 1)
                nn.init.constant_(torch.as_tensor(m.bias), 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(torch.as_tensor(m.bias), 0)
    def _downsample_basic_block(self, x: torch.Tensor, planes: int, stride: int, spatial_dims: int = 3) -> torch.Tensor:
        """Shortcut type 'A': strided average pooling plus zero-padding up to `planes` channels."""
        out: torch.Tensor = get_pool_layer(("avg", {"kernel_size": 1, "stride": stride}), spatial_dims=spatial_dims)(x)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), *out.shape[2:], dtype=out.dtype, device=out.device)
        # NOTE(review): `out.data` detaches the pooled activations from autograd;
        # presumably intentional for the parameter-free 'A' shortcut -- confirm before changing.
        out = torch.cat([out.data, zero_pads], dim=1)
        return out
    def _make_layer(
        self,
        block: Type[Union[ResNetBlock, ResNetBottleneck]],
        planes: int,
        blocks: int,
        spatial_dims: int,
        shortcut_type: str,
        stride: int = 1,
    ) -> nn.Sequential:
        """Build one residual stage of `blocks` blocks; only the first block may downsample."""
        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
        downsample: Union[nn.Module, partial, None] = None
        # A shortcut is needed whenever the first block changes resolution or channel count.
        if stride != 1 or self.in_planes != planes * block.expansion:
            if look_up_option(shortcut_type, {"A", "B"}) == "A":
                downsample = partial(
                    self._downsample_basic_block,
                    planes=planes * block.expansion,
                    stride=stride,
                    spatial_dims=spatial_dims,
                )
            else:
                downsample = nn.Sequential(
                    conv_type(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride),
                    norm_type(planes * block.expansion),
                )
        layers = [
            block(
                in_planes=self.in_planes, planes=planes, spatial_dims=spatial_dims, stride=stride, downsample=downsample
            )
        ]
        self.in_planes = planes * block.expansion
        for _i in range(1, blocks):
            layers.append(block(self.in_planes, planes, spatial_dims=spatial_dims))
        return nn.Sequential(*layers)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Stem -> four residual stages -> adaptive average pool -> (optional) FC head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if not self.no_max_pool:
            x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten pooled features into (batch, channels).
        x = x.view(x.size(0), -1)
        if self.fc is not None:
            x = self.fc(x)
        return x
def _resnet(
    arch: str,
    block: Type[Union[ResNetBlock, ResNetBottleneck]],
    layers: List[int],
    block_inplanes: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Instantiate a ResNet variant; pretrained weights must currently be loaded manually."""
    net: ResNet = ResNet(block, layers, block_inplanes, **kwargs)
    if not pretrained:
        return net
    # Author of paper zipped the state_dict on googledrive,
    # so would need to download, unzip and read (2.8gb file for a ~150mb state dict).
    # Would like to load dict from url but need somewhere to save the state dicts.
    raise NotImplementedError(
        "Currently not implemented. You need to manually download weights provided by the paper's author"
        " and load then to the model with `state_dict`. See https://github.com/Tencent/MedicalNet"
    )
def resnet10(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-10 (basic blocks, stage depths 1-1-1-1).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [1, 1, 1, 1]
    return _resnet("resnet10", ResNetBlock, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-18 (basic blocks, stage depths 2-2-2-2).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [2, 2, 2, 2]
    return _resnet("resnet18", ResNetBlock, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-34 (basic blocks, stage depths 3-4-6-3).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 6, 3]
    return _resnet("resnet34", ResNetBlock, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-50 (bottleneck blocks, stage depths 3-4-6-3).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 6, 3]
    return _resnet("resnet50", ResNetBottleneck, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-101 (bottleneck blocks, stage depths 3-4-23-3).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 23, 3]
    return _resnet("resnet101", ResNetBottleneck, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-152 (bottleneck blocks, stage depths 3-8-36-3).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 8, 36, 3]
    return _resnet("resnet152", ResNetBottleneck, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-200 (bottleneck blocks, stage depths 3-24-36-3).

    Med3D pretraining (`<https://arxiv.org/pdf/1904.00625.pdf>`_) applies when `spatial_dims` is 3.
    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 24, 36, 3]
    return _resnet("resnet200", ResNetBottleneck, stage_depths, get_inplanes(), pretrained, progress, **kwargs)
| 39.783981 | 122 | 0.643158 |
acfbfdc0cdf24859f045e8f176faf51fdf88448d | 655 | py | Python | plugins/keepkey/cmdline.py | schancel/electrum | ee125e0723a9345757b741aed93be906af80d327 | [
"MIT"
] | 1 | 2020-05-24T08:50:13.000Z | 2020-05-24T08:50:13.000Z | plugins/keepkey/cmdline.py | Santobak/electrum | ee125e0723a9345757b741aed93be906af80d327 | [
"MIT"
] | null | null | null | plugins/keepkey/cmdline.py | Santobak/electrum | ee125e0723a9345757b741aed93be906af80d327 | [
"MIT"
] | 1 | 2017-12-22T03:57:04.000Z | 2017-12-22T03:57:04.000Z | from keepkey import KeepKeyPlugin
from electroncash.util import print_msg
class KeepKeyCmdLineHandler:
    """Minimal console handler for KeepKey prompts (passphrase and PIN entry)."""
    def get_passphrase(self, msg, confirm):
        # Hidden prompt so the passphrase never echoes to the terminal.
        import getpass
        print_msg(msg)
        return getpass.getpass('')
    def get_pin(self, msg):
        # The device scrambles its keypad; the letters a-i typed here map onto
        # the 3x3 grid of digits shown on the device screen.
        keymap = {'a': '7', 'b': '8', 'c': '9',
                  'd': '4', 'e': '5', 'f': '6',
                  'g': '1', 'h': '2', 'i': '3'}
        print_msg(msg)
        print_msg("a b c\nd e f\ng h i\n-----")
        typed = raw_input()
        return ''.join(keymap[ch] for ch in typed)
    def stop(self):
        # Nothing to tear down for a plain console handler.
        pass
    def show_message(self, msg):
        print_msg(msg)
class Plugin(KeepKeyPlugin):
    # Wires the command-line UI handler into the base KeepKey plugin.
    handler = KeepKeyCmdLineHandler()
| 25.192308 | 94 | 0.564885 |
acfbfe73455d7ec9f5a8e78a7b8ec8722804b936 | 472 | py | Python | pycon/finaid/migrations/0009_auto_20150917_1629.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 154 | 2015-01-17T02:29:24.000Z | 2022-03-20T20:37:24.000Z | pycon/finaid/migrations/0009_auto_20150917_1629.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 316 | 2015-01-10T04:01:50.000Z | 2020-09-30T20:18:08.000Z | pycon/finaid/migrations/0009_auto_20150917_1629.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 89 | 2015-01-10T05:25:21.000Z | 2022-02-27T03:28:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pycon.finaid.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-points `receipt_image` uploads at the
    # per-user directory layout provided by `user_directory_path`.
    dependencies = [
        ('finaid', '0008_auto_20150916_0848'),
    ]
    operations = [
        migrations.AlterField(
            model_name='receipt',
            name='receipt_image',
            field=models.FileField(upload_to=pycon.finaid.models.user_directory_path),
        ),
    ]
| 22.47619 | 86 | 0.648305 |
acfbfea1721edc59b3c95c63c524fd4e4f6176ae | 4,344 | py | Python | oneflow/python/ops/__init__.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | 1 | 2020-12-04T03:06:16.000Z | 2020-12-04T03:06:16.000Z | oneflow/python/ops/__init__.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | oneflow/python/ops/__init__.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re
import oneflow.core.job.placement_pb2 as placement_proto_pb
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.compile_context as compile_context
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.input_blob_def as input_blob_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.hob as hob
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.blob_register as blob_register_util
# Process-wide default blob register shared by the eager-return helpers below.
blob_register = blob_register_util.GetDefaultBlobRegister()
def InputOpByArgBlobDef(blob_def):
    """Create an `input` op described by *blob_def* and return a remote blob for its output.

    Args:
        blob_def: an `input_blob_util.ArgBlobDef` describing the graph input.
    Returns:
        A `remote_blob_util.RemoteBlob` wrapping the new op's logical blob id.
    """
    assert isinstance(blob_def, input_blob_util.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = blob_def.op_name
    op_conf.input_conf.out = blob_def.blob_name
    op_conf.input_conf.blob_conf.CopyFrom(blob_def.ToInterfaceBlobConf())
    # Registers the op with the current job and infers its output metadata.
    blob_def.AddAndInferOp(op_conf)
    return remote_blob_util.RemoteBlob(blob_def.lbi)
def ReturnRemoteBlob(remote_blob, allow_cpu_return_op=True):
    """Dispatch to the lazy or eager `return` implementation, whichever mode is active."""
    impl = enable_if.unique([LazyReturnRemoteBlob, EagerReturnRemoteBlob])
    return impl(remote_blob, allow_cpu_return_op)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def LazyReturnRemoteBlob(remote_blob, allow_cpu_return_op=True):
    """Lazy-mode implementation: append a `return` op to the current job graph."""
    assert isinstance(
        remote_blob,
        (remote_blob_util.LazyMirroredBlob, remote_blob_util.LazyConsistentBlob),
    )
    op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope(
        remote_blob, allow_cpu_return_op
    )
    compile_context.CurJobAddOp(op_conf, scope)
    return remote_blob_util.RemoteBlob(lbi)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerReturnRemoteBlob(remote_blob, allow_cpu_return_op=True):
    """Eager-mode implementation: run the `return` op immediately through the VM."""
    # Non-trainable jobs skip the extra op entirely.
    if not hob.is_trainable(None):
        return remote_blob
    op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope(
        remote_blob, allow_cpu_return_op
    )
    # Mirrored vs consistent blobs register through different job-build APIs.
    if remote_blob.blob_object.op_arg_parallel_attr.is_mirrored():
        add_and_infer = compile_context.CurJobAddMirroredOp
    else:
        add_and_infer = compile_context.CurJobAddConsistentOp
    op_attribute = add_and_infer(op_conf, scope)
    def BuildInstruction(builder):
        # Stateless call placed on the same parallel conf as the input blob.
        get_blob_scope = blob_register.BnInOp2BlobObjectScope
        with get_blob_scope(op_attribute) as bn_in_op2blob_object:
            builder.StatelessCall(
                op_attribute,
                remote_blob.blob_object.parallel_desc_symbol.parallel_conf,
                bn_in_op2blob_object=bn_in_op2blob_object,
            )
    vm_util.LogicalRun(BuildInstruction)
    return remote_blob_util.RemoteBlob(lbi)
def _GetReturnOpConfAndOutLbiAndScope(remote_blob, allow_cpu_return_op=True):
    """Build the `return` op conf, its output lbi, and a scope on the blob's placement."""
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("Return_")
    # `in` is a Python keyword, hence setattr instead of attribute syntax.
    setattr(op_conf.return_conf, "in", remote_blob.unique_name)
    op_conf.return_conf.out = "out"
    if allow_cpu_return_op:
        op_conf.device_type = c_api_util.DeviceType4DeviceTag("cpu")
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    # Scope mirrors the input blob's parallel configuration.
    parallel_conf = placement_proto_pb.ParallelConf()
    parallel_conf.CopyFrom(remote_blob.parallel_conf)
    def BuildScope(old_scope, builder):
        return old_scope.BuildWithNewParallelConf(builder, parallel_conf)
    sess = session_ctx.GetDefaultSession()
    scope = sess.MakeScope(BuildScope)
    return op_conf, lbi, scope
| 38.105263 | 81 | 0.78407 |
acfbfebc2881b0405fbd8b6108decd989674e692 | 11,207 | py | Python | lib/pavilion/legacy/schedulers/moabjobcontroller.py | not-pflarr/Pavilion | 9ae90eed6993af5d16d475ee8934df1488fb6103 | [
"BSD-3-Clause"
] | null | null | null | lib/pavilion/legacy/schedulers/moabjobcontroller.py | not-pflarr/Pavilion | 9ae90eed6993af5d16d475ee8934df1488fb6103 | [
"BSD-3-Clause"
] | null | null | null | lib/pavilion/legacy/schedulers/moabjobcontroller.py | not-pflarr/Pavilion | 9ae90eed6993af5d16d475ee8934df1488fb6103 | [
"BSD-3-Clause"
] | null | null | null | #!python
# ###################################################################
#
# Disclaimer and Notice of Copyright
# ==================================
#
# Copyright (c) 2015, Los Alamos National Security, LLC
# All rights reserved.
#
# Copyright 2015. Los Alamos National Security, LLC.
# This software was produced under U.S. Government contract
# DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
# which is operated by Los Alamos National Security, LLC for
# the U.S. Department of Energy. The U.S. Government has rights
# to use, reproduce, and distribute this software. NEITHER
# THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY
# FOR THE USE OF THIS SOFTWARE. If software is modified to
# produce derivative works, such modified software should be
# clearly marked, so as not to confuse it with the version
# available from LANL.
#
# Additionally, redistribution and use in source and binary
# forms, with or without modification, are permitted provided
# that the following conditions are met:
# - Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# - Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Los Alamos National Security, LLC,
# Los Alamos National Laboratory, LANL, the U.S. Government,
# nor the names of its contributors may be used to endorse
# or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# ###################################################################
""" Implementation of Moab Job Controller """
import sys
import os
import subprocess
import re
#from PAV.modules.basejobcontroller import JobController
from basejobcontroller import JobController
class MoabJobController(JobController):
""" class to run a job using Moab """
def setup_msub_cmd(self, user_script):
"""
create dynamic moab_job_handler script if users script contains msub
DW directives.
"""
fixed_cmd = os.environ['PVINSTALL'] + "/PAV/modules/moab_job_handler.py"
my_moab_wrapper_text = ""
# if DataWarp directives exist in the user script build new wrapper script on the fly
with open(user_script) as f:
match = re.findall(r'^#DW\s.+', f.read(), re.MULTILINE)
if match:
first_line = "#!/usr/bin/env python"
my_moab_wrapper_text += first_line + "\n"
for md in match:
self.logger.info(self.lh + " : adding directive: " + str(md))
my_moab_wrapper_text += md + "\n"
with open(fixed_cmd, 'r') as fc:
for li in fc:
if 'Template' in li:
for next_line in fc: # here are the lines we want
my_moab_wrapper_text += next_line
my_home_dir = os.path.expanduser("~")
my_moab_wrapper = my_home_dir + "/my_moab_wrapper" + ".py"
mw = open(my_moab_wrapper, "w")
mw.write(my_moab_wrapper_text)
mw.close()
dyn_cmd = " " + my_moab_wrapper
else:
dyn_cmd = fixed_cmd
return dyn_cmd
@staticmethod
def is_moab_system():
with open(os.devnull, 'w') as FNULL:
try:
subprocess.check_call(['mdiag'], stdout=FNULL)
return True
except subprocess.CalledProcessError:
return False
# .. some setup and let the msub command fly ...
def start(self):
# Stuff any buffered output into the output file now
# so that the the order doesn't look all mucked up
sys.stdout.flush()
msub_cmd = "msub -V "
# handle optionally specified queue
if 'queue' in self.configs['moab'] and self.configs['moab']['queue']:
msub_cmd += "-q " + self.configs['moab']['queue'] + " "
# add test name
msub_cmd += "-N " + self.name + " "
# get time limit, if specified
time_lim = ''
try:
time_lim = str(self.configs['moab']['time_limit'])
self.logger.info(self.lh + " : time limit = " + time_lim)
except TypeError:
self.logger.info(self.lh + " Error: time limit value, test suite entry may need quotes")
# get target segment, if specified
ts = ''
if 'target_seg' in self.configs['moab'] and self.configs['moab']['target_seg']:
ts = self.configs['moab']['target_seg']
reservation = ''
if 'reservation' in self.configs['moab'] and self.configs['moab']['reservation']:
reservation = self.configs['moab']['reservation']
node_list = ''
if 'node_list' in self.configs['moab'] and self.configs['moab']['node_list']:
node_list = self.configs['moab']['node_list']
machine_type = ''
if 'machine_type' in self.configs['moab'] and self.configs['moab']['machine_type']:
machine_type = self.configs['moab']['machine_type']
# ++ PV_MACHINETYPE : The type of machine requested from moab
os.environ['PV_MACHINETYPE'] = machine_type
os_type = ''
if 'os' in self.configs['moab'] and self.configs['moab']['os']:
os_type = self.configs['moab']['os']
# ++ PV_OS : The os type requested from moab
os.environ['PV_OS'] = os_type
msub_args = ''
if 'msub_args' in self.configs['moab'] and self.configs['moab']['msub_args']:
msub_args = self.configs['moab']['msub_args']
# accounting file? or just log it?
# variation passed as arg0 - nodes, arg1 - ppn
nnodes = str(self.configs['moab']['num_nodes'])
#nnodes = str(self.job_variation[0])
#ppn = str(self.job_variation[1])
ppn = str(self.configs['moab']['procs_per_node'])
self.logger.info(self.lh + " : nnodes=" + nnodes)
self.logger.info(self.lh + " : ppn=" + ppn)
self.logger.info(self.lh + " : args=" + str(self.configs['run']['test_args']))
pes = int(ppn) * int(nnodes)
self.logger.info(self.lh + " : npes=" + str(pes))
# ++ PV_PESPERNODE : Number of cores per node
os.environ['GZ_PESPERNODE'] = ppn
os.environ['PV_PESPERNODE'] = ppn
# ++ PV_NNODES : Number of nodes allocated for this job
os.environ['GZ_NNODES'] = nnodes
os.environ['PV_NNODES'] = nnodes
print "<nnodes> " + nnodes
# ++ PV_NPES : Number of pe's allocated for this job
os.environ['PV_NPES'] = str(pes)
os.environ['GZ_NPES'] = os.environ['PV_NPES']
print "<npes> " + str(pes)
# create working space here so that each msub run gets its own
#self.setup_working_space()
# print the common log settings here right after the job is started
self.save_common_settings()
# setup unique Moab stdout and stderr file names
# Handle differences between moab-slurm, moab-cle, etc. ??
# ++ PV_JOB_RESULTS_LOG_DIR : Path where results for this job are placed
se = os.environ['PV_JOB_RESULTS_LOG_DIR'] + "/drm.stderr"
so = os.environ['PV_JOB_RESULTS_LOG_DIR'] + "/drm.stdout"
msub_cmd += "-o " + so + " -e " + se + " "
if node_list:
msub_cmd += "-l nodes=" + node_list
else:
msub_cmd += "-l nodes=" + nnodes
msub_cmd += ":ppn=" + ppn
if machine_type:
msub_cmd += ":" + machine_type
if os_type:
msub_cmd += ",os=" + os_type
if time_lim:
msub_cmd += ",walltime=" + time_lim
if ts:
msub_cmd += ",feature=" + ts
if reservation:
msub_cmd += ",advres=" + reservation
if msub_args:
msub_cmd += " " + msub_args
# ++ PV_RUNHOME : Path where this job is run from
run_cmd = os.environ['PV_RUNHOME'] + "/" + self.configs['run']['cmd']
os.environ['USER_CMD'] = run_cmd
# msub_cmd += " " + os.environ['PVINSTALL'] + "/PAV/modules/moab_job_handler.py"
if MoabJobController.is_moab_system():
msub_wrapper_script = self.setup_msub_cmd(run_cmd)
msub_cmd += " " + msub_wrapper_script
self.logger.info(self.lh + " : " + msub_cmd)
# call to invoke real Moab command
try:
output = subprocess.check_output(msub_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.logger.info(self.lh + " : msub exit status:" + str(e.returncode))
print "msub exit status:" + str(e.returncode)
self.logger.info(self.lh + " : msub output:" + e.output)
print "msub output:" + e.output
sys.stdout.flush()
raise
# Finds the jobid in the output from msub. The job id can either
# be just a number or Moab.number.
match = re.search(r"^((Moab.)?(\d+))[\r]?$",
output, re.IGNORECASE | re.MULTILINE)
jid = 0
if match.group(1):
jid = match.group(1)
print "<JobID> " + str(jid)
else:
# fake-out section to run on basic unix system
fake_job_cmd = os.environ['PVINSTALL'] + "/PAV/modules/moab_job_handler.py"
p = subprocess.Popen(fake_job_cmd, stdout=self.job_log_file,
stderr=self.job_log_file, shell=True)
# wait for the subprocess to finish
(output, errors) = p.communicate()
if p.returncode or errors:
print "Error: something went wrong!"
print [p.returncode, errors, output]
self.logger.info(self.lh + " run error: " + errors)
# this gets called if it's run as a script/program
# NOTE(review): no standalone behavior is defined here; running the module
# directly simply exits (the class is meant to be driven by Pavilion).
if __name__ == '__main__':
    sys.exit()
| 40.31295 | 100 | 0.590256 |
acfbff7386efd25e8da4b6e7f576164b8f7c658a | 395 | py | Python | exp/type_check/metadata/typing_types_extractor.py | mir-am/typilus | d2c126f178c02cfcef9b0ce652c4b019c2462e09 | [
"MIT"
] | 39 | 2020-04-16T05:14:53.000Z | 2022-01-12T12:50:07.000Z | exp/type_check/metadata/typing_types_extractor.py | fwangdo/typilus | 69c377b4cd286fd3657708accf3b2f56a5da1e8d | [
"MIT"
] | 6 | 2020-11-26T18:21:03.000Z | 2021-05-25T09:04:14.000Z | exp/type_check/metadata/typing_types_extractor.py | fwangdo/typilus | 69c377b4cd286fd3657708accf3b2f56a5da1e8d | [
"MIT"
] | 12 | 2020-04-25T19:12:46.000Z | 2022-02-17T08:49:24.000Z | #!/usr/bin/env python3
from bs4 import BeautifulSoup
import requests
def main():
    """Scrape the `typing` docs page and print every capitalized member name."""
    docs_url = "https://docs.python.org/3/library/typing.html"
    page = requests.get(docs_url)
    parsed = BeautifulSoup(page.text, "html.parser")
    for definition in parsed.find_all("dt"):
        # ids look like "typing.Member"; keep the part after the module name
        member = definition["id"].split(".")[1]
        if member[0].isupper():
            print(member)
# Script entry point: scrape and print the typing API names.
if __name__ == "__main__":
    main()
| 19.75 | 57 | 0.607595 |
acfbff76fbf01eca5ae4d08ea9208d19e264a554 | 22,584 | py | Python | objectModel/Python/tests/cdm/projection/test_projection_add_type.py | CBA-Consult/CDM | 892bceac7a15167c85342cc1c61d7ecdf5f1b78d | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/cdm/projection/test_projection_add_type.py | CBA-Consult/CDM | 892bceac7a15167c85342cc1c61d7ecdf5f1b78d | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/cdm/projection/test_projection_add_type.py | CBA-Consult/CDM | 892bceac7a15167c85342cc1c61d7ecdf5f1b78d | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-09-24T16:51:04.000Z | 2021-09-24T16:51:04.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.enums import CdmObjectType
from cdm.storage import LocalAdapter
from cdm.utilities import ResolveOptions, AttributeResolutionDirectiveSet
from tests.common import async_test, TestHelper
from tests.utilities.projection_test_utils import ProjectionTestUtils
class ProjectionAddTypeTest(unittest.TestCase):
"""A test class for testing the AddTypeAttribute operation in a projection as well as SelectedTypeAttribute in a resolution guidance"""
# All possible combinations of the different resolution directives
res_opts_combinations = [
[],
['referenceOnly'],
['normalized'],
['structured'],
['referenceOnly', 'normalized'],
['referenceOnly', 'structured'],
['normalized', 'structured'],
['referenceOnly', 'normalized', 'structured']
]
# The path between TestDataPath and TestName.
tests_subpath = os.path.join('Cdm', 'Projection', 'TestProjectionAddType')
@async_test
async def test_entity_attribute_proj_using_object_model(self):
"""Test for creating a projection with an AddTypeAttribute operation on an entity attribute using the object model"""
corpus = TestHelper.get_local_corpus(self.tests_subpath, 'test_entity_attribute_proj_using_object_model')
corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_entity_attribute_proj_using_object_model')))
local_root = corpus.storage.fetch_root_folder('local')
# Create an entity
entity = ProjectionTestUtils.create_entity(corpus, local_root)
# Create a projection
projection = ProjectionTestUtils.create_projection(corpus, local_root)
# Create an AddTypeAttribute operation
add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
projection.operations.append(add_type_attr_op)
# Create an entity reference to hold this projection
projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
projection_entity_ref.explicit_reference = projection
# Create an entity attribute that contains this projection and add this to the entity
entity_attribute = corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF, 'TestEntityAttribute')
entity_attribute.entity = projection_entity_ref
entity.attributes.append(entity_attribute)
# Resolve the entity
resolved_entity = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), None, local_root)
# Verify correctness of the resolved attributes after running the AddTypeAttribute operation
# Original set of attributes: ["id", "name", "value", "date"]
# Type attribute: "testType"
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('id', resolved_entity.attributes[0].name)
self.assertEqual('name', resolved_entity.attributes[1].name)
self.assertEqual('value', resolved_entity.attributes[2].name)
self.assertEqual('date', resolved_entity.attributes[3].name)
self.assertEqual('testType', resolved_entity.attributes[4].name)
self.assertIsNotNone(resolved_entity.attributes[4].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_entity_proj_using_object_model(self):
    """Test for creating a projection with an AddTypeAttribute operation on an entity definition using the object model"""
    corpus = TestHelper.get_local_corpus(self.tests_subpath, 'test_entity_proj_using_object_model')
    corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_entity_proj_using_object_model')))
    local_root = corpus.storage.fetch_root_folder('local')

    # Build a source entity and a projection over it.
    entity = ProjectionTestUtils.create_entity(corpus, local_root)
    projection = ProjectionTestUtils.create_projection(corpus, local_root)

    # AddTypeAttribute operation whose new attribute carries the 'entityName' data type.
    add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
    add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
    add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
    projection.operations.append(add_type_attr_op)

    # Wrap the projection in an entity reference and make it the entity's extends-entity.
    projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
    projection_entity_ref.explicit_reference = projection
    entity.extends_entity = projection_entity_ref

    resolved_entity = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), None, local_root)

    # Original attributes ["id", "name", "value", "date"] plus the appended "testType".
    expected_names = ['id', 'name', 'value', 'date', 'testType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[4].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_conditional_proj_using_object_model(self):
    """Test for creating a projection with an AddTypeAttribute operation and a condition using the object model"""
    corpus = TestHelper.get_local_corpus(self.tests_subpath, 'test_conditional_proj_using_object_model')
    corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_conditional_proj_using_object_model')))
    local_root = corpus.storage.fetch_root_folder('local')

    entity = ProjectionTestUtils.create_entity(corpus, local_root)

    # Projection that only fires when the resolution directive is 'referenceOnly'.
    projection = ProjectionTestUtils.create_projection(corpus, local_root)
    projection.condition = 'referenceOnly==True'

    # AddTypeAttribute operation whose new attribute carries the 'entityName' data type.
    add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
    add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
    add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
    projection.operations.append(add_type_attr_op)

    # Hold the projection in an entity reference and attach it through an entity attribute.
    projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
    projection_entity_ref.explicit_reference = projection
    entity_attribute = corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF, 'TestEntityAttribute')
    entity_attribute.entity = projection_entity_ref
    entity.attributes.append(entity_attribute)

    res_opt = ResolveOptions(entity.in_document)

    # With 'referenceOnly' the condition holds, so the type attribute is appended.
    res_opt.directives = AttributeResolutionDirectiveSet(set(['referenceOnly']))
    resolved_entity_with_reference_only = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), res_opt, local_root)
    expected_names = ['id', 'name', 'value', 'date', 'testType']
    self.assertEqual(len(expected_names), len(resolved_entity_with_reference_only.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity_with_reference_only.attributes[index].name)
    self.assertIsNotNone(resolved_entity_with_reference_only.attributes[4].applied_traits.item('is.linkedEntity.name'))

    # With 'structured' the condition is false, so no type attribute is added.
    res_opt.directives = AttributeResolutionDirectiveSet(set(['structured']))
    resolved_entity_with_structured = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), res_opt, local_root)
    expected_names = ['id', 'name', 'value', 'date']
    self.assertEqual(len(expected_names), len(resolved_entity_with_structured.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity_with_structured.attributes[index].name)
@async_test
async def test_add_type_attribute_proj(self):
    """AddTypeAttribute on an entity attribute"""
    test_name = 'test_add_type_attribute_proj'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Original attributes plus the appended type attribute "someType".
    expected_names = ['emailId', 'address', 'isPrimary', 'phoneId', 'number', 'socialId', 'account', 'someType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[7].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_selected_type_attr(self):
    """SelectedTypeAttribute on an entity attribute"""
    test_name = 'test_selected_type_attr'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Original attributes plus the appended type attribute "someType".
    expected_names = ['emailId', 'address', 'isPrimary', 'phoneId', 'number', 'socialId', 'account', 'someType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[7].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_extends_entity_proj(self):
    """AddTypeAttribute on an entity definition"""
    test_name = 'test_extends_entity_proj'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Original attributes plus the appended type attribute "someType".
    expected_names = ['emailId', 'address', 'isPrimary', 'phoneId', 'number', 'socialId', 'account', 'someType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[7].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_extends_entity(self):
    """SelectedTypeAttribute on an entity definition"""
    test_name = 'test_extends_entity'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Original attributes plus "someType" (via extendsEntityResolutionGuidance).
    expected_names = ['emailId', 'address', 'isPrimary', 'phoneId', 'number', 'socialId', 'account', 'someType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[7].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_add_type_with_combine_proj(self):
    """AddTypeAttribute on an entity attribute (after a CombineAttributes)"""
    test_name = 'test_add_type_with_combine_proj'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # ["emailId", "phoneId", "socialId"] merged into "contactId"; type attribute "contactType" appended.
    expected_names = ['address', 'isPrimary', 'number', 'account', 'contactId', 'contactType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[5].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_combine_ops_proj(self):
    """AddTypeAttribute with other operations in the same projection"""
    test_name = 'test_combine_ops_proj'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Type attribute "someType" appended, and "address" also renamed to "homeAddress".
    expected_names = ['emailId', 'address', 'isPrimary', 'phoneId', 'number', 'socialId', 'account', 'someType', 'homeAddress']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[7].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_combine_ops_nested_proj(self):
    """Nested projections with AddTypeAttribute and other operations"""
    test_name = 'test_combine_ops_nested_proj'
    entity_name = 'Customer'
    corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)

    # Save the entity once per resolution-option combination before the plain resolve.
    for res_opt in self.res_opts_combinations:
        await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)

    entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
    resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])

    # Merge into "contactId", append "contactType", rename with "new_{m}", then include only three attributes.
    expected_names = ['new_isPrimary', 'new_contactId', 'contactType']
    self.assertEqual(len(expected_names), len(resolved_entity.attributes))
    for index, attr_name in enumerate(expected_names):
        self.assertEqual(attr_name, resolved_entity.attributes[index].name)
    self.assertIsNotNone(resolved_entity.attributes[2].applied_traits.item('is.linkedEntity.name'))
@async_test
async def test_conditional_proj(self):
"""AddTypeAttribute with a condition"""
test_name = 'test_conditional_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_corpus(test_name, self.tests_subpath)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Merge ["emailId, "phoneId, "socialId"] into "contactId", type attribute: "contactType"
# Condition for projection containing AddTypeAttribute is false, so no Type attribute is created
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('address', resolved_entity.attributes[0].name)
self.assertEqual('isPrimary', resolved_entity.attributes[1].name)
self.assertEqual('number', resolved_entity.attributes[2].name)
self.assertEqual('account', resolved_entity.attributes[3].name)
self.assertEqual('contactId', resolved_entity.attributes[4].name)
| 60.546917 | 162 | 0.735299 |
acfc011d5de0d242ca920b7a270d372ab3f75e89 | 405 | py | Python | echo/__init__.py | AshleyLab/lvh-fusion | 5a7c22d2236345ba67c8ed5f9c4dc720b49936fb | [
"MIT"
] | null | null | null | echo/__init__.py | AshleyLab/lvh-fusion | 5a7c22d2236345ba67c8ed5f9c4dc720b49936fb | [
"MIT"
] | null | null | null | echo/__init__.py | AshleyLab/lvh-fusion | 5a7c22d2236345ba67c8ed5f9c4dc720b49936fb | [
"MIT"
] | null | null | null | """
The lvh-fusion package contains code for loading echocardiogram videos, and
functions for training and testing for hcm, htn, and athletes for
prediction models.
"""
from hha.__version__ import __version__
from hha.config import CONFIG as config
import hha.datasets as datasets
import hha.utils as utils
import hha.losses as losses
__all__ = ["__version__", "config", "datasets", "utils","losses"]
| 27 | 75 | 0.777778 |
acfc01fc0e0695d4775ee8f589c95f7494a39c23 | 4,470 | py | Python | contrib/seeds/generate-seeds.py | dgarage/bc3 | b38c9fd8cb96ad8ecfe3ffb734d0d71764967638 | [
"MIT"
] | 11 | 2017-01-10T02:48:57.000Z | 2017-07-31T03:29:21.000Z | contrib/seeds/generate-seeds.py | dgarage/bc3 | b38c9fd8cb96ad8ecfe3ffb734d0d71764967638 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | dgarage/bc3 | b38c9fd8cb96ad8ecfe3ffb734d0d71764967638 | [
"MIT"
] | 8 | 2017-02-02T09:28:15.000Z | 2017-08-03T00:28:03.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepted forms: '<name>.onion' (base32 payload), dotted IPv4,
    colon-separated IPv6, and '0x...' little-endian IPv4 hex.
    Raises ValueError for anything unparsable.
    """
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            # BUG FIX: the original raised with the undefined name 's' here
            # ('Invalid onion %s' % s), which itself raised NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4: map into the ::ffff: prefix
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # bytes before and after the '::' gap
        x = 0
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                # skip empty component at beginning or end
                if i == 0 or i == (len(addr) - 1):
                    continue
                x += 1  # '::' switches from prefix to suffix collection
                assert(x < 2)
            else:  # two bytes per 16-bit component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4 stored as little-endian hex (old pnSeeds format)
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (ipv6_bytes, port).

    defaultport is used when the spec carries no explicit port.
    """
    # FIX: raw string for the pattern — '\[' in a plain string is an invalid
    # escape sequence (DeprecationWarning in Python 3.6+, later a SyntaxError).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:  # bracketed IPv6, optionally followed by :port
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:  # bare IPv6 — extra colons rule out host:port form
        host = s
        port = ''
    else:  # IPv4 or onion name, optionally followed by :port
        (host, _, port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host, port)
def process_nodes(g, f, structname, defaultport):
    """Emit the seeds read from file object f as a C array named structname on g."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        # Drop '#' comments and surrounding whitespace; ignore blank lines.
        comment_pos = line.find('#')
        if comment_pos != -1:
            line = line[:comment_pos]
        line = line.strip()
        if not line:
            continue
        # Comma-separate entries after the first one.
        if not first:
            g.write(',\n')
        first = False
        host, port = parse_spec(line, defaultport)
        hoststr = ','.join('0x%02x' % b for b in host)
        g.write(' {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Write chainparamsseeds.h to stdout from the node lists under argv[1]."""
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    header = (
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n'
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n'
        '/**\n'
        ' * List of fixed seed nodes for the bitcoin network\n'
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n'
        ' *\n'
        ' * Each line contains a 16-byte IPv6 address and a port.\n'
        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n'
        ' */\n'
    )
    g.write(header)
    # (input file, generated struct name, default port) per network.
    seed_lists = [
        ('nodes_bc2.txt', 'pnSeed6_bc2', 10232),
        ('nodes_main.txt', 'pnSeed6_main', 8333),
        ('nodes_test.txt', 'pnSeed6_test', 18333),
    ]
    for index, (fname, structname, port) in enumerate(seed_lists):
        if index:
            g.write('\n')  # blank line between generated arrays
        with open(os.path.join(indir, fname), 'r') as f:
            process_nodes(g, f, structname, port)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
| 31.478873 | 98 | 0.580089 |
acfc02d83c11d92b6f3a700217895ed60a301e3b | 4,074 | py | Python | easy/Problem3.py | Pawan459/infytq-dsa-day7 | 8625c37b863e2594e9e200a464a08e9ab38a67ef | [
"MIT"
] | null | null | null | easy/Problem3.py | Pawan459/infytq-dsa-day7 | 8625c37b863e2594e9e200a464a08e9ab38a67ef | [
"MIT"
] | null | null | null | easy/Problem3.py | Pawan459/infytq-dsa-day7 | 8625c37b863e2594e9e200a464a08e9ab38a67ef | [
"MIT"
] | null | null | null | # A bank wants to maintain the list of its customers.
# Write a python program to implement the class diagram given below:
# Customer
# - name
# - account_balance
# __init__(name, account_balance)
# + get_name()
# + get_account_balance()
# __str__()
# --------<>
# Bank
# - bank_name
# - customer_list
# __init__(bank_name, customer_list)
# + get_bank_name()
# + get_customer_list()
# + insert_customer(customer)
# Class Description – Bank:
# customer_list: Linked list where data in each node refers to a customer
# insert_customer(customer): Accept a customer object
# and insert it as the second customer in the linked list
# Create objects of Customer class,
# represent list of customers as a Linked list,
# create object of Bank class and test your program by invoking the methods.
#DSA-Prac-3
class Node:
    """Singly-linked-list node holding one data value and a next pointer."""

    def __init__(self, data):
        self.__value = data   # payload stored in this node
        self.__link = None    # next node in the chain, or None at the tail

    def get_data(self):
        """Return the data stored in this node."""
        return self.__value

    def set_data(self, data):
        """Replace the data stored in this node."""
        self.__value = data

    def get_next(self):
        """Return the node this one links to (None for the tail)."""
        return self.__link

    def set_next(self, next_node):
        """Point this node at next_node."""
        self.__link = next_node
class LinkedList:
    """Singly linked list of Node objects that tracks both head and tail."""

    def __init__(self):
        self.__head = None  # first node, or None when the list is empty
        self.__tail = None  # last node, or None when the list is empty

    def get_head(self):
        """Return the first node (None if the list is empty)."""
        return self.__head

    def get_tail(self):
        """Return the last node (None if the list is empty)."""
        return self.__tail

    def add(self, data):
        """Append a new node holding data at the tail."""
        new_node = Node(data)
        if(self.__head is None):
            # Empty list: the new node is both head and tail.
            self.__head = self.__tail = new_node
        else:
            self.__tail.set_next(new_node)
            self.__tail = new_node

    def insert(self, data, data_before):
        """Insert data after the first node containing data_before.

        Passing data_before=None inserts at the head. Prints a message
        (and inserts nothing) when data_before is not found.
        """
        new_node = Node(data)
        if(data_before == None):
            # Insert at the head of the list.
            new_node.set_next(self.__head)
            self.__head = new_node
            if(new_node.get_next() == None):
                # List was empty before, so the new node is also the tail.
                self.__tail = new_node
        else:
            node_before = self.find_node(data_before)
            if(node_before is not None):
                new_node.set_next(node_before.get_next())
                node_before.set_next(new_node)
                if(new_node.get_next() is None):
                    # Inserted after the old tail; advance the tail pointer.
                    self.__tail = new_node
            else:
                print(data_before, "is not present in the Linked list")

    def display(self):
        """Print each node's data from head to tail, one per line."""
        temp = self.__head
        while(temp is not None):
            print(temp.get_data())
            temp = temp.get_next()

    def find_node(self, data):
        """Return the first node whose data equals data, or None."""
        temp = self.__head
        while(temp is not None):
            if(temp.get_data() == data):
                return temp
            temp = temp.get_next()
        return None

    def delete(self, data):
        """Remove the first node containing data, fixing head/tail links.

        Prints a message (and removes nothing) when data is not found.
        """
        node = self.find_node(data)
        if(node is not None):
            if(node == self.__head):
                if(self.__head == self.__tail):
                    # Deleting the only node empties the list.
                    self.__tail = None
                self.__head = node.get_next()
            else:
                # Walk to the node just before the one being deleted.
                temp = self.__head
                while(temp is not None):
                    if(temp.get_next() == node):
                        temp.set_next(node.get_next())
                        if(node == self.__tail):
                            # Deleted the tail; its predecessor becomes the tail.
                            self.__tail = temp
                        node.set_next(None)
                        break
                    temp = temp.get_next()
        else:
            print(data, "is not present in Linked list")

    #You can use the below __str__() to print the elements of the DS object while debugging
    def __str__(self):
        """Return all node data from head to tail as one space-separated line."""
        temp = self.__head
        msg = []
        while(temp is not None):
            msg.append(str(temp.get_data()))
            temp = temp.get_next()
        msg = " ".join(msg)
        msg = "Linkedlist data(Head to Tail): " + msg
        return msg
#start writing your code here
class Customer:
    """A bank customer identified by name, with an account balance."""

    def __init__(self, name, account_balance):
        self.__customer_name = name
        self.__balance = account_balance

    def getName(self):
        """Return the customer's name."""
        return self.__customer_name

    def get_account_balance(self):
        """Return the current account balance."""
        return self.__balance

    def __str__(self):
        # Delegates to the default object string representation.
        return super().__str__()
acfc0300375be32892ba16e2f5284217d47e24a1 | 8,998 | py | Python | 04_recognize/configs/biglm/gen_configs.py | phsmit/iwclul2016-scripts | 20ead4c7492241ec19dec88fe86065a273c66af6 | [
"BSD-3-Clause"
] | null | null | null | 04_recognize/configs/biglm/gen_configs.py | phsmit/iwclul2016-scripts | 20ead4c7492241ec19dec88fe86065a273c66af6 | [
"BSD-3-Clause"
] | null | null | null | 04_recognize/configs/biglm/gen_configs.py | phsmit/iwclul2016-scripts | 20ead4c7492241ec19dec88fe86065a273c66af6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
import glob
for lang in ("sme", "est", "fin"):
for gender in ("M", "F"):
for order in [4,10,20]:
with open("{}{}_b_v_{}g_m.sh".format(lang,gender,order), 'w') as f:
print("export TEST_NAME='{}{}_b_v_{}g_m'".format(lang,gender,order), file=f)
print("export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/", file=f)
print(file=f)
print("export TEST_LM_SCALES=30", file=f)
print(file=f)
possible_ams = list(glob.glob(os.environ["GROUP_DIR"]+"/p/sami/models/{}_{}/hmm/*_22.ph".format(lang, gender)))
am = possible_ams[0][:-3]
print("export TEST_AM={}".format(am), file=f)
print("export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}{}_b_v_{}g_m".format(lang,gender,order), file=f)
print("export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender), file=f)
print("export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender), file=f)
print("export ONE_BYTE_ENCODING=ISO-8859-10", file=f)
# b60k: gender-specific AMs paired with a gender-independent 60k-vocab LM.
for lang in ("sme", "est", "fin"):
    for gender in ("M", "F"):
        for order in [4, 10, 20]:
            tag = "{}{}_b60k_v_{}g_m".format(lang, gender, order)
            lm_name = "{}_b60k_v_{}g_m".format(lang, order)
            with open(tag + ".sh", 'w') as f:
                am_candidates = list(glob.glob(os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}/hmm/*_22.ph".format(lang, gender)))
                acoustic_model = am_candidates[0][:-3]
                config_lines = [
                    "export TEST_NAME='{}'".format(tag),
                    "export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/",
                    "",
                    "export TEST_LM_SCALES=30",
                    "",
                    "export TEST_AM={}".format(acoustic_model),
                    "export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}".format(lm_name),
                    "export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender),
                    "export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender),
                    "export ONE_BYTE_ENCODING=ISO-8859-10",
                ]
                f.write("\n".join(config_lines) + "\n")
# b60kv: like b60k but with the vocab-adapted LM; Estonian adds an M2 speaker set.
for lang in ("sme", "est", "fin"):
    genders = ("M", "F", "M2") if lang == "est" else ("M", "F")
    for gender in genders:
        for order in [4, 10, 20]:
            tag = "{}{}_b60kv_v_{}g_m".format(lang, gender, order)
            lm_name = "{}_b60kv_v_{}g_m".format(lang, order)
            with open(tag + ".sh", 'w') as f:
                am_candidates = list(glob.glob(os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}/hmm/*_22.ph".format(lang, gender)))
                acoustic_model = am_candidates[0][:-3]
                config_lines = [
                    "export TEST_NAME='{}'".format(tag),
                    "export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/",
                    "",
                    "export TEST_LM_SCALES=30",
                    "",
                    "export TEST_AM={}".format(acoustic_model),
                    "export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}".format(lm_name),
                    "export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender),
                    "export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender),
                    "export ONE_BYTE_ENCODING=ISO-8859-10",
                ]
                f.write("\n".join(config_lines) + "\n")
# s1b60k: b60k LM paired with the 150m-frame acoustic models; est adds M2.
for lang in ("sme", "est", "fin"):
    genders = ("M", "F", "M2") if lang == "est" else ("M", "F")
    for gender in genders:
        for order in [4, 10, 20]:
            tag = "{}{}_s1b60k_v_{}g_m".format(lang, gender, order)
            lm_name = "{}_b60k_v_{}g_m".format(lang, order)
            with open(tag + ".sh", 'w') as f:
                am_candidates = list(glob.glob(os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}_150m/hmm/*_22.ph".format(lang, gender)))
                acoustic_model = am_candidates[0][:-3]
                config_lines = [
                    "export TEST_NAME='{}'".format(tag),
                    "export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/",
                    "",
                    "export TEST_LM_SCALES=30",
                    "",
                    "export TEST_AM={}".format(acoustic_model),
                    "export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}".format(lm_name),
                    "export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender),
                    "export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender),
                    "export ONE_BYTE_ENCODING=ISO-8859-10",
                ]
                f.write("\n".join(config_lines) + "\n")
# s1b60kv: b60kv LM paired with the 150m-frame acoustic models.
for lang in ("sme", "est", "fin"):
    for gender in ("M", "F"):
        for order in [4, 10, 20]:
            tag = "{}{}_s1b60kv_v_{}g_m".format(lang, gender, order)
            lm_name = "{}_b60kv_v_{}g_m".format(lang, order)
            with open(tag + ".sh", 'w') as f:
                am_candidates = list(glob.glob(os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}_150m/hmm/*_22.ph".format(lang, gender)))
                acoustic_model = am_candidates[0][:-3]
                config_lines = [
                    "export TEST_NAME='{}'".format(tag),
                    "export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/",
                    "",
                    "export TEST_LM_SCALES=30",
                    "",
                    "export TEST_AM={}".format(acoustic_model),
                    "export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}".format(lm_name),
                    "export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender),
                    "export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender),
                    "export ONE_BYTE_ENCODING=ISO-8859-10",
                ]
                f.write("\n".join(config_lines) + "\n")
# Order sweep (2..20-gram) for sme on the standard AMs, emitting the three LM
# variants v/m, s/m and s/w per (order, gender). This deduplicates the three
# near-identical write blocks of the original and hoists the loop-invariant
# acoustic-model glob out of the per-file code (it depends only on lang+gender).
lang = "sme"
for order in range(2, 21):
    for gender in ("M", "F"):
        # Pick the acoustic model for this speaker set once per combination.
        am_candidates = list(glob.glob(os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}/hmm/*_22.ph".format(lang, gender)))
        acoustic_model = am_candidates[0][:-3]
        # (vocab tag, unit tag): v/m, s/m, s/w — same order as the original writes.
        for vocab_tag, unit_tag in (("v", "m"), ("s", "m"), ("s", "w")):
            test_name = "{}{}_s1b60kv_{}_{}g_{}".format(lang, gender, vocab_tag, order, unit_tag)
            lm_name = "{}_b60kv_{}_{}g_{}".format(lang, vocab_tag, order, unit_tag)
            with open(test_name + ".sh", 'w') as f:
                print("export TEST_NAME='{}'".format(test_name), file=f)
                print("export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/biglm/", file=f)
                print(file=f)
                print("export TEST_LM_SCALES=30", file=f)
                print(file=f)
                print("export TEST_AM={}".format(acoustic_model), file=f)
                print("export TEST_LM=$GROUP_DIR/p/sami/lmmodels/biglm/{}".format(lm_name), file=f)
                print("export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender), file=f)
                print("export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender), file=f)
                print("export ONE_BYTE_ENCODING=ISO-8859-10", file=f)
| 50.836158 | 132 | 0.582352 |
acfc033e0271d00f279c06ab7f2f34e84ed2203c | 3,943 | py | Python | qa/rpc-tests/wallet.py | escortcrypto/EscortCoin | c85acd0480ae0d6105d0faf2c22e6eae7d3e383c | [
"MIT"
] | 1 | 2020-04-07T08:04:04.000Z | 2020-04-07T08:04:04.000Z | qa/rpc-tests/wallet.py | escortcrypto/EscortCoin | c85acd0480ae0d6105d0faf2c22e6eae7d3e383c | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet.py | escortcrypto/EscortCoin | c85acd0480ae0d6105d0faf2c22e6eae7d3e383c | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise the wallet. Ported from wallet.sh.
# Does the following:
# a) creates 3 nodes, with an empty chain (no blocks).
# b) node0 mines a block
# c) node1 mines 32 blocks, so now node 0 has 500000EAPC, node 1 has 4250EAPC, node2 has none.
# d) node0 sends 601 EAPC to node2, in two transactions (301 EAPC, then 300 EAPC).
# e) node0 mines a block, collects the fee on the second transaction
# f) node1 mines 16 blocks, to mature node0's just-mined block
# g) check that node0 has 100-21, node2 has 21
# h) node0 should now have 2 unspent outputs; send these to node2 via raw tx broadcast by node1
# i) have node1 mine a block
# j) check balances - node0 should have 0, node2 should have 100
#
from test_framework import BitcoinTestFramework
from util import *
class WalletTest (BitcoinTestFramework):
    """Exercise basic wallet functionality on a 3-node regtest network:
    balances after mining, sendtoaddress, and raw-transaction sweeping.

    NOTE: Python 2 test harness (run_test uses the print statement).
    """
    def setup_chain(self):
        # Start every node from a completely empty chain (no cached blocks).
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)
    def setup_network(self, split=False):
        # Fully mesh the three nodes so blocks and transactions propagate;
        # `split` is accepted for framework compatibility but ignored here.
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()
    def run_test (self):
        """Mine blocks, move 701 EAPC node0->node2, then sweep node0's two
        remaining UTXOs to node2 via raw transactions relayed by node1."""
        print "Mining blocks..."
        self.nodes[0].setgenerate(True, 1)
        self.sync_all()
        self.nodes[1].setgenerate(True, 32)
        self.sync_all()
        # Expected balances after the mining above: node0 500000, node1 4250.
        assert_equal(self.nodes[0].getbalance(), 500000)
        assert_equal(self.nodes[1].getbalance(), 4250)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Send 701 EAPC from node0 to node2 using two sendtoaddress calls
        # (351, then 350).
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 351)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 350)
        # Have node0 mine a block, thus he will collect his own fee.
        self.nodes[0].setgenerate(True, 1)
        self.sync_all()
        # Have node1 generate 16 blocks so node0's just-mined reward matures.
        self.nodes[1].setgenerate(True, 16)
        self.sync_all()
        # node0 keeps its mined rewards minus the 701 (and fees) sent to node2.
        assert_greater_than(self.nodes[0].getbalance(), 59549)
        assert_equal(self.nodes[2].getbalance(), 701)
        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)
        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            # Output equals the full input amount, all to node2's "from1" account.
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
        # Have node1 mine a block to confirm transactions:
        self.nodes[1].setgenerate(True, 1)
        self.sync_all()
        # node0 is fully swept; node2 now holds everything under account "from1".
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_greater_than(self.nodes[2].getbalance(), 60250)
        assert_greater_than(self.nodes[2].getbalance("from1"), 59549)
# Script entry point: run the wallet test when invoked directly.
if __name__ == '__main__':
    WalletTest().main()
| 39.039604 | 98 | 0.662693 |
acfc03a47f881cd5bf480c6f19108d9d185bbf35 | 4,923 | py | Python | Main Code.py | DonaldHobson/Large-Text | 06241afce667de339aff829afba12e4dd788a715 | [
"CC0-1.0"
] | null | null | null | Main Code.py | DonaldHobson/Large-Text | 06241afce667de339aff829afba12e4dd788a715 | [
"CC0-1.0"
] | null | null | null | Main Code.py | DonaldHobson/Large-Text | 06241afce667de339aff829afba12e4dd788a715 | [
"CC0-1.0"
] | null | null | null | #Donald Hobson
#A program to make big letters out of small ones
# Storage of the 5x5 patterns used to build large letters, indexed A-Z.
# Each letter is five rows of exactly five characters: the subtext renderer
# below indexes every row position 0-4, so every row must be 5 wide.
# NOTE(review): the original table spelled letter C with lowercase 'c',
# which leaked lowercase glyphs into "Capital" mode; it is uppercased here.
largeLetter = [
    ["  A  ", " A A ", " AAA ", "A   A", "A   A"],
    ["BBBB ", "B   B", "BBBBB", "B   B", "BBBB "],
    [" CCCC", "C    ", "C    ", "C    ", " CCCC"],
    ["DDDD ", "D   D", "D   D", "D   D", "DDDD "],
    ["EEEEE", "E    ", "EEEE ", "E    ", "EEEEE"],
    ["FFFFF", "F    ", "FFFF ", "F    ", "F    "],
    [" GGG ", "G    ", "G  GG", "G   G", " GGG "],
    ["H   H", "H   H", "HHHHH", "H   H", "H   H"],
    ["IIIII", "  I  ", "  I  ", "  I  ", "IIIII"],
    [" JJJJ", "   J ", "   J ", "   J ", "JJJ  "],
    ["K   K", "K KK ", "KK   ", "K KK ", "K   K"],
    ["L    ", "L    ", "L    ", "L    ", "LLLLL"],
    ["M   M", "MM MM", "M M M", "M   M", "M   M"],
    ["N   N", "NN  N", "N N N", "N  NN", "N   N"],
    [" OOO ", "O   O", "O   O", "O   O", " OOO "],
    ["PPPP ", "P   P", "PPPP ", "P    ", "P    "],
    [" QQ  ", "Q  Q ", "Q QQ ", "Q  Q ", " QQ Q"],
    ["RRRR ", "R   R", "RRRR ", "R  R ", "R   R"],
    [" SSSS", "S    ", " SSS ", "    S", "SSSS "],
    ["TTTTT", "  T  ", "  T  ", "  T  ", "  T  "],
    ["U   U", "U   U", "U   U", "U   U", " UUU "],
    ["V   V", "V   V", " V V ", " V V ", "  V  "],
    ["W   W", "W   W", "W   W", "W W W", " W W "],
    ["X   X", " X X ", "  X  ", " X X ", "X   X"],
    ["Y   Y", " Y Y ", "  Y  ", "  Y  ", "  Y  "],
    ["ZZZZZ", "   Z ", "  Z  ", " Z   ", "ZZZZZ"],
]
# Outer loop, For repeating whle process
while True:
largeText=input("Large Text>").upper()
while True:
method=input("Calital \"C\" , Lowercase \"L\" or Subtext \"S\" >").upper()
if method=="C"or method=="L":
break
if method=="S":
subtext=""
while len(subtext)==0:
subtext=input("Subtext is >")
positionInSubtext=0
subtextLength=len(subtext)
break
largeTextSections=[]
print()
while len(largeText)>19:
largeTextSections.append(largeText[:19])
largeText=largeText[19:]
if len(largeText)>0:
largeTextSections.append(largeText)
for section in largeTextSections:
for i in range(5):
string=""
for line in section:
if line==" ":
string+=" "*5
else:
if method=="S":
for character in range (5):
newstr=largeLetter[ord(line)-65][i]
if largeLetter[ord(line)-65][i][character]==" ":
string+=" "
else:
string+=subtext[positionInSubtext]
positionInSubtext=(positionInSubtext+1)%subtextLength
elif method=="L":
string+=largeLetter[ord(line)-65][i].lower()
else:
string+=largeLetter[ord(line)-65][i]
string+=" "
print(string)
print("\n")
if input("Do you wish to exit \"Y\"/\"N\" >").upper() =="Y":
break
| 23.004673 | 85 | 0.239082 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.