hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6af847c4409dc5698048181b6fa67b8dcf6d55a | 1,808 | py | Python | playback/db.py | Nierot/Spotify | 11dfe064cbd281c86473ef025d41f7eef293e81b | [
"MIT"
] | null | null | null | playback/db.py | Nierot/Spotify | 11dfe064cbd281c86473ef025d41f7eef293e81b | [
"MIT"
] | null | null | null | playback/db.py | Nierot/Spotify | 11dfe064cbd281c86473ef025d41f7eef293e81b | [
"MIT"
] | null | null | null | from . import models
def new_user(name, date, token):
    """Create a User unless one with this token already exists.

    Returns True when a new user was created, False when the token is
    already taken.
    """
    # A single .exists() query replaces the original duplicated filter
    # (the result bound to existing_users was never used and the same
    # filter ran twice) and avoids materializing rows just for len().
    if models.User.objects.filter(token=token).exists():
        return False
    user = models.User(name=name, created_at=date, token=token)
    user.save()
    return True
def new_genre(name):
    """Persist and return a Genre record with the given name."""
    genre_record = models.Genre(name=name)
    genre_record.save()
    return genre_record
def new_artist(name, genres):
    """Persist an Artist and attach every genre in *genres* to it."""
    artist_record = models.Artist(name=name)
    artist_record.save()  # must be saved before the M2M relation is usable
    for genre_record in genres:
        artist_record.genres.add(genre_record)
    return artist_record
def new_track(name, artist):
    """Persist and return a Track linked to *artist*."""
    track_record = models.Track(name=name, artist=artist)
    track_record.save()
    return track_record
def add_liked_track(user, track, term):
    """Record that *user* likes *track* for a given term.

    *term* is an integer: 1 = short_term, 2 = medium_term, 3 = long_term.
    """
    record = models.Liked_track(term=term, track=track, user=user)
    record.save()
    return record
def add_liked_artist(user, artist, term):
    """Record that *user* likes *artist* for a given term.

    *term* is an integer: 1 = short_term, 2 = medium_term, 3 = long_term.
    """
    record = models.Liked_artist(term=term, artist=artist, user=user)
    record.save()
    return record
def add_liked_genre(user, genre, term):
    """Record that *user* likes *genre* for a given term.

    *term* is an integer: 1 = short_term, 2 = medium_term, 3 = long_term.
    """
    record = models.Liked_genre(term=term, genre=genre, user=user)
    record.save()
    return record
def get_user(token):
    """Fetch the single User identified by *token*."""
    users = models.User.objects
    return users.get(token=token)


def get_genre(name):
    """Fetch the Genre record with the given name."""
    genres = models.Genre.objects
    return genres.get(name=name)


def get_artist(name):
    """Fetch the Artist record with the given name."""
    artists = models.Artist.objects
    return artists.get(name=name)


def get_track(name):
    """Fetch the Track record with the given name."""
    tracks = models.Track.objects
    return tracks.get(name=name)
def add_genre_to_artist(genre,artist):
artist.genres.add(genre) | 27.393939 | 81 | 0.690819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.154314 |
f6b1170ad7a3338dfad41aa494313487f0ff14fc | 767 | py | Python | invenio_records_rest/loaders/__init__.py | NRodriguezcuellar/invenio-records-rest | c4a3717afcf9b08b6e42f3529addecc64bb2e47c | [
"MIT"
] | 5 | 2017-10-22T00:13:49.000Z | 2019-10-04T11:35:18.000Z | invenio_records_rest/loaders/__init__.py | NRodriguezcuellar/invenio-records-rest | c4a3717afcf9b08b6e42f3529addecc64bb2e47c | [
"MIT"
] | 221 | 2015-10-30T23:27:52.000Z | 2022-03-07T13:17:55.000Z | invenio_records_rest/loaders/__init__.py | NRodriguezcuellar/invenio-records-rest | c4a3717afcf9b08b6e42f3529addecc64bb2e47c | [
"MIT"
] | 60 | 2015-10-30T22:43:27.000Z | 2022-02-10T10:08:08.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Loaders for deserializing records in the REST API."""
from ..schemas import RecordMetadataSchemaJSONV1, RecordSchemaJSONV1
from .marshmallow import json_patch_loader, marshmallow_loader
# Loader built from the v1 record schema; accepts any JSON payload.
json_v1 = marshmallow_loader(RecordSchemaJSONV1)
"""Simple example loader that will take any JSON."""
# Direct alias: JSON Patch bodies are handled by the marshmallow helper as-is.
json_patch_v1 = json_patch_loader
"""Simple example loader that will take any JSON patch."""
# Loader that additionally validates persistent-identifier metadata.
json_pid_checker = marshmallow_loader(RecordMetadataSchemaJSONV1)
# Public API of this module.
__all__ = (
    'json_v1',
    'json_patch_loader',
    'json_pid_checker'
)
| 28.407407 | 72 | 0.762712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.57236 |
f6b1308798ea655f67ea118514f86ed5643a557d | 665 | py | Python | tests/test_csvtodb.py | rv816/csvtodb | 020ef50e44e458cddeec84f42d3d6e372aa678df | [
"0BSD"
] | null | null | null | tests/test_csvtodb.py | rv816/csvtodb | 020ef50e44e458cddeec84f42d3d6e372aa678df | [
"0BSD"
] | null | null | null | tests/test_csvtodb.py | rv816/csvtodb | 020ef50e44e458cddeec84f42d3d6e372aa678df | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_csvtodb
----------------------------------
Tests for `csvtodb` module.
"""
import unittest
from csvtodb.csvtodb import *
class TestCsvtodb(unittest.TestCase):
    """Scaffolding test case for the csvtodb module (no real assertions yet)."""
    def setUp(self):
        # No fixtures needed yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def test_000_something(self):
        # Placeholder so unittest discovery finds this module.
        pass
testfix = [['foo', 'bar', 'yellow'], ['thing1', 'thing2', 3], ['green', 'purple', 10]]
def test_upload_to_db():
    """Round-trip ``testfix`` through upload_to_db and check the second row.

    The first testfix row supplies the column names; the remaining rows are
    data, so row index 1 of the stored table is ['green', 'purple', 10].
    """
    db_url = 'sqlite://'
    # The original created an unused `db = dataset.connect(db_url)` handle;
    # upload_to_db connects with db_url itself, so that line was dead code.
    tablename = 'qrs_valueset_to_codes'
    testtable = upload_to_db(testfix, tablename, db_url)
    assert list(testtable.all())[1]['foo'] == 'green'
| 19 | 86 | 0.593985 | 158 | 0.237594 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.335338 |
f6b177997afccd32ce4402f37f222e11eaf794c4 | 915 | py | Python | config.py | mscienski/code-challenge-starter-python-flask | 17f5856bdde0e7ea73dc7dab20b4b366e01a61a2 | [
"MIT"
] | null | null | null | config.py | mscienski/code-challenge-starter-python-flask | 17f5856bdde0e7ea73dc7dab20b4b366e01a61a2 | [
"MIT"
] | null | null | null | config.py | mscienski/code-challenge-starter-python-flask | 17f5856bdde0e7ea73dc7dab20b4b366e01a61a2 | [
"MIT"
] | null | null | null | #pylint: disable=no-member
import os
import logging
from flask import Flask
from flask_cors import CORS
from sqlalchemy.orm import sessionmaker
from flask_sqlalchemy import SQLAlchemy
# Database connection settings, overridable via environment variables.
host: str = os.getenv('DB_HOST', 'localhost')
port: int = int(os.getenv('DB_PORT', '5432'))
user: str = os.getenv('DB_USER', 'postgres')
password: str = os.getenv('DB_PASSWORD', '')
name: str = os.getenv('DB_NAME', 'challenge_starter_development')
# SQLAlchemy connection URL assembled from the pieces above.
db_url = f'postgresql://{user}:{password}@{host}:{port}/{name}'
flask_app = Flask(__name__)
flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_url
# Disable the modification-tracking signal machinery (saves memory/CPU).
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB = SQLAlchemy(flask_app)
# Session factory bound to the app's engine, for use outside request contexts.
Session = sessionmaker(bind=DB.engine)
# Allow cross-origin requests on every route.
CORS(flask_app)
# Route Flask's logging through gunicorn's error logger so app logs show up
# in the gunicorn output at INFO level.
gunicorn_logger = logging.getLogger('gunicorn.error')
gunicorn_logger.setLevel(logging.INFO)
flask_app.logger.handlers = gunicorn_logger.handlers
flask_app.logger.setLevel(gunicorn_logger.level)
| 29.516129 | 63 | 0.780328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.286339 |
f6b27fc8dc15fd2e2b41b5a9f80c430678ed9908 | 389 | py | Python | shared-data/python/tests/labware/__init__.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | 2 | 2015-11-10T17:49:51.000Z | 2016-01-15T04:43:37.000Z | shared-data/python/tests/labware/__init__.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | shared-data/python/tests/labware/__init__.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | from typing import List, Tuple
from pathlib import Path
def get_ot_defs() -> List[Tuple[str, int]]:
    """Return (loadname, version) pairs for every labware definition JSON.

    Definitions live under shared-data/labware/definitions/2 relative to
    this file; e.g. .../2/opentrons_96_tiprack_300ul/1.json becomes
    ("opentrons_96_tiprack_300ul", 1).
    """
    definitions_root = (
        Path(__file__).parent / ".." / ".." / ".." / "labware" / "definitions" / "2"
    )
    return [
        (json_path.parent.name, int(json_path.stem))
        for json_path in definitions_root.glob("**/*.json")
    ]
| 27.785714 | 84 | 0.627249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.347044 |
f6b3633afcaa133a55552fd744518177020a4b93 | 2,699 | py | Python | scripts/migrate_unconfirmed_valid_users.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | scripts/migrate_unconfirmed_valid_users.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | scripts/migrate_unconfirmed_valid_users.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate users with a valid date_last_login but no date_confirmed."""
import sys
import logging
from django.utils import timezone
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
import datetime as dt
logger = logging.getLogger(__name__)
def do_migration(records):
    """Backfill date_confirmed from date_last_login for every user in *records*.

    Also flips is_registered to True where it was False, and logs each user
    before mutating it.
    """
    for account in records:
        log_info(account)
        account.date_confirmed = account.date_last_login
        if not account.is_registered:
            account.is_registered = True
        account.save()
    logger.info('Migrated {0} users'.format(len(records)))
def get_targets():
    """Users with no date_confirmed but at least one recorded login."""
    unconfirmed = Q('date_confirmed', 'eq', None)
    has_logged_in = Q('date_last_login', 'ne', None)
    return User.find(unconfirmed & has_logged_in)
def log_info(user):
    """Emit a one-line audit record for *user* before it is migrated."""
    message = (
        'Migrating user - {}: date_confirmed={}, '
        'date_last_login={}, is_registered={}'.format(
            user._id,
            user.date_confirmed,
            user.date_last_login,
            user.is_registered,
        )
    )
    logger.info(message)
def main():
    """Entry point: migrate targets, or only log them when 'dry' is passed."""
    init_app(routes=False)  # Sets the storage backends on all models
    if 'dry' in sys.argv:
        # Dry run: report what would be migrated without touching records.
        user_list = get_targets()
        for user in user_list:
            log_info(user)
        logger.info('[dry] Migrated {0} users'.format(len(user_list)))
    else:
        do_migration(get_targets())
class TestMigrateNodeCategories(OsfTestCase):
    """Tests for get_targets/do_migration.

    The original assertions compared integers and datetimes with ``is``;
    that only passed by accident (CPython small-int caching / object
    identity), so they are replaced with ``==`` here.
    """

    def test_get_targets(self):
        today = timezone.now()
        user1 = UserFactory.build(date_confirmed=today, date_last_login=today)
        user2 = UserFactory.build(date_confirmed=None, date_last_login=today)
        user1.save()
        user2.save()
        # Only user2 (unconfirmed, logged in) should be a target.
        user_list = get_targets()
        assert user_list is not None
        assert len(user_list) == 1
        # Unconfirm user1 as well; now both users are targets.
        user1.date_confirmed = None
        user1.save()
        user_list = get_targets()
        assert len(user_list) == 2

    def test_do_migration(self):
        today = timezone.now()
        user1 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=False)
        user2 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=True)
        user1.save()
        user2.save()
        user_list = User.find(Q('_id', 'eq', user1._id) | Q('_id', 'eq', user2._id))
        do_migration(user_list)
        # Both users get date_confirmed backfilled and end up registered.
        assert user1.date_confirmed == today
        assert user1.is_registered
        assert user2.date_confirmed == today
        assert user2.is_registered
if __name__ == '__main__':
    # Attach a persistent file logger only for real (non-dry) runs.
    if 'dry' not in sys.argv:
        script_utils.add_file_logger(logger, __file__)
    main()
| 28.712766 | 98 | 0.659504 | 1,125 | 0.416821 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.137458 |
f6b528db6aa41bb01a751fc5e92895bd172d0c13 | 681 | py | Python | src/main/python/proc/expression/let.py | cjblink1/lang | 245b8d002341dce4fa5905b1f274770e34867c7e | [
"MIT"
] | null | null | null | src/main/python/proc/expression/let.py | cjblink1/lang | 245b8d002341dce4fa5905b1f274770e34867c7e | [
"MIT"
] | null | null | null | src/main/python/proc/expression/let.py | cjblink1/lang | 245b8d002341dce4fa5905b1f274770e34867c7e | [
"MIT"
] | null | null | null |
from proc.expression.expression import Expression
from proc.environment import Environment
class LetExpression(Expression):
def __init__(self, variable: str, bound_expression: Expression, body: Expression):
self.variable = variable
self.bound_expression = bound_expression
self.body = body
def string_representation(self):
return "variable = {0}, bound-expression = {1}, body = {2)".format(self.variable, self.bound_expression, self.body)
    def evaluate(self, environment: Environment):
        # Eager `let` semantics: evaluate the bound expression first, then the
        # body in an environment extended with the new binding.
        bound_value = self.bound_expression.evaluate(environment)
return self.body.evaluate(environment.extend(self.variable, bound_value)) | 40.058824 | 123 | 0.735683 | 588 | 0.863436 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.076358 |
f6b7fa5f60e2de9db3a6e09ccdc529c81f6a56da | 203 | py | Python | cah/__init__.py | pordino/aikaterna-cogs | 07108bc808b571395e511586492f719c6582e72e | [
"Apache-2.0"
] | 98 | 2017-09-12T01:52:17.000Z | 2022-03-17T16:43:01.000Z | cah/__init__.py | pordino/aikaterna-cogs | 07108bc808b571395e511586492f719c6582e72e | [
"Apache-2.0"
] | 147 | 2016-12-01T04:39:05.000Z | 2022-02-13T02:20:14.000Z | cah/__init__.py | pordino/aikaterna-cogs | 07108bc808b571395e511586492f719c6582e72e | [
"Apache-2.0"
] | 160 | 2016-12-01T20:19:44.000Z | 2022-03-30T10:32:41.000Z | from .cah import CardsAgainstHumanity
__red_end_user_data_statement__ = "This cog does not persistently store data or metadata about users."
def setup(bot):
    """Cog entry point: register the CardsAgainstHumanity cog with *bot*."""
    bot.add_cog(CardsAgainstHumanity(bot))
| 25.375 | 102 | 0.802956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.334975 |
f6b888452d4e63186e8b7dc8abfa403e51a0d83f | 895 | py | Python | apps/civic_pulse/management/commands/create_scraper_user.py | JimHafner/GovLens | ea44084255d5409fa25ab11b24562f4d14d51bd1 | [
"MIT"
] | 17 | 2019-08-21T07:58:05.000Z | 2021-09-03T20:00:56.000Z | apps/civic_pulse/management/commands/create_scraper_user.py | JimHafner/GovLens | ea44084255d5409fa25ab11b24562f4d14d51bd1 | [
"MIT"
] | 51 | 2019-08-20T23:00:10.000Z | 2022-03-11T23:45:35.000Z | apps/civic_pulse/management/commands/create_scraper_user.py | JimHafner/GovLens | ea44084255d5409fa25ab11b24562f4d14d51bd1 | [
"MIT"
] | 44 | 2019-08-30T01:45:53.000Z | 2021-09-30T23:27:02.000Z | """Idempotent management command to create the scraper user with a DRF token
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
SCRAPER_USERNAME = "scraper"
class Command(BaseCommand):
    """Idempotently create the scraper user and its DRF auth token."""

    help = "Get or create a scraper user with a Django REST Framework token"

    def add_arguments(self, parser):
        # This command takes no extra CLI arguments.
        pass

    def handle(self, *args, **options):
        """Ensure the scraper user exists, then report its token."""
        user, was_created = User.objects.get_or_create(username=SCRAPER_USERNAME)
        user.save()
        if was_created:
            message = f"Created new user with username {SCRAPER_USERNAME}"
        else:
            message = f"User {SCRAPER_USERNAME} already exists."
        self.stdout.write(message)
        token, _ = Token.objects.get_or_create(user=user)
        self.stdout.write(f"The token for the user {SCRAPER_USERNAME} is {token}")
| 33.148148 | 83 | 0.707263 | 635 | 0.709497 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.338547 |
f6b8bf7ed46f49468f699cdf6f50b8125f1977b6 | 1,591 | py | Python | Simulation/reaching_gym/render.py | wq13552463699/UCD_UR5E | 513acb7e235ab940fd03c3038208678e285690f3 | [
"MIT"
] | 5 | 2021-11-02T10:48:54.000Z | 2022-01-10T12:32:51.000Z | Simulation/reaching_gym/render.py | wq13552463699/UR5E_robot_gym_env_Real_and_Sim | 513acb7e235ab940fd03c3038208678e285690f3 | [
"MIT"
] | null | null | null | Simulation/reaching_gym/render.py | wq13552463699/UR5E_robot_gym_env_Real_and_Sim | 513acb7e235ab940fd03c3038208678e285690f3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 23:19:58 2021
@author: qiang
"""
import tensorflow.keras.backend as K
import tensorflow as tf
import time
from rl_symbol_env_continous import symbol_env_continous
from DDPG_keras import ActorCritic
from env_base import UR5_env
# Observation/action dimensions for the DDPG agent (10-d obs, 4-d actions).
obs_dim = (10,)
obs_dimss = 10
act_dim = 4
act_dimension = (4,)
sess = tf.Session()
K.set_session(sess)
env = symbol_env_continous()
actor_critic = ActorCritic(sess)
# Checkpoint number whose weights are restored below.
episodee = 2500
actor_critic.actor_model.load_weights(str(episodee)+"_"+"actor"+".h5")
actor_critic.target_actor_model.load_weights(str(episodee)+"_"+"actor_target"+".h5")
actor_critic.critic_model.load_weights(str(episodee)+"_"+"critic"+".h5")
actor_critic.target_critic_model.load_weights(str(episodee)+"_"+"critic_target"+".h5")
#%%
# Input how many times you want to test the robot.
times = 100
env_rl = symbol_env_continous()
env_sim = UR5_env()
env_sim.sim_start()
# Each iteration is one evaluation episode: reset the RL env, mirror its
# state in the simulator, then roll the trained policy for up to 100 steps.
for _ in range(times):
    done = False
    cur_state = env_rl.reset()
    el = 0
    # Elements 7..9 of the observation are used as the target position here.
    target_pos = [cur_state[7],cur_state[8],cur_state[9]]
    env_sim.set_target_pos(target_pos)
    env_sim.movej(env_rl.current_joint_pos)
    cur_state = cur_state.reshape((1, obs_dimss))
    while not done and el<=100:
        el += 1
        action = actor_critic.act(cur_state)
        action = action.reshape((1, act_dim))
        next_state, reward, done = env_rl.step(action)
        # Replay the resulting joint pose in the simulator, slowed for viewing.
        env_sim.movej(env_rl.current_joint_pos)
        time.sleep(0.05)
        next_state = next_state.reshape((1,obs_dimss))
        cur_state = next_state
| 24.476923 | 86 | 0.706474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.146449 |
f6b926004f1740bebae35f7fc98b1abf6a61ed14 | 2,111 | py | Python | battingOrbowling_comp.py | ThomasAitken/Cricket-Modelling-Project | a0b8265c562d988dab16a145dc2e73225e2a4774 | [
"MIT"
] | null | null | null | battingOrbowling_comp.py | ThomasAitken/Cricket-Modelling-Project | a0b8265c562d988dab16a145dc2e73225e2a4774 | [
"MIT"
] | null | null | null | battingOrbowling_comp.py | ThomasAitken/Cricket-Modelling-Project | a0b8265c562d988dab16a145dc2e73225e2a4774 | [
"MIT"
] | null | null | null | #how to get a measure of batting or bowling strength? why it's simple! take top-100 rankings list at given period...
#what is score held by #1 player? normalise against this score (i.e. this score now equivalent to 1)
#now add up top 8 normalised scores for each nation's top 8 players in top 100 (set x: 0 <= x <= 1) .
#(8 an arbitrary number yes, but seems to be fair, on the evidence (8 players in top 100 means good depth, but above 8...
#we just bias nations that chop and change, and ingrain bias against minnows))
#now we normalise against nation with highest total of normalised scores. this nation has overall strength 1. others have 0.a
import csv
# Per-nation accumulator: [sum of normalised scores, number of players counted].
maindict = { "AUS":[0,0], "SA":[0,0], "IND":[0,0], "NZ":[0,0], "WI":[0,0], "PAK":[0,0], "BAN": [0,0], "AFG": [0,0], "SL": [0,0], "IRE": [0,0], "ZIM": [0,0], "ENG": [0,0], "SCO": [0,0], "CAN": [0,0], "NED": [0,0], "KEN": [0,0]}
# j tracks the row index so the header (row 0) is skipped and row 1 (the #1
# ranked player) supplies topValue, the normalisation denominator.
j = 0
topValue = 0
with open('[insert cute name for list of 100 rankings here]') as file:
    reader = csv.reader(file)
    for row in reader:
        if j == 0:
            j += 1
            continue
        if j == 1:
            topValue = int(row[1])
            print('[insert cute heading for output here]\n' + row[1])
            j += 1
        #excluding the super miniature minnows... these three countries are the ones to leave out from 2011-2013.
        #but the last three in the dict should be swapped with these three for the years 2014-the present
        if row[3] == 'P.N.G.' or row[3] == 'U.A.E.' or row[3] == 'H.K.':
            continue
        # Add the player's normalised score, but only the first 8 per nation.
        x = maindict[row[3]]
        normalValue = int(row[1])/topValue
        if x[1] < 8:
            x[0] += normalValue
            x[1] += 1
#printing the dict with raw scores of each nation as first element followed by # of players who contributed to the score
print(maindict)
print('\n')
# Find the strongest nation's raw total to normalise against.
biggestValue = 0
for entries in maindict:
    x = maindict[entries][0]
    if x > biggestValue:
        biggestValue = x
for entries in maindict:
    #normalising with respect to strongest nation
    maindict[entries] = maindict[entries][0]/biggestValue
#the output will now show us the dict with each nation's normalised strength-value first, followed by # of players again (unchanged)
print(maindict)
| 39.830189 | 226 | 0.672667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.632402 |
f6b9ca45aa3335bdf8507d0c324366c72d34aef6 | 460 | py | Python | Python-Programming/apply_functions_to_args.py | clickok/Code-Snippets | 403796256a2ec1bd37b2deb4ce5052671a39048f | [
"MIT"
] | null | null | null | Python-Programming/apply_functions_to_args.py | clickok/Code-Snippets | 403796256a2ec1bd37b2deb4ce5052671a39048f | [
"MIT"
] | null | null | null | Python-Programming/apply_functions_to_args.py | clickok/Code-Snippets | 403796256a2ec1bd37b2deb4ce5052671a39048f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
""" Applies a list of functions to command line arguments.
For quick-and-dirty command line argument handling.
"""
import sys
# List of functions to apply
funcLst = [int, int, float, float, int, int, int]
def parseArgs(argLst):
    """Convert and print each argument using the matching entry of funcLst.

    zip() pairs converters with arguments positionally, so surplus arguments
    beyond len(funcLst) are ignored instead of raising IndexError as the
    original range(len(...)) indexing loop did.
    """
    for convert, arg in zip(funcLst, argLst):
        print(convert(arg))
def main():
    """Show the module docstring for help requests, else parse the args."""
    if sys.argv[1] in {"help", "--help", "-h"}:
        print(__doc__)
        return
    parseArgs(sys.argv[1:])
if __name__ == "__main__":
main() | 17.692308 | 59 | 0.663043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.415217 |
f6ba4c3ad84b5b9411ffe7603ebe9e1cc9a52bf6 | 3,583 | py | Python | slot_filling/corpus_server_direct.py | IBM/kgi-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 21 | 2021-05-27T23:14:19.000Z | 2022-02-10T06:36:55.000Z | slot_filling/corpus_server_direct.py | IBM/retrieve-write-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 1 | 2021-11-26T04:00:19.000Z | 2021-11-26T04:00:19.000Z | slot_filling/corpus_server_direct.py | IBM/kgi-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 3 | 2021-06-04T13:14:22.000Z | 2022-03-09T14:45:50.000Z | #!/usr/bin/env python
# encoding: utf-8
from flask import Flask, request, jsonify
import base64
import numpy as np
from util.args_help import fill_from_args
import os
import logging
from dpr.simple_mmap_dataset import Corpus
from dpr.faiss_index import ANNIndex
logger = logging.getLogger(__name__)
class Options():
    """Configuration for the corpus retrieval REST service."""

    def __init__(self):
        self.port = 5001                          # HTTP port to serve on
        self.corpus_dir = ''                      # directory with corpus + faiss index
        self.model_name = 'facebook/rag-token-nq'
        self.rest_dtype = 16                      # 16 or 32: float width on the wire
        self.local_only = False  # only accessible on same machine
        self.debug = False
        self.log_info = False
        self.__required_args__ = ['corpus_dir']

    def get_rest_dtype(self):
        """numpy dtype used to (de)serialize vectors over REST."""
        if self.rest_dtype == 32:
            return np.float32
        return np.float16
def run(opts: Options):
    """Start a Flask REST service exposing ANN retrieval over the corpus."""
    # Replace any pre-existing root handlers so basicConfig takes effect.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(format='%(filename)s:%(lineno)d - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if opts.log_info else logging.WARNING)
    app = Flask(__name__)
    if not opts.log_info:
        # Silence werkzeug request logging unless info logging was requested.
        log = logging.getLogger('werkzeug')
        log.disabled = True
        app.logger.disabled = True
        app.logger.setLevel(logging.WARNING)
    # Memory-mapped passage store plus its FAISS index, both under corpus_dir.
    passages = Corpus(os.path.join(opts.corpus_dir))
    index = ANNIndex(os.path.join(opts.corpus_dir, "index.faiss"))
    dim = index.dim()
    print(dim)
    @app.route('/config', methods=['GET'])
    def get_config():
        # Lets clients discover the wire dtype and vector dimensionality.
        return jsonify({'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir})
    @app.route('/retrieve', methods=['POST'])
    def retrieve_docs():
        rest_dtype = opts.get_rest_dtype()
        query = request.get_json()
        # input is three parts:
        #  the base64 encoded fp16 numpy matrix
        #  k (the number of records per document)
        #  return-vectors flag
        query_vectors = np.frombuffer(base64.decodebytes(query['query_vectors'].encode('ascii')), dtype=rest_dtype).reshape(-1, dim)
        k = query['k']
        include_vectors = 'include_vectors' in query and query['include_vectors']
        query_vectors = query_vectors.astype(np.float32)
        scores, indexes = index.search(query_vectors, k)
        # docs[q][i] is the i-th retrieved passage for query q.
        docs = [[passages[ndx] for ndx in ndxs] for ndxs in indexes]
        if 'pid' in docs[0][0]:
            doc_dicts = [{'pid': [dqk['pid'] for dqk in dq],
                          'title': [dqk['title'] for dqk in dq],
                          'text': [dqk['text'] for dqk in dq]} for dq in docs]
        else:
            doc_dicts = [{'title': [dqk['title'] for dqk in dq],
                          'text': [dqk['text'] for dqk in dq]} for dq in docs]
        retval = {'docs': doc_dicts}
        if include_vectors:
            # Pack the retrieved passages' vectors as base64 in the wire dtype.
            doc_vectors = np.zeros([query_vectors.shape[0], k, query_vectors.shape[1]], dtype=rest_dtype)
            for qi, docs_qi in enumerate(docs):
                for ki, doc_qi_ki in enumerate(docs_qi):
                    doc_vectors[qi, ki] = doc_qi_ki['vector']
            retval['doc_vectors'] = base64.b64encode(doc_vectors).decode('ascii')
        # print(retval)
        # output
        #   list of docs: len(docs) == query_vectors.shape[0]; len(docs[i].title) == len(docs[i].text) == k
        #   doc_vectors: query_vectors.shape[0] x k x query_vectors.shape[1]
        return jsonify(retval)
    app.run(host='127.0.0.1' if opts.local_only else '0.0.0.0', debug=opts.debug, port=opts.port)
if __name__ == '__main__':
    # Populate Options from CLI/env arguments, then launch the service.
    opts = Options()
    fill_from_args(opts)
    run(opts)
| 36.561224 | 132 | 0.612057 | 443 | 0.123639 | 0 | 0 | 1,954 | 0.545353 | 0 | 0 | 745 | 0.207926 |
f6bb83e5aaef3e05a37054dc4da7d7e3f6405a72 | 2,019 | py | Python | b2btool/rename/rename_to_jde.py | recs12/b2btool | 37108c856f1be399b2b90c31ee90afa5b828481f | [
"Apache-2.0",
"MIT"
] | null | null | null | b2btool/rename/rename_to_jde.py | recs12/b2btool | 37108c856f1be399b2b90c31ee90afa5b828481f | [
"Apache-2.0",
"MIT"
] | null | null | null | b2btool/rename/rename_to_jde.py | recs12/b2btool | 37108c856f1be399b2b90c31ee90afa5b828481f | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
import shutil
import pandas as pd
from pathlib import Path
from b2btool.blob.storage import *
# Drawing-number -> JDE-number mapping, loaded once at import time. Both
# columns are forced to str so leading zeros survive the Excel read.
try:
    conversion = pd.read_excel(
        r"J:\PTCR\Users\RECS\.b2btool\drawings_to_jde.xlsx",
        dtype={"ID": str, "JDE": str},
    )
except FileNotFoundError as ex:
    # Best-effort: report the missing mapping file and carry on; code that
    # uses `conversion` will fail later with NameError if it was never loaded.
    print(ex.args)
def find_jde(drawing_name, conversion):
    """Return every JDE number mapped to *drawing_name* in *conversion*.

    A drawing may map to zero, one, or several JDE numbers; the result is
    always a (possibly empty) list.
    """
    matches = conversion.loc[conversion["drawing_number"] == drawing_name]
    return matches["JDE"].tolist()
def copy_file(image, image_renamed, push=False):
    """Copy *image* to *image_renamed*; divert a pre-existing target to duplicate/.

    When *push* is true and the copy succeeded, the renamed file is also
    uploaded to blob storage.
    """
    if os.path.exists(image_renamed):
        # A file with the target name already exists: park it for review.
        shutil.move(image_renamed, f"duplicate/{image_renamed}")
        print(f"[!]: {image_renamed} -> duplicate")
        return
    shutil.copyfile(image, image_renamed)
    print(f"[Copy] {image} \t->\t {image_renamed}")
    if push:
        push_to_blob_unique_image(image_renamed)
def rename_to_drawing_number(image_jpg, push):
    """Rename a drawing-numbered JPEG to its JDE-numbered equivalent(s).

    Uses the module-level `conversion` table. A drawing with several JDE
    equivalents is copied once per JDE and the original is deleted; a file
    already named by JDE is (optionally) pushed to blob storage as-is.
    """
    image_name = Path(image_jpg).stem
    # example of renaming pictures names: PT0019300.jpg -> 340247.jpg
    # create a folder for duplicate jde pictures.
    os.makedirs("duplicate", exist_ok=True)
    if (
        os.path.exists(image_jpg) and image_name in conversion["drawing_number"].tolist()
    ):
        jde_equivalences = find_jde(image_name, conversion)
        if len(jde_equivalences) == 1:
            # single jde for one drawing
            jde_equivalent_jpg = f"{jde_equivalences[0]}.jpg"
            copy_file(image_jpg, jde_equivalent_jpg, push)
        elif len(jde_equivalences) > 1:
            # Multiple jde's for one drawing: one copy per JDE, then delete
            # the drawing-named original.
            for jde in jde_equivalences:
                jde_jpg = f"{jde}.jpg"
                copy_file(image_jpg, jde_jpg, push)
            os.remove(image_jpg)
            print(f"[Deleted]: {image_jpg}")
        else:
            # for empty list simply ignore.
            pass
    else:
        # it's a jde part image.
        if push:
            push_to_blob_unique_image(image_jpg)
pass | 29.691176 | 89 | 0.630015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.280337 |
f6bbb4bc704bc44c6e1985c37232af967115f61d | 1,849 | py | Python | nlp/extract/util/nlp_process.py | anndawn/ccij-water-search | 0ec153f3f9eaa0e80184f4ddc9b3bd7f2e29c2ba | [
"Apache-2.0"
] | null | null | null | nlp/extract/util/nlp_process.py | anndawn/ccij-water-search | 0ec153f3f9eaa0e80184f4ddc9b3bd7f2e29c2ba | [
"Apache-2.0"
] | null | null | null | nlp/extract/util/nlp_process.py | anndawn/ccij-water-search | 0ec153f3f9eaa0e80184f4ddc9b3bd7f2e29c2ba | [
"Apache-2.0"
] | 1 | 2020-11-05T07:04:59.000Z | 2020-11-05T07:04:59.000Z | import spacy
import pandas as pd
import numpy as np
# Given tokens, find sentences containing a keyword from texts
def find_sents(tokens, keyword):
    """Return the sentences of *tokens* containing *keyword*.

    Matching is a case-insensitive substring test on each token's text.
    Each sentence appears at most once in the result, in document order.
    """
    matching_sents = []
    for sent in tokens.sents:
        words = [token.text for token in sent]
        # `any` + `and` replaces the original per-word loop, which joined the
        # two conditions with bitwise `&` instead of the boolean `and`.
        if any(keyword in word.lower() for word in words) and sent not in matching_sents:
            matching_sents.append(sent)
    return matching_sents
# Given tokens and a keyword, get the dependency of this keyword in sentences
def get_dependency(tokens,keyword):
dependency={'lefts':[],'rights':[],'head':[]}
for token in tokens:
if token.text==keyword:
if len([t.text for t in token.lefts])>0:
dependency['lefts'].append([t.text for t in token.lefts])
if len([t.text for t in token.rights])>0:
dependency['rights'].append([t.text for t in token.rights])
dependency['head'].append(token.head.text)
return dependency
# Given a df of news articles with texts,
# Get tokens with nlp
# Get sentences with a certain keyword in the text
# Get dependency of the keyword in the sentences containing the keyword
def process(df,keyword,txt_col):
    """Tokenize df[txt_col] with spaCy, then add keyword-sentence and
    dependency columns.

    Rows with missing text are dropped. The *keyword* column holds the
    sentences containing the keyword (NaN when none), and 'dependence'
    holds the keyword's dependency structure per document.
    """
    nlp = spacy.load('en_core_web_sm')
    # nlp processing get token from text
    df=df[~df[txt_col].isna()]
    df.loc[:,'nlp_t']=df.loc[:,txt_col].apply(lambda i: nlp(i))
    # Write sentences with keyword in the keyword column
    df.loc[:,keyword]=df.loc[:,'nlp_t'].apply(lambda x: find_sents(x,keyword))
    # if there is no sentence containing the keyword in the article, return nan
    df.loc[df[keyword].apply(lambda x: len(x)==0),keyword]=np.nan
    # Write dependence component in the dependency column
    df.loc[:,'dependence']=df.loc[:,'nlp_t'].apply(lambda i: get_dependency(i,keyword))
    df.reset_index(inplace=True,drop=True)
return df | 38.520833 | 87 | 0.66901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.341266 |
f6bccbdedb65751e2dbdc99d5d3f99b99bf35188 | 2,608 | py | Python | recipes/onedpl/all/conanfile.py | dvirtz/conan-center-index | 2e7a6337804325616f8d97e3a5b6f66cc72699cb | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/onedpl/all/conanfile.py | dvirtz/conan-center-index | 2e7a6337804325616f8d97e3a5b6f66cc72699cb | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/onedpl/all/conanfile.py | dvirtz/conan-center-index | 2e7a6337804325616f8d97e3a5b6f66cc72699cb | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | import os
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.28.0"
class OneDplConan(ConanFile):
    """Conan recipe for oneDPL (header-only Parallel STL implementation)."""
    name = "onedpl"
    # NOTE(review): there is a missing space between "algorithms" and "with"
    # in the concatenated description string below — left as-is here.
    description = ("OneDPL (Formerly Parallel STL) is an implementation of "
                   "the C++ standard library algorithms"
                   "with support for execution policies, as specified in "
                   "ISO/IEC 14882:2017 standard, commonly called C++17")
    license = ("Apache-2.0", "LLVM-exception")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/oneapi-src/oneDPL"
    topics = ("stl", "parallelism")
    settings = "os", "arch", "build_type", "compiler"
    options = {"backend": ["tbb", "serial"]}
    default_options = {"backend": "tbb"}
    generators = ["cmake", "cmake_find_package"]
    exports = ["CMakeLists.txt"]
    no_copy_source = True
    @property
    def _source_subfolder(self):
        # Folder the upstream sources are unpacked into.
        return "source_subfolder"
    def configure(self):
        # oneDPL requires at least C++11 when a cppstd is declared.
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, 11)
    def requirements(self):
        # Only the TBB backend pulls in an external dependency.
        if self.options.backend == "tbb":
            self.requires("tbb/2020.2")
    def package_id(self):
        # Header-only: the package is identical across settings.
        self.info.header_only()
    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        os.rename("oneDPL-" + self.version, self._source_subfolder)
    def _configure_cmake(self):
        # CMake is only used to drive the install of the headers.
        cmake = CMake(self)
        cmake.definitions["PARALLELSTL_BACKEND"] = self.options.backend
        cmake.configure()
        return cmake
    def package(self):
        cmake = self._configure_cmake()
        cmake.install()
        # Ship the bundled stdlib shims alongside the main headers.
        self.copy("*", src=os.path.join(self._source_subfolder, "stdlib"), dst=os.path.join("lib", "stdlib"))
        self.copy("LICENSE.txt", src=self._source_subfolder, dst="licenses")
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
    def package_info(self):
        # Exposed to consumers as the ParallelSTL/pstl CMake targets.
        self.cpp_info.filenames["cmake_find_package"] = "ParallelSTL"
        self.cpp_info.filenames["cmake_find_package_multi"] = "ParallelSTL"
        self.cpp_info.names["cmake_find_package"] = "pstl"
        self.cpp_info.names["cmake_find_package_multi"] = "pstl"
        self.cpp_info.components["_onedpl"].names["cmake_find_package"] = "ParallelSTL"
        self.cpp_info.components["_onedpl"].names["cmake_find_package_multi"] = "ParallelSTL"
        self.cpp_info.components["_onedpl"].includedirs = ["include", os.path.join("lib", "stdlib")]
        if self.options.backend == "tbb":
            self.cpp_info.components["_onedpl"].requires = ["tbb::tbb"]
| 39.515152 | 109 | 0.643788 | 2,515 | 0.96434 | 0 | 0 | 76 | 0.029141 | 0 | 0 | 870 | 0.333589 |
f6bd32f46d54d98cc09632370c1ad80e2609554b | 5,521 | py | Python | atvpolimorfismo.py | Patricia-Silva1/atividade | f294c52a8156a5b69972d0190deedccab6c5df2f | [
"MIT"
] | null | null | null | atvpolimorfismo.py | Patricia-Silva1/atividade | f294c52a8156a5b69972d0190deedccab6c5df2f | [
"MIT"
] | null | null | null | atvpolimorfismo.py | Patricia-Silva1/atividade | f294c52a8156a5b69972d0190deedccab6c5df2f | [
"MIT"
class Atletas:
    """An athlete: name, age and accumulated championship points."""

    def __init__(self, nome, idade, pontuacao):
        # fixed: the dunder was misspelled ``_init_``, so this constructor
        # was never invoked and instances were created without attributes
        self.nome = nome
        self.idade = idade
        self.pontuacao = pontuacao
class Amador(Atletas):
    """Amateur athlete: lowest tier of the three categories."""

    def __init__(self, nome, idade, pontuacao):
        # fixed: misspelled ``_init_`` meant neither this method nor the
        # parent constructor ever ran
        super().__init__(nome, idade, pontuacao)
        # tier flags read by the Circuito* eligibility checks below
        self.amador = True
        self.profissional = False
        self.lenda = False
class Profissional(Atletas):
    """Professional athlete: may also enter amateur circuits."""

    def __init__(self, nome, idade, pontuacao):
        # fixed: misspelled ``_init_`` meant neither this method nor the
        # parent constructor ever ran
        super().__init__(nome, idade, pontuacao)
        # tier flags read by the Circuito* eligibility checks below
        self.amador = True
        self.profissional = True
        self.lenda = False
class Lenda(Atletas):
    """Legend athlete: top tier, eligible for every circuit."""

    def __init__(self, nome, idade, pontuacao):
        # fixed: misspelled ``_init_`` meant neither this method nor the
        # parent constructor ever ran
        super().__init__(nome, idade, pontuacao)
        # tier flags read by the Circuito* eligibility checks below
        self.amador = True
        self.profissional = True
        self.lenda = True
class Patrocinadores:
    """A sponsor: a name and the amount (valor) it contributes."""

    def __init__(self, nome, valor):
        # fixed: the dunder was misspelled ``_init_`` and never invoked
        self.nome = nome
        self.valor = valor
class Campeonato:
    """A championship: holds its athletes and sponsors and records wins."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: the dunder was misspelled ``_init_`` and never invoked
        self.nome = nome
        self.local = local
        self.premiacao = premiacao
        self.patrocinadores = patrocinadores
        self.atletas = atletas

    def adicionar_atletas(self, *novo_atleta):
        """Append every given athlete to this championship."""
        for atleta in novo_atleta:
            self.atletas.append(atleta)

    def adicionar_patrocinador(self, *novo_patrocionio):
        """Append every given sponsor to this championship."""
        for empresa in novo_patrocionio:
            self.patrocinadores.append(empresa)

    def vencedor(self, nome_vencedor):
        """Record a win for *nome_vencedor*; the base championship awards 0 points."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 0
                # fixed: the format string has two placeholders but was given
                # only one argument, which raised IndexError at runtime
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
class CircuitoAmador(Campeonato):
    """Amateur circuit: open to every athlete; a win awards 10 points."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: misspelled ``_init_`` / ``super()._init_`` meant the
        # constructor chain was broken (AttributeError if ever called)
        super().__init__(nome, local, premiacao, patrocinadores, atletas)

    def adicionar_patrocinador(self, *novo_patrocionio):
        return super().adicionar_patrocinador(*novo_patrocionio)

    def adicionar_atletas(self, *novo_atleta):
        # amateur circuit accepts every category
        return super().adicionar_atletas(*novo_atleta)

    def vencedor(self, nome_vencedor):
        """Award 10 points to the athlete named *nome_vencedor*."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 10
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
class CircuitoProfissional(Campeonato):
    """Professional circuit: professionals and legends only; a win awards 50 points."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: misspelled ``_init_`` / ``super()._init_`` meant the
        # constructor chain was broken (AttributeError if ever called)
        super().__init__(nome, local, premiacao, patrocinadores, atletas)

    def adicionar_patrocinador(self, *novo_patrocionio):
        return super().adicionar_patrocinador(*novo_patrocionio)

    def adicionar_atletas(self, *novo_atleta):
        """Add athletes, admitting professionals and legends only."""
        for atleta in novo_atleta:
            if atleta.profissional or atleta.lenda:
                self.atletas.append(atleta)
            else:
                # note: returning here also skips any remaining athletes
                return print('Está categoria não pode fazer parte deste circuito.')

    def vencedor(self, nome_vencedor):
        """Award 50 points to the athlete named *nome_vencedor*."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 50
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
# NOTE(review): this is a verbatim re-definition of CircuitoAmador already
# declared above; it shadows the earlier class and should probably be removed.
class CircuitoAmador(Campeonato):
    """Amateur circuit: open to every athlete; a win awards 10 points."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: misspelled ``_init_`` / ``super()._init_`` broke the
        # constructor chain
        super().__init__(nome, local, premiacao, patrocinadores, atletas)

    def adicionar_patrocinador(self, *novo_patrocionio):
        return super().adicionar_patrocinador(*novo_patrocionio)

    def adicionar_atletas(self, *novo_atleta):
        # amateur circuit accepts every category
        return super().adicionar_atletas(*novo_atleta)

    def vencedor(self, nome_vencedor):
        """Award 10 points to the athlete named *nome_vencedor*."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 10
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
# NOTE(review): this is a verbatim re-definition of CircuitoProfissional
# already declared above; it shadows the earlier class and should probably be
# removed.
class CircuitoProfissional(Campeonato):
    """Professional circuit: professionals and legends only; a win awards 50 points."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: misspelled ``_init_`` / ``super()._init_`` broke the
        # constructor chain
        super().__init__(nome, local, premiacao, patrocinadores, atletas)

    def adicionar_patrocinador(self, *novo_patrocionio):
        return super().adicionar_patrocinador(*novo_patrocionio)

    def adicionar_atletas(self, *novo_atleta):
        """Add athletes, admitting professionals and legends only."""
        for atleta in novo_atleta:
            if atleta.profissional or atleta.lenda:
                self.atletas.append(atleta)
            else:
                # note: returning here also skips any remaining athletes
                return print('Está categoria não pode fazer parte deste circuito.')

    def vencedor(self, nome_vencedor):
        """Award 50 points to the athlete named *nome_vencedor*."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 50
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
class CircuitoLenda(Campeonato):
    """Legend circuit: legend-tier athletes only; a win awards 100 points."""

    def __init__(self, nome, local, premiacao, patrocinadores, atletas):
        # fixed: misspelled ``_init_`` / ``super()._init_`` broke the
        # constructor chain
        super().__init__(nome, local, premiacao, patrocinadores, atletas)

    def adicionar_patrocinador(self, *novo_patrocionio):
        return super().adicionar_patrocinador(*novo_patrocionio)

    def adicionar_atletas(self, *novo_atleta):
        """Add athletes, admitting legends only."""
        for atleta in novo_atleta:
            if atleta.lenda:
                self.atletas.append(atleta)
            else:
                # note: returning here also skips any remaining athletes
                return print('Está categoria não pode fazer parte deste circuito.')

    def vencedor(self, nome_vencedor):
        """Award 100 points to the athlete named *nome_vencedor*."""
        for atleta in self.atletas:
            if nome_vencedor == atleta.nome:
                atleta.pontuacao += 100
                print('O atleta {} ficou com {} pontos'.format(
                    nome_vencedor, atleta.pontuacao))
| 34.080247 | 106 | 0.649158 | 5,485 | 0.992401 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.065678 |
f6be3f33c5cbe03b07efac65c5537f50b908734b | 573 | py | Python | grouper/ctl/base.py | aneeq009/merou | 7a87b43aaf64244932fa460842132a2d9329e704 | [
"Apache-2.0"
] | 58 | 2017-05-26T06:46:24.000Z | 2022-03-25T20:55:51.000Z | grouper/ctl/base.py | aneeq009/merou | 7a87b43aaf64244932fa460842132a2d9329e704 | [
"Apache-2.0"
] | 74 | 2017-06-16T17:48:37.000Z | 2022-03-28T23:09:54.000Z | grouper/ctl/base.py | aneeq009/merou | 7a87b43aaf64244932fa460842132a2d9329e704 | [
"Apache-2.0"
] | 43 | 2017-05-20T22:11:51.000Z | 2022-03-25T00:24:56.000Z | from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace
class CtlCommand(metaclass=ABCMeta):
    """Abstract base class for grouper-ctl subcommands."""

    @staticmethod
    @abstractmethod
    def add_arguments(parser):
        # type: (ArgumentParser) -> None
        """Register this command's CLI arguments on *parser*."""

    @abstractmethod
    def run(self, args):
        # type: (Namespace) -> None
        """Execute the command with the parsed *args*."""
| 24.913043 | 72 | 0.666667 | 427 | 0.745201 | 0 | 0 | 329 | 0.574171 | 0 | 0 | 208 | 0.363002 |
f6c144a6777f88e0431ff780adf19960dca5e7ab | 6,798 | py | Python | noysim/geo.py | bertdecoensel/noysim | 0f45958093e3db453db8be6edc54f4cb1e119be0 | [
"MIT"
] | 1 | 2016-06-12T08:27:58.000Z | 2016-06-12T08:27:58.000Z | noysim/geo.py | bertdecoensel/noysim | 0f45958093e3db453db8be6edc54f4cb1e119be0 | [
"MIT"
] | null | null | null | noysim/geo.py | bertdecoensel/noysim | 0f45958093e3db453db8be6edc54f4cb1e119be0 | [
"MIT"
] | 1 | 2019-01-16T13:16:56.000Z | 2019-01-16T13:16:56.000Z | # Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by Bert De Coensel, Ghent University & Griffith University.
#
# Basic geometry functions and classes
import numpy
import pylab
EPSILON = 10e-12 # smallest difference for points/directions
#---------------------------------------------------------------------------------------------------
# Convenience functions
#---------------------------------------------------------------------------------------------------
def parse_coordinates(*args):
    """Parse 2D/3D coordinates x, y(, z) supplied in a variety of fashions.

    Accepts a Point-like object (anything with a ``coordinates()`` method), a
    tuple/list of 2 or 3 numbers, a string ``'(x,y)'`` or ``'(x,y,z)'``, or
    2-3 separate numeric arguments.  Always returns a 3-element tuple
    ``(x, y, z)``; a missing z defaults to 0.0.  Raises ``Exception`` for
    anything it cannot interpret.
    """
    n = len(args)
    if n == 0:
        return (0.0, 0.0, 0.0)
    if n == 1:
        arg = args[0]
        try:
            # a Point(-like) object supplies its own coordinates
            return arg.coordinates()
        except AttributeError:
            # fixed: was a bare ``except`` that swallowed every error
            pass
        if type(arg) in (tuple, list):
            # coordinates supplied as a tuple (x,y) or (x,y,z)
            if len(arg) == 2:
                return (arg[0], arg[1], 0.0)
            if len(arg) == 3:
                return (arg[0], arg[1], arg[2])
        if type(arg) is str:
            # coordinates supplied as a string '(x,y)' or '(x,y,z)'
            c = arg.strip('()').split(',')
            if len(c) == 2:
                # generalized: 2D strings previously raised IndexError
                return (float(c[0]), float(c[1]), 0.0)
            return (float(c[0]), float(c[1]), float(c[2]))
    else:
        # coordinates supplied as separate arguments x,y or x,y,z
        if n == 2:
            return (args[0], args[1], 0.0)
        if n == 3:
            return (args[0], args[1], args[2])
    raise Exception('unable to parse coordinates: ' + str(args))
def asPoint(p):
    """Coerce *p* to a Point: pass Point instances through, wrap anything else."""
    return p if isinstance(p, Point) else Point(p)
def asDirection(d):
    """Coerce *d* to a Direction; a tuple is read as (bearing, gradient)."""
    if isinstance(d, Direction):
        return d
    return Direction(bearing=d[0], gradient=d[1])
#---------------------------------------------------------------------------------------------------
# Point class
#---------------------------------------------------------------------------------------------------
class Point(object):
    """ basic 3D point class

    Coordinates are held in the attributes x, y and z; the constructor
    accepts anything that parse_coordinates() understands.
    """
    def __init__(self, *xyz):
        object.__init__(self)
        self.x, self.y, self.z = parse_coordinates(*xyz)
    def copy(self):
        """ return a copy """
        return Point(self.x, self.y, self.z)
    def coordinates(self):
        """ return the coordinates as a tuple (x,y,z) """
        return (self.x, self.y, self.z)
    def __getitem__(self, key):
        """ implement list style access to coordinates: p[0], p[1], p[2] """
        return self.coordinates()[key]
    def __str__(self):
        """ string representation of a point, two decimals per coordinate """
        return '(%.2f,%.2f,%.2f)' % self.coordinates()
    def middle(self, other):
        """ return the middle point between self and another point """
        return Point((self.x + other.x)/2.0, (self.y + other.y)/2.0, (self.z + other.z)/2.0)
    def distanceSquared(self, other):
        """ return the squared distance to another point """
        return (self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2
    def distance(self, other):
        """ return the distance to another point """
        return numpy.sqrt(self.distanceSquared(other))
    def distanceXY(self, other):
        """ return the distance to another point, both projected to the xy-plane """
        return numpy.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)
    def __eq__(self, other):
        """ check if points coincide (within the module-level EPSILON tolerance) """
        if other == None:
            return False
        return (self.distance(other) < EPSILON)
    def __ne__(self, other):
        """ check if points do not coincide """
        return not self.__eq__(other)
    def __cmp__(self, other):
        """ compare the coordinates, first x, then y, then z

        NOTE(review): __cmp__ is ignored by Python 3, and it returns booleans
        rather than the -1/0/1 protocol expects under Python 2 -- confirm
        whether ordered comparison of points is actually relied upon.
        """
        if self.x == other.x:
            if (self.y == other.y):
                return (self.z < other.z)
            else:
                return (self.y < other.y)
        else:
            return (self.x < other.x)
    def projectXY(self, z = 0.0):
        """ return the projection of the point on the xy-plane, at height z """
        return Point(self.x, self.y, z)
    def transform(self, func):
        """ perform a coordinate transformation with the given function (x,y,z) to (x',y',z')

        Mutates this point in place.
        """
        self.x, self.y, self.z = func((self.x, self.y, self.z))
    def plot(self, color = 'black', size = 5):
        """ plot the point in the xy-plane (draws a marker on the current pylab figure) """
        pylab.plot([self.x], [self.y], color = color, linestyle = 'None', marker = '.', markersize = size)
#---------------------------------------------------------------------------------------------------
# Direction class
#---------------------------------------------------------------------------------------------------
class Direction(object):
    """Geometrical 3D direction given by bearing and gradient (both in degrees)."""

    def __init__(self, bearing, gradient=0.0):
        object.__init__(self)
        # both angles are kept in degrees; radians are computed on demand
        self.bearing = bearing
        self.gradient = gradient

    def copy(self):
        """Return an independent copy of this direction."""
        return Direction(self.bearing, self.gradient)

    def __getitem__(self, key):
        """List-style access: d[0] is the bearing, d[1] the gradient."""
        return (self.bearing, self.gradient)[key]

    def bearingRadians(self):
        """Return the bearing (horizontal angle with the x-axis) in radians."""
        return numpy.radians(self.bearing)

    def gradientRadians(self):
        """Return the gradient (vertical angle with the xy-plane) in radians."""
        return numpy.radians(self.gradient)

    def __str__(self):
        """Return '[bearing,gradient]' with two decimals per angle."""
        return f'[{self.bearing:.2f},{self.gradient:.2f}]'

    def __eq__(self, other):
        """Check whether two directions coincide within the EPSILON tolerance."""
        if other is None:
            return False
        return (abs(self.bearing - other.bearing) <= EPSILON
                and abs(self.gradient - other.gradient) <= EPSILON)

    def __ne__(self, other):
        """Check whether two directions differ."""
        return not self.__eq__(other)
def directionFromTo(p1, p2):
    """Return the Direction pointing from point *p1* to point *p2*."""
    dx = p2.x - p1.x
    dy = p2.y - p1.y
    dz = p2.z - p1.z
    length = p1.distance(p2)
    return Direction(bearing=numpy.degrees(numpy.arctan2(dy, dx)),
                     gradient=numpy.degrees(numpy.arcsin(dz / length)))
#---------------------------------------------------------------------------------------------------
# Test code
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # smoke test: build points from the supported input formats and plot them
    points = [
        Point(1.2, 3.4),
        Point([5.6, 7.8, 9.0]),
        Point('(7.8, 9.0, 1.2)'),
    ]
    pylab.figure()
    for pt in points:
        pt.plot()
    try:
        pylab.show()
    except:
        # showing may fail on headless setups; ignore, as before
        pass
| 32.84058 | 115 | 0.529126 | 3,703 | 0.544719 | 0 | 0 | 0 | 0 | 0 | 0 | 2,866 | 0.421595 |
f6c1d658392020e0d6f57aed9f80cf163700273d | 197 | py | Python | aa.py | ani37/Hello-world | db3998f9ce110b145c48eda401c607acc726cd2c | [
"MIT"
] | null | null | null | aa.py | ani37/Hello-world | db3998f9ce110b145c48eda401c607acc726cd2c | [
"MIT"
] | null | null | null | aa.py | ani37/Hello-world | db3998f9ce110b145c48eda401c607acc726cd2c | [
"MIT"
] | 1 | 2018-10-27T10:14:24.000Z | 2018-10-27T10:14:24.000Z | MAX = 100000
MOD = pwo(10, 9) + 7
dp = [1 for in xrange(MAX)]
for i in xrange(7, MAX):
dp[i] = (dp[i - 1] + dp[i - 7]) % MOD
n = input()
for i in xrange(n):
k = input()
print dp[k]
| 16.416667 | 39 | 0.502538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f6c25e843d1f45408a584bccb97be735f0255735 | 17,651 | py | Python | pyrometheus/codegen/python.py | pyrometheus/pyrometheus | 9cfb6fb404173c132d1a92439cce0e4ed10d1406 | [
"MIT"
] | 7 | 2021-12-29T18:17:49.000Z | 2022-01-10T09:53:09.000Z | pyrometheus/codegen/python.py | pyrometheus/pyrometheus | 9cfb6fb404173c132d1a92439cce0e4ed10d1406 | [
"MIT"
] | 3 | 2022-02-06T01:22:09.000Z | 2022-03-16T15:20:16.000Z | pyrometheus/codegen/python.py | pyrometheus/pyrometheus | 9cfb6fb404173c132d1a92439cce0e4ed10d1406 | [
"MIT"
] | null | null | null | """
Python code generation
----------------------
.. autofunction:: gen_thermochem_code
.. autofunction:: get_thermochem_class
"""
__copyright__ = """
Copyright (C) 2020 Esteban Cisneros
Copyright (C) 2020 Andreas Kloeckner
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from numbers import Number
import pymbolic.primitives as p
from pymbolic.mapper.stringifier import StringifyMapper, PREC_NONE, PREC_CALL
import cantera as ct
import numpy as np # noqa: F401
from mako.template import Template
import pyrometheus.chem_expr
file_extension = "py"
# {{{ code generation helpers
class CodeGenerationMapper(StringifyMapper):
    """Stringify pymbolic expressions as ``self.usr_np``-flavored Python source."""

    def map_constant(self, expr, enclosing_prec):
        return repr(expr)

    # comparison operator -> name of the corresponding numpy ufunc
    OP_NAMES = {
        ">=": "greater_equal",
        ">": "greater",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_equal",
        "<": "less",
    }

    def map_comparison(self, expr, enclosing_prec, *args, **kwargs):
        lhs = self.rec(expr.left, PREC_NONE, *args, **kwargs)
        rhs = self.rec(expr.right, PREC_NONE, *args, **kwargs)
        return f"self.usr_np.{self.OP_NAMES[expr.operator]}({lhs}, {rhs})"

    def map_if(self, expr, enclosing_prec, *args, **kwargs):
        cond = self.rec(expr.condition, PREC_NONE, *args, **kwargs)
        then = self.rec(expr.then, PREC_NONE, *args, **kwargs)
        else_ = self.rec(expr.else_, PREC_NONE, *args, **kwargs)
        return f"self.usr_np.where({cond}, {then}, {else_})"

    def map_call(self, expr, enclosing_prec, *args, **kwargs):
        return self.format(
            "self.usr_np.%s(%s)",
            self.rec(expr.function, PREC_CALL, *args, **kwargs),
            self.join_rec(", ", expr.parameters, PREC_NONE, *args, **kwargs),
        )
def str_np_inner(ary):
if isinstance(ary, Number):
return repr(ary)
elif ary.shape:
return "[%s]" % (", ".join(str_np_inner(ary_i) for ary_i in ary))
raise TypeError("invalid argument to str_np_inner")
def str_np(ary):
return "np.array(%s)" % str_np_inner(ary)
# }}}
# {{{ main code template
code_tpl = Template(
"""\"""
.. autoclass:: Thermochemistry
\"""
import numpy as np
class Thermochemistry:
\"""
.. attribute:: model_name
.. attribute:: num_elements
.. attribute:: num_species
.. attribute:: num_reactions
.. attribute:: num_falloff
.. attribute:: one_atm
Returns 1 atm in SI units of pressure (Pa).
.. attribute:: gas_constant
.. attribute:: species_names
.. attribute:: species_indices
.. automethod:: get_specific_gas_constant
.. automethod:: get_density
.. automethod:: get_pressure
.. automethod:: get_mix_molecular_weight
.. automethod:: get_concentrations
.. automethod:: get_mixture_specific_heat_cp_mass
.. automethod:: get_mixture_specific_heat_cv_mass
.. automethod:: get_mixture_enthalpy_mass
.. automethod:: get_mixture_internal_energy_mass
.. automethod:: get_species_specific_heats_r
.. automethod:: get_species_enthalpies_rt
.. automethod:: get_species_entropies_r
.. automethod:: get_species_gibbs_rt
.. automethod:: get_equilibrium_constants
.. automethod:: get_temperature
.. automethod:: __init__
\"""
def __init__(self, usr_np=np):
\"""Initialize thermochemistry object for a mechanism.
Parameters
----------
usr_np
:mod:`numpy`-like namespace providing at least the following functions,
for any array ``X`` of the bulk array type:
- ``usr_np.log(X)`` (like :data:`numpy.log`)
- ``usr_np.log10(X)`` (like :data:`numpy.log10`)
- ``usr_np.exp(X)`` (like :data:`numpy.exp`)
- ``usr_np.where(X > 0, X_yes, X_no)`` (like :func:`numpy.where`)
- ``usr_np.linalg.norm(X, np.inf)`` (like :func:`numpy.linalg.norm`)
where the "bulk array type" is a type that offers arithmetic analogous
to :class:`numpy.ndarray` and is used to hold all types of (potentialy
volumetric) "bulk data", such as temperature, pressure, mass fractions,
etc. This parameter defaults to *actual numpy*, so it can be ignored
unless it is needed by the user (e.g. for purposes of
GPU processing or automatic differentiation).
\"""
self.usr_np = usr_np
self.model_name = ${repr(sol.source)}
self.num_elements = ${sol.n_elements}
self.num_species = ${sol.n_species}
self.num_reactions = ${sol.n_reactions}
self.num_falloff = ${
sum(1 if isinstance(r, ct.FalloffReaction) else 0
for r in sol.reactions())}
self.one_atm = ${ct.one_atm}
self.gas_constant = ${ct.gas_constant}
self.big_number = 1.0e300
self.species_names = ${sol.species_names}
self.species_indices = ${
dict([[sol.species_name(i), i]
for i in range(sol.n_species)])}
self.wts = ${str_np(sol.molecular_weights)}
self.iwts = 1/self.wts
def _pyro_zeros_like(self, argument):
# FIXME: This is imperfect, as a NaN will stay a NaN.
return 0 * argument
def _pyro_make_array(self, res_list):
\"""This works around (e.g.) numpy.exp not working with object
arrays of numpy scalars. It defaults to making object arrays, however
if an array consists of all scalars, it makes a "plain old"
:class:`numpy.ndarray`.
See ``this numpy bug <https://github.com/numpy/numpy/issues/18004>`__
for more context.
\"""
from numbers import Number
all_numbers = all(isinstance(e, Number) for e in res_list)
dtype = np.float64 if all_numbers else object
result = np.empty((len(res_list),), dtype=dtype)
# 'result[:] = res_list' may look tempting, however:
# https://github.com/numpy/numpy/issues/16564
for idx in range(len(res_list)):
result[idx] = res_list[idx]
return result
def _pyro_norm(self, argument, normord):
\"""This works around numpy.linalg norm not working with scalars.
If the argument is a regular ole number, it uses :func:`numpy.abs`,
otherwise it uses ``usr_np.linalg.norm``.
\"""
# Wrap norm for scalars
from numbers import Number
if isinstance(argument, Number):
return np.abs(argument)
return self.usr_np.linalg.norm(argument, normord)
def species_name(self, species_index):
return self.species_name[species_index]
def species_index(self, species_name):
return self.species_indices[species_name]
def get_specific_gas_constant(self, mass_fractions):
return self.gas_constant * (
%for i in range(sol.n_species):
+ self.iwts[${i}]*mass_fractions[${i}]
%endfor
)
def get_density(self, p, temperature, mass_fractions):
mmw = self.get_mix_molecular_weight(mass_fractions)
rt = self.gas_constant * temperature
return p * mmw / rt
def get_pressure(self, rho, temperature, mass_fractions):
mmw = self.get_mix_molecular_weight(mass_fractions)
rt = self.gas_constant * temperature
return rho * rt / mmw
def get_mix_molecular_weight(self, mass_fractions):
return 1/(
%for i in range(sol.n_species):
+ self.iwts[${i}]*mass_fractions[${i}]
%endfor
)
def get_concentrations(self, rho, mass_fractions):
return self.iwts * rho * mass_fractions
def get_mass_average_property(self, mass_fractions, spec_property):
return sum([mass_fractions[i] * spec_property[i] * self.iwts[i]
for i in range(self.num_species)])
def get_mixture_specific_heat_cp_mass(self, temperature, mass_fractions):
cp0_r = self.get_species_specific_heats_r(temperature)
cpmix = self.get_mass_average_property(mass_fractions, cp0_r)
return self.gas_constant * cpmix
def get_mixture_specific_heat_cv_mass(self, temperature, mass_fractions):
cp0_r = self.get_species_specific_heats_r(temperature) - 1.0
cpmix = self.get_mass_average_property(mass_fractions, cp0_r)
return self.gas_constant * cpmix
def get_mixture_enthalpy_mass(self, temperature, mass_fractions):
h0_rt = self.get_species_enthalpies_rt(temperature)
hmix = self.get_mass_average_property(mass_fractions, h0_rt)
return self.gas_constant * temperature * hmix
def get_mixture_internal_energy_mass(self, temperature, mass_fractions):
e0_rt = self.get_species_enthalpies_rt(temperature) - 1.0
emix = self.get_mass_average_property(mass_fractions, e0_rt)
return self.gas_constant * temperature * emix
def get_species_specific_heats_r(self, temperature):
return self._pyro_make_array([
% for sp in sol.species():
${cgm(ce.poly_to_expr(sp.thermo, "temperature"))},
% endfor
])
def get_species_enthalpies_rt(self, temperature):
return self._pyro_make_array([
% for sp in sol.species():
${cgm(ce.poly_to_enthalpy_expr(sp.thermo, "temperature"))},
% endfor
])
def get_species_entropies_r(self, temperature):
return self._pyro_make_array([
% for sp in sol.species():
${cgm(ce.poly_to_entropy_expr(sp.thermo, "temperature"))},
% endfor
])
def get_species_gibbs_rt(self, temperature):
h0_rt = self.get_species_enthalpies_rt(temperature)
s0_r = self.get_species_entropies_r(temperature)
return h0_rt - s0_r
def get_equilibrium_constants(self, temperature):
rt = self.gas_constant * temperature
c0 = self.usr_np.log(self.one_atm / rt)
g0_rt = self.get_species_gibbs_rt(temperature)
return self._pyro_make_array([
%for i, react in enumerate(sol.reactions()):
%if react.reversible:
${cgm(ce.equilibrium_constants_expr(
sol, i, Variable("g0_rt")))},
%else:
-0.17364695002734*temperature,
%endif
%endfor
])
def get_temperature(self, enthalpy_or_energy, t_guess, y, do_energy=False):
if do_energy is False:
pv_fun = self.get_mixture_specific_heat_cp_mass
he_fun = self.get_mixture_enthalpy_mass
else:
pv_fun = self.get_mixture_specific_heat_cv_mass
he_fun = self.get_mixture_internal_energy_mass
num_iter = 500
tol = 1.0e-6
ones = self._pyro_zeros_like(enthalpy_or_energy) + 1.0
t_i = t_guess * ones
for _ in range(num_iter):
f = enthalpy_or_energy - he_fun(t_i, y)
j = -pv_fun(t_i, y)
dt = -f / j
t_i += dt
if self._pyro_norm(dt, np.inf) < tol:
return t_i
raise RuntimeError("Temperature iteration failed to converge")
%if falloff_reactions:
def get_falloff_rates(self, temperature, concentrations, k_fwd):
ones = self._pyro_zeros_like(temperature) + 1.0
k_high = self._pyro_make_array([
%for _, react in falloff_reactions:
%if react.uses_legacy:
${cgm(ce.rate_coefficient_expr(
react.high_rate, Variable("temperature")))},
%else:
${cgm(ce.rate_coefficient_expr(
react.rate.high_rate, Variable("temperature")))},
%endif
%endfor
])
k_low = self._pyro_make_array([
%for _, react in falloff_reactions:
%if react.uses_legacy:
${cgm(ce.rate_coefficient_expr(
react.low_rate, Variable("temperature")))},
%else:
${cgm(ce.rate_coefficient_expr(
react.rate.low_rate, Variable("temperature")))},
%endif
%endfor
])
reduced_pressure = self._pyro_make_array([
%for i, (_, react) in enumerate(falloff_reactions):
(${cgm(ce.third_body_efficiencies_expr(
sol, react, Variable("concentrations")))})*k_low[${i}]/k_high[${i}],
%endfor
])
falloff_center = self._pyro_make_array([
%for _, react in falloff_reactions:
${cgm(ce.troe_falloff_expr(react, Variable("temperature")))},
%endfor
])
falloff_function = self._pyro_make_array([
%for i, (_, react) in enumerate(falloff_reactions):
${cgm(ce.falloff_function_expr(
react, i, Variable("temperature"), Variable("reduced_pressure"),
Variable("falloff_center")))},
%endfor
])*reduced_pressure/(1+reduced_pressure)
%for j, (i, react) in enumerate(falloff_reactions):
k_fwd[${i}] = k_high[${j}]*falloff_function[${j}]*ones
%endfor
return
%endif
def get_fwd_rate_coefficients(self, temperature, concentrations):
ones = self._pyro_zeros_like(temperature) + 1.0
k_fwd = [
%for react in sol.reactions():
%if isinstance(react, ct.FalloffReaction):
0*temperature,
%else:
${cgm(ce.rate_coefficient_expr(react.rate,
Variable("temperature")))} * ones,
%endif
%endfor
]
%if falloff_reactions:
self.get_falloff_rates(temperature, concentrations, k_fwd)
%endif
%for i, react in three_body_reactions:
k_fwd[${i}] *= (${cgm(ce.third_body_efficiencies_expr(
sol, react, Variable("concentrations")))})
%endfor
return self._pyro_make_array(k_fwd)
def get_net_rates_of_progress(self, temperature, concentrations):
k_fwd = self.get_fwd_rate_coefficients(temperature, concentrations)
log_k_eq = self.get_equilibrium_constants(temperature)
return self._pyro_make_array([
%for i in range(sol.n_reactions):
${cgm(ce.rate_of_progress_expr(sol, i,
Variable("concentrations"),
Variable("k_fwd"), Variable("log_k_eq")))},
%endfor
])
def get_net_production_rates(self, rho, temperature, mass_fractions):
c = self.get_concentrations(rho, mass_fractions)
r_net = self.get_net_rates_of_progress(temperature, c)
ones = self._pyro_zeros_like(r_net[0]) + 1.0
return self._pyro_make_array([
%for sp in sol.species():
${cgm(ce.production_rate_expr(
sol, sp.name, Variable("r_net")))} * ones,
%endfor
])""", strict_undefined=True)
# }}}
def gen_thermochem_code(sol: ct.Solution) -> str:
    """Return Python source for a module with a ``Thermochemistry`` class for *sol*.

    The generated class adheres to the
    :class:`~pyrometheus.thermochem_example.Thermochemistry` interface.
    """
    # classify the reactions once; the template iterates over these lists
    falloff = [(i, r) for i, r in enumerate(sol.reactions())
               if isinstance(r, ct.FalloffReaction)]
    three_body = [(i, r) for i, r in enumerate(sol.reactions())
                  if isinstance(r, ct.ThreeBodyReaction)]
    return code_tpl.render(
        ct=ct,
        sol=sol,
        str_np=str_np,
        cgm=CodeGenerationMapper(),
        Variable=p.Variable,
        ce=pyrometheus.chem_expr,
        falloff_reactions=falloff,
        three_body_reactions=three_body,
    )
def compile_class(code_str, class_name="Thermochemistry"):
    """Execute *code_str* as a module and return the class named *class_name*."""
    namespace = {}
    exec(compile(code_str, "<generated code>", "exec"), namespace)
    # keep the source around in the module namespace for debugging
    namespace["_MODULE_SOURCE_CODE"] = code_str
    return namespace[class_name]
def get_thermochem_class(sol: ct.Solution):
    """Return a class conforming to the
    :class:`~pyrometheus.thermochem_example.Thermochemistry` interface for *sol*.
    """
    source = gen_thermochem_code(sol)
    return compile_class(source)
def cti_to_mech_file(cti_file_name, mech_file_name):
    """Write the generated mechanism module for a CTI file to *mech_file_name*."""
    with open(mech_file_name, "w") as outf:
        source = gen_thermochem_code(ct.Solution(cti_file_name, "gas"))
        print(source, file=outf)
# vim: foldmethod=marker
| 35.372745 | 89 | 0.622798 | 1,189 | 0.067362 | 0 | 0 | 0 | 0 | 0 | 0 | 15,121 | 0.856665 |
f6c294247659dfe928a49c1fe3981150196bea4a | 1,107 | py | Python | foxwall_api/urls.py | umtdemr/foxwall | 7854806774486bed1895c114f14398cacc90449c | [
"MIT"
] | null | null | null | foxwall_api/urls.py | umtdemr/foxwall | 7854806774486bed1895c114f14398cacc90449c | [
"MIT"
] | null | null | null | foxwall_api/urls.py | umtdemr/foxwall | 7854806774486bed1895c114f14398cacc90449c | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from drf_spectacular.views import (
SpectacularAPIView,
SpectacularRedocView,
SpectacularSwaggerView
)
# Project URL configuration.  Django resolves requests against the first
# matching pattern, so the order below is significant.
urlpatterns = [
    path('admin/', admin.site.urls),
    # machine-readable OpenAPI schema served by drf-spectacular
    path('api/schema/', SpectacularAPIView.as_view(), name='schema'),
    # Optional UI:
    path(
        '',
        SpectacularSwaggerView.as_view(url_name='schema'),
        name='swagger-ui'
    ),
    path(
        'redoc/',
        SpectacularRedocView.as_view(url_name='schema'),
        name='redoc'
    ),
    # application routers, each namespaced after its app
    path("user/", include("user.urls", namespace="user")),
    path("follow/", include("follow.urls", namespace="follow")),
    path("post/", include("post.urls", namespace="post")),
    path("like/", include("like.urls", namespace="like"))
]

# serve static and media files from Django itself only while DEBUG is enabled
if settings.DEBUG:
    urlpatterns += static(
        settings.STATIC_URL,
        document_root=settings.STATIC_ROOT,
    )
    urlpatterns += static(
        settings.MEDIA_URL,
        document_root=settings.MEDIA_ROOT,
    )
| 26.357143 | 69 | 0.650407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.171635 |
f6c4ba56f91f92449ed57ea8aedac277928e66b9 | 3,025 | py | Python | python/dash_tools/staticdownloader.py | jainsat/media-tools | 1f457d3a84dc3e1c8a5461774ebd45bcbb2eefb4 | [
"BSD-3-Clause"
] | 1 | 2021-04-26T06:34:08.000Z | 2021-04-26T06:34:08.000Z | python/dash_tools/staticdownloader.py | jainsat/media-tools | 1f457d3a84dc3e1c8a5461774ebd45bcbb2eefb4 | [
"BSD-3-Clause"
] | null | null | null | python/dash_tools/staticdownloader.py | jainsat/media-tools | 1f457d3a84dc3e1c8a5461774ebd45bcbb2eefb4 | [
"BSD-3-Clause"
] | 1 | 2020-05-18T22:08:07.000Z | 2020-05-18T22:08:07.000Z | #!/usr/bin/env python
import os
from common import fetch_file
import staticmpdparser
import client
import json
import pdb
def download(options, mpd_url=None, mpd_str=None, base_url=None, base_dst=""):
    """Download the MPD (if a URL is given) and start downloading segments.

    Either *mpd_url* is fetched (and *base_url* derived from it), or a
    pre-fetched *mpd_str* plus explicit *base_url* may be supplied.  The
    client implementation is selected from the parsed command-line *options*.
    """
    if mpd_url:
        # First download the MPD file; segment URLs are relative to its location
        mpd_str, _, _ = fetch_file(mpd_url)
        base_url, file_name = os.path.split(mpd_url)
    mpd_parser = staticmpdparser.StaticManifestParser(mpd_str)
    if options.verbose:
        # fixed: was the Python 2 statement ``print str(...)`` (a SyntaxError
        # under Python 3); this call form prints the same text on both
        print(str(mpd_parser.mpd))
    if options.abr:
        print("Starting ABR client")
        client.AbrClient(mpd_parser.mpd, base_url, base_dst, options).download()
    elif options.bola:
        print("Starting BOLA client")
        client.BolaClient(mpd_parser.mpd, base_url, base_dst, options).download()
    elif options.bba0:
        print("Starting BBA0 client")
        client.BBAClient(mpd_parser.mpd, base_url, base_dst, options).download_bba0()
    elif options.bba2:
        print("Starting BBA2 client")
        client.BBAClient(mpd_parser.mpd, base_url, base_dst, options).download_bba2()
    elif options.pensieve:
        print("Starting Pensieve client")
        client.PensieveClient(mpd_parser.mpd, base_url, base_dst, options).download_pensieve()
    else:
        print("Starting Simple client")
        client.SimpleClient(mpd_parser.mpd, base_url, base_dst).download()
def main():
"Parse command line and start the fetching."
from optparse import OptionParser
usage = "usage: %prog [options] mpdURL [dstDir]"
parser = OptionParser(usage)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
parser.add_option("-a", "--abr", dest="abr", action="store_true")
parser.add_option("-b", "--bola", dest="bola", action="store_true")
parser.add_option("-B", "--bba0", dest="bba0", action="store_true")
parser.add_option("-X", "--bba2", dest="bba2", action="store_true")
parser.add_option("-p", "--pensieve", dest="pensieve", action="store_true")
parser.add_option("-g", "--gp", dest="gp", type="float", default=5,
help = 'Specify the (gamma p) product in seconds.')
parser.add_option("-s", "--buffer_size", dest="buffer_size", type="int", default=20,
help='Specify the buffer size in seconds')
parser.add_option("-C", "--bandwidthchangerscript", dest="bandwidth_changerscript_path", type="str",
default="./trigger_bandwidth_changer.sh", help='Specify the bandwidth changer script to trigger the remote program on server that runs tc on a network trace')
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("incorrect number of arguments")
print(usage)
# MPD url can be of the form http://10.128.0.33:5000/manifest.mpd
mpd_url = args[0]
base_dst = "download"
if len(args) >= 2:
base_dst = args[1]
download(options, mpd_url, base_dst=base_dst)
if __name__ == "__main__":
main()
| 43.84058 | 180 | 0.669421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.328264 |
f6c60cbb47c6eb49ed0c025ad7108cd872063833 | 303 | py | Python | Python/Delta/Delta.py | jankupczyk/Proste-Programy-PY | 96d932a5869e71861de89422caf1329f9edcd5f3 | [
"MIT"
] | 1 | 2021-06-28T15:53:51.000Z | 2021-06-28T15:53:51.000Z | Python/Delta/Delta.py | jankupczyk/Proste-Programy-PY | 96d932a5869e71861de89422caf1329f9edcd5f3 | [
"MIT"
] | null | null | null | Python/Delta/Delta.py | jankupczyk/Proste-Programy-PY | 96d932a5869e71861de89422caf1329f9edcd5f3 | [
"MIT"
] | null | null | null | # Oblicza delte
a = int(input("Podaj [a]:"))
b = int(input("Podaj [b]:"))
c = int(input("Podaj [c]:"))
d = b**2-4*a*c
if d > 0:
print("2 rozwiązania")
elif d == 0:
print("1 rozwiązanie")
else:
print("0 rozwiązań")
for i in range():
if i != 0:
print(i, end=" ")
| 17.823529 | 29 | 0.49505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.332248 |
f6c771f2f38897991f6c87629ffcfa7268ef1620 | 142 | py | Python | mealie/db/mongo/user_models.py | stevenroh/mealie | ba111073d652a2f34aea8324d0bce4203de34127 | [
"MIT"
] | null | null | null | mealie/db/mongo/user_models.py | stevenroh/mealie | ba111073d652a2f34aea8324d0bce4203de34127 | [
"MIT"
] | 1 | 2021-01-25T18:49:10.000Z | 2021-01-25T18:49:10.000Z | mealie/db/mongo/user_models.py | stevenroh/mealie | ba111073d652a2f34aea8324d0bce4203de34127 | [
"MIT"
] | null | null | null |
# import mongoengine
# class User(mongoengine.Document):
# username: mongoengine.EmailField()
# password: mongoengine.ReferenceField | 23.666667 | 42 | 0.753521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.964789 |
f6c87781dc296b3f3f472139665bb2dcb047df30 | 2,005 | py | Python | common/xrd-ui-tests-python/tests/xroad_global_groups_tests/XroadMemberRemoveFromGlobalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 1 | 2019-02-09T00:16:54.000Z | 2019-02-09T00:16:54.000Z | common/xrd-ui-tests-python/tests/xroad_global_groups_tests/XroadMemberRemoveFromGlobalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 1 | 2018-06-06T08:33:32.000Z | 2018-06-06T08:33:32.000Z | common/xrd-ui-tests-python/tests/xroad_global_groups_tests/XroadMemberRemoveFromGlobalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 3 | 2018-07-09T08:51:00.000Z | 2020-07-23T18:40:24.000Z | import unittest
from helpers import auditchecker, xroad
from main.maincontroller import MainController
from tests.xroad_global_groups_tests import global_groups_tests
class XroadMemberRemoveFromGlobalGroup(unittest.TestCase):
"""
SERVICE_38 Remove an X-Road Member from a Global Group
RIA URL: https://jira.ria.ee/browse/XTKB-183
Depends on finishing other test(s): member add to global group
Requires helper scenarios:
X-Road version: 6.16.0
"""
def __init__(self, methodName='test_member_remove_from_global_group'):
unittest.TestCase.__init__(self, methodName)
def test_member_remove_from_global_group(self):
main = MainController(self)
cs_host = main.config.get('cs.host')
cs_user = main.config.get('cs.user')
cs_pass = main.config.get('cs.pass')
cs_ssh_host = main.config.get('cs.ssh_host')
cs_ssh_user = main.config.get('cs.ssh_user')
cs_ssh_pass = main.config.get('cs.ssh_pass')
group_name = main.config.get('cs.global_group')
log_checker = auditchecker.AuditChecker(cs_ssh_host, cs_ssh_user, cs_ssh_pass)
member_name = main.config.get('ss1.client_name')
member_code = xroad.split_xroad_id(main.config.get('ss1.client_id'))['code']
test_member_remove_from_global_group = global_groups_tests.test_member_remove_from_global_group(main, member_name,
member_code,
group_name,
log_checker=log_checker)
try:
main.reload_webdriver(cs_host, cs_user, cs_pass)
test_member_remove_from_global_group()
except:
main.save_exception_data()
raise
finally:
main.tearDown()
| 43.586957 | 128 | 0.597007 | 1,834 | 0.914713 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.200998 |
f6c989f83c09341fd2147143a2556f4bde064d93 | 6,582 | py | Python | python/GafferTest/ExtensionAlgoTest.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferTest/ExtensionAlgoTest.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferTest/ExtensionAlgoTest.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2019, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import unittest
import functools
import Gaffer
import GafferTest
class ExtensionAlgoTest( GafferTest.TestCase ) :
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.addCleanup(
functools.partial( setattr, sys, "path", sys.path[:] )
)
def testExport( self ) :
# Export
box = Gaffer.Box( "AddOne" )
box["__add"] = GafferTest.AddNode()
box["__add"]["op2"].setValue( 1 )
Gaffer.PlugAlgo.promote( box["__add"]["op1"] ).setName( "in" )
Gaffer.PlugAlgo.promote( box["__add"]["sum"] ).setName( "out" )
Gaffer.Metadata.registerValue( box, "description", "Test" )
Gaffer.Metadata.registerValue( box["in"], "description", "The input" )
Gaffer.Metadata.registerValue( box["out"], "description", "The output" )
Gaffer.Metadata.registerValue( box["in"], "test", 1 )
Gaffer.ExtensionAlgo.exportExtension( "TestExtension", [ box ], self.temporaryDirectory() )
self.assertTrue( os.path.exists( os.path.join( self.temporaryDirectory(), "python", "TestExtension" ) ) )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
# Import and test
import TestExtension
script = Gaffer.ScriptNode()
script["node"] = TestExtension.AddOne()
script["node"]["in"].setValue( 2 )
self.assertEqual( script["node"]["out"].getValue(), 3 )
import TestExtensionUI
def assertExpectedMetadata( node ) :
self.assertEqual( Gaffer.Metadata.registeredValues( node, instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( node["in"], instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( node["out"], instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.value( node, "description" ), "Test" )
self.assertEqual( Gaffer.Metadata.value( node["in"], "description" ), "The input" )
self.assertEqual( Gaffer.Metadata.value( node["out"], "description" ), "The output" )
self.assertEqual( Gaffer.Metadata.value( node["in"], "test" ), 1 )
assertExpectedMetadata( script["node"] )
# Copy/paste and test
script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
self.assertEqual( script["node1"].keys(), script["node"].keys() )
self.assertEqual( script["node1"]["out"].getValue(), script["node"]["out"].getValue() )
assertExpectedMetadata( script["node1"] )
def testPlugTypes( self ) :
box = Gaffer.Box( "PlugTypes" )
box["int"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["float"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["string"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["v2i"] = Gaffer.V2iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["v3i"] = Gaffer.V3iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["color4f"] = Gaffer.Color4fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["spline"] = Gaffer.SplinefColor3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.ExtensionAlgo.exportExtension( "PlugTypesExtension", [ box ], self.temporaryDirectory() )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
import PlugTypesExtension
node = PlugTypesExtension.PlugTypes()
for plug in Gaffer.Plug.Range( node ) :
self.assertIsInstance( plug, type( box[plug.getName() ] ) )
if hasattr( plug, "getValue" ) :
self.assertEqual( plug.getValue(), box[plug.getName()].getValue() )
for plug in Gaffer.Plug.RecursiveRange( node ) :
self.assertFalse( plug.getFlags( Gaffer.Plug.Flags.Dynamic ) )
def testInternalExpression( self ) :
box = Gaffer.Box( "AddOne" )
box["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["__expression"] = Gaffer.Expression()
box["__expression"].setExpression( """parent["out"] = parent["in"] + 1""" )
Gaffer.ExtensionAlgo.exportExtension( "TestExtensionWithExpression", [ box ], self.temporaryDirectory() )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
import TestExtensionWithExpression
script = Gaffer.ScriptNode()
script["node"] = TestExtensionWithExpression.AddOne()
script["node"]["in"].setValue( 2 )
self.assertEqual( script["node"]["out"].getValue(), 3 )
# Test copy/paste
script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
self.assertEqual( script["node1"].keys(), script["node"].keys() )
self.assertEqual( script["node1"]["out"].getValue(), 3 )
if __name__ == "__main__":
unittest.main()
| 41.1375 | 133 | 0.692343 | 4,657 | 0.707536 | 0 | 0 | 0 | 0 | 0 | 0 | 2,479 | 0.376633 |
f6c9dadb66ba06561d5aa2ae635083ad448267fc | 494 | py | Python | sentimental_analysis.py | akrakman/hackillinois | a4cfb107aefb95ff987ba159e3c7867a9d10ce9a | [
"MIT"
] | null | null | null | sentimental_analysis.py | akrakman/hackillinois | a4cfb107aefb95ff987ba159e3c7867a9d10ce9a | [
"MIT"
] | null | null | null | sentimental_analysis.py | akrakman/hackillinois | a4cfb107aefb95ff987ba159e3c7867a9d10ce9a | [
"MIT"
] | null | null | null | from numpy import average, number
from textblob import TextBlob
class ScaleUtilities:
average = 0
number = 0
def __init__(self, string, number):
self.string = string
def get_subjectivity_of(string):
polarity = TextBlob(string).sentiment.polarity * 5
number += 1
average += polarity
return polarity
def average_opinion():
if (number == 0):
print("You idiot")
exit(1)
return average / number
| 22.454545 | 58 | 0.605263 | 428 | 0.866397 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.022267 |
f6ca9a419c623fefe3a83ca6f9ace8a92f85a66b | 2,753 | py | Python | chibi_command/lxc/lxc.py | dem4ply/chibi_command | 49efc3070bdf40e5f27146379487345b1accd427 | [
"WTFPL"
] | null | null | null | chibi_command/lxc/lxc.py | dem4ply/chibi_command | 49efc3070bdf40e5f27146379487345b1accd427 | [
"WTFPL"
] | null | null | null | chibi_command/lxc/lxc.py | dem4ply/chibi_command | 49efc3070bdf40e5f27146379487345b1accd427 | [
"WTFPL"
] | null | null | null | from chibi.atlas import Chibi_atlas
from chibi_command import Command, Command_result
from chibi_hybrid.chibi_hybrid import Chibi_hybrid
__all__ = [ 'Create', 'Start', 'Stop', 'Attach', 'Info', 'Destroy' ]
class Info_result( Command_result ):
def parse_result( self ):
if not self:
return
result = Chibi_atlas()
for l in self.result.split( '\n' ):
l = l.strip()
if not l:
continue
k, v = l.split( ':' )
v = v.strip()
result[k.lower()] = v.lower()
self.result = result
# lo dejare de usar
@property
def is_running( self ):
return self and self.result.state == 'running'
class LXC( Command ):
command = 'lxc'
captive = False
@Chibi_hybrid
def name( cls, name ):
return cls( '-n', name )
@name.instancemethod
def name( self, name ):
self.add_args( '-n', name )
return self
class Create( LXC ):
command = 'lxc-create'
captive = False
@Chibi_hybrid
def template( cls, template ):
return cls( '-t', template )
@template.instancemethod
def template( self, template ):
self.add_args( '-t', template )
return self
def parameters( self, *args ):
self.add_args( '--', *args )
return self
class Start( LXC ):
command = 'lxc-start'
captive = False
@Chibi_hybrid
def daemon( cls ):
return cls( '-d' )
@daemon.instancemethod
def daemon( self ):
self.add_args( '-d' )
return self
class Stop( LXC ):
command = 'lxc-stop'
captive = False
class Attach( LXC ):
command = 'lxc-attach'
args = ( '--clear-env', )
captive = False
@Chibi_hybrid
def set_var( cls, name, value ):
return cls( '--set-var', f"{name}={value}" )
@set_var.instancemethod
def set_var( self, name, value ):
self.add_args( '--set-var', f"{name}={value}" )
return self
def build_tuple( self, *args, **kw ):
new_args = []
for arg in args:
if isinstance( arg, Command ):
new_args += list( arg.build_tuple() )
else:
new_args.append( arg )
if self.delegate:
delegate_tuple = self.build_delegate()
return (
*delegate_tuple, self.command,
*self.build_kw( **kw ), *self.args, '--', *new_args )
return (
self.command, *self.build_kw( **kw ), *self.args, '--', *new_args )
class Info( LXC ):
command = 'lxc-info'
captive = True
args = ( '-H', )
result_class = Info_result
class Destroy( LXC ):
command = 'lxc-destroy'
captive = False
| 22.941667 | 79 | 0.54486 | 2,521 | 0.915728 | 0 | 0 | 873 | 0.317109 | 0 | 0 | 261 | 0.094806 |
f6cb67b13096c25131faa2e988c43e882cde21bd | 166,942 | py | Python | SeagateSenseCodes.py | ssmore98/siod | 5e1bd2691710ebaf49223d52894c2b08aa958112 | [
"Unlicense"
] | null | null | null | SeagateSenseCodes.py | ssmore98/siod | 5e1bd2691710ebaf49223d52894c2b08aa958112 | [
"Unlicense"
] | null | null | null | SeagateSenseCodes.py | ssmore98/siod | 5e1bd2691710ebaf49223d52894c2b08aa958112 | [
"Unlicense"
] | null | null | null | seagate_sense_codes = {0: {0: {0: {0: 'No error.', 'L1': 'No Sense', 'L2': 'No Sense'},
31: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Logical unit transitioning to another power condition'}},
94: {0: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Drive is in power save mode for unknown reasons'},
1: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle Condition Activated by timer'},
2: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Standby condition activated by timer'},
3: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle condition activated by host command'},
4: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Standby condition activated by host command'},
5: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle B condition activated by timer'},
6: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle B condition activated by host command'},
7: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle C condition activated by timer'},
8: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Idle C condition activated by host command'},
9: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Standby Y condition activated by timer'},
10: {0: 'No Specific FRU code.',
'L1': 'No Sense',
'L2': 'Standby Y conditition activated by host command'}}},
1: {1: {0: {157: "Recovered Media Manager's anticipatory autoseek (ATS2) XFR error.",
'L1': 'Recovered Error',
'L2': 'No Index/Logical Block Signal'}},
3: {0: {0: 'FRU code comes from the contents of the lower 8-bit of the servo fault register (address 38h). A description of this register is attached at the end of this document.',
'L1': 'Recovered Error',
'L2': 'Peripheral Device Write Fault'}},
9: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Track Following Error'},
1: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Servo Fault'},
13: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Write to at least one copy of a redundant file failed'},
14: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Redundant files have < 50% good copies'},
248: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Calibration is needed but the QST is set without the Recal Only bit'},
255: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Servo cal completed as part of self-test'}},
11: {0: {6: 'Non Volatile Cache now is volatile',
48: 'Recovered Erase Error Rate Warning',
50: 'Recovered Read Error Rate Warning',
66: 'Recovered Program Error Rate Warning',
'L1': 'Recovered Error',
'L2': 'Recovered Error Rates Warnings'},
1: {0: 'Warning \xe2\x80\x93 Specified temperature exceeded.',
'L1': 'Recovered Error',
'L2': 'Warning \xe2\x80\x93 Specified temperature exceeded'},
2: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Warning, Enclosure Degraded'},
3: {0: 'Warning \xe2\x80\x93 Specified temperature exceeded.',
'L1': 'Recovered Error',
'L2': 'Warning \xe2\x80\x93 Flash temperature exceeded'},
4: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Warning - Flash Read Cache Capacity Degraded'},
6: {0: 'Warning \xe2\x80\x93 NVC now volatile. NVC specified temperature exceeded.',
'L1': 'Recovered Error',
'L2': 'Warning - Non-Volatile Cache now volatile'},
7: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Warning, spare sector margin exceeded. NVC_WCD disabled.'},
38: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Warning - Power loss management warning threshold exceeded'},
93: {0: 'Pre Warning.',
'L1': 'Recovered Error',
'L2': 'Pre-SMART Warning'},
225: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Drive is exiting RAW mode, returning from high temperature'},
226: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Drive is exiting RAW mode, returning from low temperature'},
241: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Drive is entering RAW mode due to high temperature'},
242: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Drive is entering RAW mode due to low temperature'}},
12: {1: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Write Error Recovered with Auto-Reallocation'},
2: {1: 'Data written with retries. Auto-reallocation failed.',
2: 'Data written with retries. Auto-reallocation failed with critical error, triggering Write Protection.',
'L1': 'Recovered Error',
'L2': 'Write Error Recovered, Auto-Reallocation failed'}},
17: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Unrecovered Read Error'}},
21: {1: {0: 'Mechanical Positioning Error.',
1: 'Mechanical positioning error - Recovered servo command.',
2: 'Mechanical positioning error - Recovered servo command during spinup.',
'L1': 'Recovered Error',
'L2': 'Mechanical Positioning Error'}},
22: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Data Synchronization Mark Error'}},
23: {1: {0: 'No specific FRU code.',
17: 'PRESCAN - Recovered data with error recovery.',
18: 'RAW - Recovered data with error recovery.',
'L1': 'Recovered Error',
'L2': 'Recovered Data Using Retries'},
2: {0: 'No specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Recovered Data Using Positive Offset'},
3: {0: 'No specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Recovered Data Using Negative Offset'}},
24: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with ECC, no retry attempted'},
1: {0: 'Recovered data with ECC and retries applied.',
1: 'F2 \xe2\x80\x93 Recovered data with ECC and retries applied, but did normal ECC during erasure correction step.',
2: 'BERP \xe2\x80\x93 Recovered data with BERP Erasure Recovery (Erasure Pointer) and retries applied.',
3: 'BERP \xe2\x80\x93 Recovered data with BERP Sliding Window and retries applied.',
4: 'BERP \xe2\x80\x93 Recovered data with BERP Extended Iterations and retries applied.',
5: 'BERP \xe2\x80\x93 Recovered data with BERP LLR Scaling and retries applied.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with ECC and Retries Applied'},
2: {0: 'No specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with ECC and/or Retries, Data Auto-Reallocated'},
3: {0: 'Data recovered with ATIC and retries applied.',
1: 'Data recovered with ATIC and retries applied, and auto-reallocated.',
2: 'Data recovered with ATIC and retries applied, and re-written.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with ATIC and Retries Applied'},
4: {0: 'Data recovered with SERV and retries applied.',
1: 'Data recovered with SERV and retries applied, and auto-reallocated.',
2: 'Data recovered with SERV and retries applied, and re-written.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with SERV and Retries Applied'},
5: {1: 'Data recovered. Auto-reallocation failed.',
2: 'Data recovered. Auto-reallocation failed with critical error, triggering Write Protection.',
'L1': 'Recovered Error',
'L2': 'Recovered Data with ECC and/or Retries, Auto-Reallocation Failed'},
7: {0: 'No specific FRU code.',
'L1': 'Recovered Error',
'L2': 'ECC and/or retries, data re-written'},
8: {0: 'Data Recovered with BIPS/SP',
1: 'Data Recovered with BIPS/SP, and auto-reallocated.',
2: 'Data Recovered with BIPS/SP, and re-written.',
3: 'Data Recovered with Intermediate Super Parity.',
4: 'Data Recovered with Intermediate Super Parity, and auto-reallocated.',
5: 'Data Recovered with Intermediate Super Parity, and re-written.',
'L1': 'Recovered Error',
'L2': 'Recovered Data With Intermediate Super Parity'},
9: {0: 'Recovered the sector found to bad during IRAW scan with the IRAW process.',
'L1': 'Recovered Error',
'L2': 'Recovered the sector found to bad during IRAW scan'}},
25: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Defect List Error'}},
28: {0: {0: 'Defect list not found.',
1: 'Invalid Defect list format data request.',
'L1': 'Recovered Error',
'L2': 'Defect List Not Found'}},
31: {0: {0: 'Partial defect list transfer.',
'L1': 'Recovered Error',
'L2': 'Number of defects overflows the allocated space that the Read Defect command can handle'}},
55: {0: {0: 'Parameter Rounded.',
1: 'Limit the BytesPerSector to Maximum Sector Size.',
2: 'Limit the BytesPerSector to Minimum Sector Size.',
3: 'Rounded the odd BytesPerSector.',
4: 'Parameter rounded in the mode page check.',
5: 'Rounded the VBAR size.',
'L1': 'Recovered Error',
'L2': 'Parameter Rounded'}},
63: {128: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Buffer Contents Have Changed'}},
64: {1: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'DRAM Parity Error'},
2: {128: 'Spinup error recovered with buzz retries.',
129: 'Spinup error recovered without buzz retries.',
'L1': 'Recovered Error',
'L2': 'Spinup Error recovered with retries'}},
68: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Internal Target Failure'}},
93: {0: {0: 'No Specific FRU code.',
1: 'Fail Max Recovered Error threshold during DST',
4: 'Reallocation.',
5: 'Reallocation AST table.',
6: 'Reallocation DDT table.',
8: 'Auto Reallocation Failure',
9: 'Impending Failure - Throughput Performance: Insufficient spare to back all NVC cache.',
10: 'NVC has failed to save Torn Write data more times than the threshold (currently 10)',
11: 'Failure Prediction Max Temperature Exceeded',
16: 'Hardware failure.',
20: 'Excessive reassigns.',
22: 'Start times failure.',
24: 'Instruction DBA error found during idle task. Fixed.',
32: 'General failure.',
39: 'SSD Early Retired Blocks Failure',
40: 'SSD Flash Life Left',
41: 'Exceeded time allocated to complete Zero Disk test (seq write)',
48: 'Recovered erase error rate (SSD-Jaeger only)',
49: 'Head failure.',
50: 'Recovered data error rate.',
55: 'Recovered TA.',
56: 'Hard TA event.',
64: 'Head flip.',
65: 'SSE (servo seek error).',
66: 'Write fault.',
67: 'Seek failure.',
68: 'Erase Error.',
69: 'Track following errors (Hit66).',
74: 'Seek performance failure.',
91: 'Spinup failure.',
96: 'Firmware Failure condition',
97: 'RVFF system failure.',
98: 'Gain adaptation failure.',
99: 'Fluid Dynamic Bearing Motor leakage detection test failed.',
100: 'Saving Media Cache Map Table (MCMT) to reserved zone failed.',
116: 'SED NOR Key store near to end of life',
117: 'Multiply threshold config.',
239: 'No control table on disk.',
'L1': 'Recovered Error',
'L2': 'Failure Prediction Threshold Exceeded'},
16: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'Failure Prediction Threshold Exceeded'},
255: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': 'False Failure Prediction Threshold Exceeded'}},
133: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': '5V threshold exceeded'}},
140: {0: {0: 'No Specific FRU code.',
'L1': 'Recovered Error',
'L2': '12V threshold exceeded'}}},
2: {4: {0: {0: 'Logical Unit Not Ready, Cause Not Reportable.',
1: 'No Specific FRU code.',
2: 'Logical unit not ready, Change Definition Command in progress.',
128: 'R/W system not ready.',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Cause Not Reportable'},
1: {0: 'No Specific FRU code.',
1: 'Logical unit not ready, excess particle sweep time required',
2: 'Wait for good power loss management status.',
'L1': 'Not Ready',
'L2': 'Logical Unit is in Process of Becoming Ready'},
2: {0: 'No Specific FRU code.',
1: 'Supply voltage levels not within spec',
'L1': 'Not Ready',
'L2': 'Drive Not Ready \xe2\x80\x93 START UNIT Required'},
3: {0: 'Logical Unit Not Ready, Manual Intervention Required.',
1: 'Logical unit not ready, manual intervention required, Servo doesn\xe2\x80\x99t support MDW Delta-L, Servo doesn\xe2\x80\x99t support VBAR.',
2: 'Logical unit not ready, manual intervention required \xe2\x80\x94CFW is configured for medium latency, but channel family is not capable of supporting medium latency.',
3: 'Logical unit not ready, R/W sub-system failure.',
4: 'Logical unit not ready, Drive is unmated',
5: 'Logical unit not ready, R/W sub-system init failed; Invalid Servo Firmware',
6: 'Logical unit not ready, R/W sub-system init failed; Invalid Servo Adaptives',
7: 'Logical unit not ready, Read failure on all System Data Table copies',
8: 'Logical unit not ready, Forward table read error during restore',
9: 'Logical unit not ready, Scram metadata read error during restore',
10: 'Logical unit not ready, GCU info restore failed',
11: 'Logical unit not ready, Defect table restore failed',
12: 'Logical unit not ready, Scram restore failed',
13: 'Logical unit not ready, Bxor Critical Failure',
15: 'Logical unit not ready, Media is corrupted but data successfully recovered via media scan',
16: 'Logical unit not ready, suspect list read error',
17: 'Logical unit not ready, reverse directory error during restore',
18: 'Logical unit not ready, system recovery format completed but drive lost critical system data',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Manual Intervention Required'},
4: {0: 'Logical unit not ready, Scram restore failed',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Format in Progress'},
7: {0: 'Logical unit not ready, Scram restore failed',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Format in Progress'},
9: {0: 'Logical unit not ready, self-test in progress.',
1: 'Logical unit not ready, short background self-test in progress.',
2: 'Logical unit not ready, extended background self-test in progress.',
3: 'Logical unit not ready, short foreground self-test in progress.',
4: 'Logical unit not ready, extended foreground self-test in progress.',
5: 'Logical unit not ready, firmware download in progress.',
6: 'Logical unit not ready, initial volume download in progress.',
7: 'Logical unit not ready, session opened.',
8: 'No Specific FRU code',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, H2SAT measurement is Progress'},
12: {0: 'No Specific FRU code.',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, Field Adjustable Adaptive Fly Height (FAAFH) in progress'},
13: {7: 'Logical unit not ready, Session is already Open.',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, Session is already Open'},
17: {2: 'Logical Unit Not Ready, Notify (Enable Spinup) required',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Notify (Enable Spinup) required'},
26: {0: 'Logical Unit Not Ready, Start Stop Unit Command in Progress',
'L1': 'Not Ready',
'L2': 'Logical Unit Not Ready, Start Stop Unit Command in Progress'},
27: {0: 'Logical unit not ready, sanitize in progress.',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, sanitize in progress.'},
28: {0: 'No specific FRU code.',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, additional power use not yet granted'},
34: {1: 'Spindle Error (Spinup Failure)',
2: 'SMIF Traning Failed after 5 times attempt',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, power cycle required.'},
240: {0: 'Logical unit not ready, super certify in progress.',
1: 'Counterfeit attempt detected (ETF log SN or SAP SN mismatch)',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, super certify in progress'},
242: {0: 'Drive has been placed in special firmware due to assert storm threshold being exceeded.',
'L1': 'Not Ready',
'L2': 'Logical unit not ready, Assert Storm Threshold being exceeded.'}},
53: {2: {1: 'Enclosure not ready, no ENCL_ACK assert.',
'L1': 'Not Ready',
'L2': 'Enclosure Services Unavailable'}},
132: {0: {0: 'Remanufacturing State.',
'L1': 'Not Ready',
'L2': 'Remanufacturing State'}}},
3: {3: {0: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Peripheral Device Write Fault'}},
9: {0: {0: 'Track following error.',
254: 'Head flip during power cycle.',
255: 'Track following error.',
'L1': 'Medium Error',
'L2': 'Track Following Error'},
4: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Head Select Fault'}},
10: {1: {0: 'No Specific FRU code.',
1: 'Failed to write super certify log file from media backend',
'L1': 'Medium Error',
'L2': 'Failed to write super certify log file'},
2: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Failed to read super certify log file'}},
12: {0: {0: 'Peripheral device write fault.',
1: 'Write error during single sector recovery.',
2: 'Write error during gc relocation',
3: 'Write is revirtualized because of a channel error - SSD',
4: 'Read during preamp unsafe fault.',
5: 'Flash Media Request aborted due to Graceful Channel Reset',
7: 'Save SMART error log failed',
8: 'Write error from media backend',
9: 'SSD Uncorrectable Write Error from Media backend',
10: 'SSD Erase Error.',
11: 'SSD PSM Runt Buffer Page Mismatch Error',
17: 'Unrecovered read error on READ step of PRESCAN.',
18: 'Unrecovered read error on READ step of WRITE converted to WRITE VERIFY during RAW operation.',
22: 'Unrecovered read error on READ step of Read-modify-write',
128: 'BVD update error.',
129: 'BVD Correctable IOEDC error.',
'L1': 'Medium Error',
'L2': 'Write Error'},
2: {0: 'Unrecovered write error - Auto reallocation failed.',
1: 'Reallocate Block - Write alternate block failed, no servo defects.',
2: 'Reallocate Block - Alternate block compare test failed.',
3: 'Reallocate Block - Alternate block sync mark error.',
4: 'Reallocate Block - Maximum allowed alternate selection exhausted.',
5: 'Reallocate Block - Resource is not available for a repetitive reallocation.',
6: 'Reallocate Block Failed',
7: 'Reallocate Block Failed - Write Protect',
8: 'Write error, autoreallocation failed from media backend',
'L1': 'Medium Error',
'L2': 'Write Error \xe2\x80\x93 Auto Reallocation Failed'},
3: {0: 'Write Error \xe2\x80\x93 Recommend Reassignment',
'L1': 'Medium Error',
'L2': 'Write Error \xe2\x80\x93 Recommend Reassignment'},
4: {0: 'WORM Error - Invalid Overlapping Address Range.',
1: 'WORM Error - Written WORM Area Infringement.',
2: 'WORM Error - No further writes allowed on WORM drive',
3: 'WORM Error - SIM Registry Read Failed',
4: 'WORM Error - SIM Registry Write Failed',
5: 'WORM Error - Illegal write request after Lock',
'L1': 'Illigal Request',
'L2': 'Write Error - WORM'},
128: {3: 'Disc trace write (to clear it) failed 02.',
5: 'Disc trace write failed.',
6: 'Save UDS DRAM trace frames to disc failed.',
8: 'Write Long disc transfer failed.',
'L1': 'Medium Error',
'L2': 'Write Error \xe2\x80\x93 Unified Debug System'},
255: {1: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Write Error \xe2\x80\x93 Too many error recovery revs'}},
17: {0: {0: 'Unrecovered Read Error.',
1: 'Unrecovered Read Error, too many recovery revs.',
2: 'Unrecovered read error, could not read UDS save-on-update trace',
3: 'Unrecovered read error, could not read UDS finished frame index file',
4: 'Unrecovered read error, could not read SMART to include it in UDS',
5: 'Unrecovered read error, UDS read of a finished frame file failed',
6: 'Read SMART error log failed',
7: 'Unrecovered read BBM on partial reallocation',
8: 'Unrecovered read error on media backend',
9: 'SSD Unrecovered Read Error due to ECC Failure',
10: 'SSD Unrecovered Read Error due to Corrupt Bit',
11: 'Unrecovered read flagged error, created with flagged write uncorrectable command',
12: 'Unrecovered read error due to flash channel reset',
128: 'Read during preamp unsafe fault.',
129: 'EDAC HW uncorrectable error.',
130: 'EDAC overrun error.',
131: 'LBA corrupted with Write Long COR_DIS mode.',
132: 'LBA was in Media cache, hardened upon unrec. read error during cleaning',
133: 'EDAC HW uncorrectable error, Super parity or ISP valid and parity recovery attempted.',
134: 'EDAC HW uncorrectable error, Super parity and ISP Invalid.',
160: 'Read preamp unsafe fault with short/open fault set',
'L1': 'Medium Error',
'L2': 'Unrecovered Read Error'},
4: {0: 'Unrecovered Read Error \xe2\x80\x93 Auto Reallocation Failed',
128: 'Write alternate block failed, no servo defects.',
129: 'Alternate block compare test failed.',
130: 'Alternate block sync mark error.',
131: 'Maximum allowed alternate selection exhausted.',
132: 'Resource is not available for a repetitive reallocation.',
133: 'SERV HW EDAC failure.',
134: 'SERV SID failure.',
135: 'Number of reallocation pending Super Block exceeded limit.',
136: 'Reallocation pending sector encountered during Super Block read.',
137: 'Reallocation pending sector encountered during Super Block write.',
'L1': 'Medium Error',
'L2': 'Unrecovered Read Error \xe2\x80\x93 Auto Reallocation Failed'},
20: {0: 'Unrecovered read error- read pseudo-unrecovered from a WRITE LONG',
'L1': 'Medium Error',
'L2': 'Unrecovered Read Error \xe2\x80\x93 LBA marked bad by application'},
255: {1: 'Unrecovered read error- timelimit exceeded.',
'L1': 'Medium Error',
'L2': 'Unrecovered Read Error \xe2\x80\x93 Too many error recovery revs'}},
20: {1: {0: 'Record Not Found.',
128: 'Search exhaust error or congen mode page directory not found',
129: 'Reallocation LBA is restricted from write access or congen compressed XML not found',
130: 'Reallocation LBA is restricted from read access.',
131: 'Read from or Write to log page data on reserved zone failed.',
'L1': 'Medium Error',
'L2': 'Record Not Found'}},
21: {1: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Mechanical Positioning Error'},
3: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Unrecovered write errors due to grown servo flaws'}},
22: {0: {0: 'Data synchronization mark error.',
128: 'Data sync timeout error.',
129: 'Formatter FIFO parity error 01.',
130: 'Formatter FIFO parity error 02.',
131: 'Super Sector - Data sync timeout error',
132: 'Disc Xfr - Data sync timeout error on sector splits',
'L1': 'Medium Error',
'L2': 'Data Synchronization Mark Error'},
1: {0: 'Data missed sync mark error. FRU code is bits mask indicating which fragment(s) have missed sync error. Bit n represents fragment n.',
'L1': 'Medium Error',
'L2': 'Data Synchronization Mark Error'}},
49: {0: {0: 'Medium Format Corrupted.',
1: 'Corruption result of a Mode Select command.',
2: 'Corruption result of a sparing changed condition.',
3: 'Corruption the result of a failed LBA pattern write in Format command.',
4: 'Corruption result of failed user table recovery.',
5: 'Corruption of NVC global header',
6: 'Medium format corrupted from media backend',
7: 'Medium Format Corrupt due to flash identify failure',
8: 'Medium Format Corrupt as a result of failed NVC write or invalid NVC meta data',
9: 'Medium Format Corrupt due to NVC WCD Meta data corruption',
10: 'Media Format Corrupt due to Write failure during MC WCD data restore to disc',
11: 'Medium Format Corrupt because NVC did not burn flash',
12: 'Medium Format Corrupt because Pseudo Error Masks lost after power loss',
13: 'Medium Format Corrupt because unexpected Media Cache Segment Sequence Number',
14: 'Medium Format Corrupt\xc2\xa0due to system reset without saving NVC data',
15: 'Medium Format Corrupt due to firmware reset (jump to 0) without saving NVC data',
16: 'Medium Format Corrupt due to incomplete burn but no actual power loss',
18: 'Medium Format Corrupt because watchdog timer reset',
19: 'Medium Format Corrupted On Assert (intentionally)',
20: 'Medium Format Corrupt due to IP timeout during SCRAM',
21: 'Medium Format Corrupt due to Write Parameter Error',
22: 'Medium Format Corrupt due to GCU Metadata Error',
24: 'Medium Format Corrupt due to System Metadata restore failure',
32: 'Format Corrupt due to Download changes.',
34: 'Scram user NVC restore failed',
'L1': 'Medium Error',
'L2': 'Medium Format Corrupted'},
1: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Corruption in R/W format request.'},
3: {0: 'Sanitize Command Failed',
1: 'Sanitize Command Failed due to file access error',
'L1': 'Medium Error',
'L2': 'Sanitize Command Failed'},
145: {13: 'Corrupt WWN in drive information file.',
'L1': 'Medium Error',
'L2': 'Corrupt WWN in drive information file'}},
50: {1: {0: 'Defect list update failure.',
128: 'Failed to save defect files.',
129: 'Failed to save defect files post format 01.',
130: 'Failed to save defect files post format 02.',
131: 'Failed to save defect files post format 03.',
'L1': 'Medium Error',
'L2': 'Defect List Update Error'},
3: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Defect list longer than allocated memory.'}},
51: {0: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Flash not ready for access.'}},
68: {0: {0: 'No Specific FRU code.',
'L1': 'Medium Error',
'L2': 'Internal Target Failure'}},
93: {0: {1: 'Max Unrecovered Read Error',
'L1': 'Medium Error',
'L2': 'Unrecovered Read Error'}}},
4: {1: {0: {0: 'No index or sector pulses found.',
128: 'Spin up - Media Manager error encountered.',
129: 'Data field timeout error.',
130: "Media Manager's TDT FIFO Counter error.",
131: "Media Manager's Servo Counter error.",
132: "Media Manager's Latency error.",
133: "Media Manager's Index error.",
134: "Media Manager's Servo error.",
135: 'Media Manager errors could not be cleared successfully.',
136: 'Clearing of MM errors due to a servo error failed.',
137: 'SWCE/SGate overlap error.',
138: 'Servo gate timeout error 01.',
139: 'Servo gate timeout error 02.',
140: 'Servo gate timeout error 03.',
141: 'Servo gate timeout error 04.',
142: 'Servo gate timeout error 05.',
143: 'Super Sector - Handshake error.',
144: 'Super Sector - Servo gate timeout error 01.',
145: 'Super Sector - Servo gate timeout error 02.',
146: 'Super Sector - Servo gate timeout error 03.',
147: 'Super Sector - Servo gate timeout error 04.',
148: 'Super Sector - Servo gate timeout error 05.',
149: 'Servo gate timeout error during generation of Aseek Req.',
150: 'BVD check timeout error.',
151: 'NRZ sequencer completion timeout error.',
152: 'Sequencer timeout on Media Manager event..',
153: 'NRZ xfr error on Media Manager event.',
154: 'Disc sequencer handshake error.',
155: 'Medium Latency channel synchronization handshake error.',
156: 'Fast dPES missed servo sample error.',
157: "Media Manager's anticipatory autoseek (ATS2) XFR error.",
158: 'When a reassigned sector is encountered, wait for the NRZ to finish the previous sector',
159: 'Fast IO Data Collection out of sync with sequencer',
160: 'Channel not ready rev count exhausted. Apply to LDPC LLI channels',
161: "Media Manager's anticipatory autoseek (ATS2) Servo error.",
162: 'Media Manager\xe2\x80\x99s anticipatory autoseek (ATS2) Disc Pause Condition',
163: 'BERP infinite loop condition',
164: 'Brownout fault detected during write transfer.',
165: 'Sequencer completion timeout error at reassigned sector.',
166: 'Sequencer S-gate timeout error during start of sector read.',
167: 'Sequencer S-gate timeout error during skipping of a new sector.',
'L1': 'Hardware Error',
'L2': 'No Index/Logical Block Signal'}},
3: {0: {2: 'Gated Channel Fault',
3: 'Write Preamp Unsafe Fault',
4: 'Write Servo Unsafe Fault',
5: 'Read/write channel fault.',
6: 'SFF fault.',
7: 'Write servo field fault.',
8: 'Write Servo unsafe fault.',
9: 'SSD: Peripheral device write fault (flush cache failed)',
16: 'Write Servo sector fault.',
32: 'Read/Write channel fault.',
64: 'Servo fault.',
128: 'Detect of new servo flaws failed.',
129: 'PSG environment fault.',
130: 'Shock event occurred.',
131: 'Unexpected Extended WGATE fault.',
132: 'Channel detected fault during write.',
133: 'Disc locked clock fault detected.',
134: 'Skip Write Detect Dvgas fault',
135: 'Skip Write Detect Rvgas fault',
136: 'Skip Write Detect Fvgas fault',
137: 'Skip Write Detect Dvgas+Rvgas+Fvgas sum threshold exceeded - last SWD fault Dvgas',
138: 'Skip Write Detect Dvgas+Rvgas+Fvgas sum threshold exceeded - last SWD fault Rvgas',
139: 'Skip Write Detect Dvgas+Rvgas+Fvgas sum threshold exceeded - last SWD fault Fvgas',
140: 'Drive free-fall event occurred',
141: 'Large Shock event occured',
144: 'NRZ Write Parity fault.',
145: 'Marvell 8830 TBG Unlock fault.',
146: 'Marvell 8830 WClk Loss fault.',
147: 'EBMS Fault Detect(EFD) Contact fault during write.',
148: 'EBMS Fault Detect(EFD) Contact fault during read.',
149: 'EBMS Fault Detect(EFD) SWOT fault.',
150: 'Marvell SRC SFG Unlock fault.',
255: "LSI 6 channel preamp attempting to write without heat, condition detected by servo and passed as servo fault ( i.e. Preamp error condition indicated by servo fault condition ). This should be recovered by a 'seek away ' performed as part of recovery step",
'L1': 'Hardware Error',
'L2': 'Peripheral Device Write Fault'}},
9: {0: {0: 'Servo track following error.',
64: 'Servo fault, Normally 04/0900/80 would be changed to 04/0900/40 by the firmware.',
128: 'Servo fault, Normally 04/0900/80 would be changed to 04/0900/40 by the firmware.',
129: 'Servo unsafe fault during write.',
130: 'EDAC block address error.',
131: 'Missing MDW information reported by servo detection.',
132: 'Servo command timed out.',
133: 'Seek command timed out.',
134: 'Seek exceeded recovery time limit.',
135: 'Service drive free fall condition timed out.',
136: 'The altitude has exceeded the limit',
137: 'Seek command timed out on alternate sector',
138: 'Super Block marked dirty.',
139: 'Verify of Super Block data failed.',
140: 'Servo fatal error indicated',
141: 'Super Parity long word Cross Check error',
142: 'Super Parity low word Cross Check error',
143: 'Super Parity high word Cross Check error',
144: 'Super Parity data miscompare',
145: 'Invalid Anticipatry Track Seek request.',
146: 'Enhance Super parity regeneration failure.',
147: 'Super parity regeneration failure.',
'L1': 'Hardware Error',
'L2': 'Track Following Error'},
1: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Servo Fault'},
4: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Head Select Fault'},
255: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Servo cal failed as part of self-test'}},
21: {1: {0: 'Mechanical Positioning Error.',
128: 'Servo error encountered during drive spin-up.',
129: 'Servo error encountered during drive spin-down.',
130: 'Spindle failed error.',
131: 'Unrecovered seek error encountered.',
132: 'Servo command failed.',
133: 'Servo heater timing failed.',
134: 'Servo Free-Fall Protection command failed.',
135: 'Servo Disc Slip Full TMFF recalibration failed.',
136: 'Servo Disc Slip Head Switch Timing recalibration failed.',
137: 'Servo Disc Slip Head Switch Track recalibration failed.',
138: 'Servo read heat fast I/O command failed.',
139: 'Spin-up attempt during G2P merge process failed.',
140: 'Spin-down attempt during PList processing failed.',
141: 'Spin-up attempt during PList processing failed.',
'L1': 'Hardware Error',
'L2': 'Mechanical Positioning Error'}},
22: {0: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Data Synchronization Mark Error'}},
25: {0: {0: 'Defect list error.',
1: 'Save user table error.',
2: 'SIM file transfer error.',
3: 'Persistent Reserve Save to disc fail.',
4: 'Defect list error media backend',
128: 'Format - Recover of saved Grown DST file failed.',
129: 'Recovery of saved Non-Resident DST failed.',
130: 'Clear R/W Slip List - Save of R/W Operating Parmaters file failed.',
131: 'Restore Alt List File From media - Failed restoration from media file.',
132: 'Save of Servo Disc Slip Parms to media failed.',
133: 'Read of Servo Disc Slip Parms from media failed 1.',
134: 'Read of Servo Disc Slip Parms from media failed 2.',
135: 'Servo Disc Slip file - invalid format revision.',
136: 'GList to PList - Recover of saved Grown DST file failed.',
137: 'Clear Non-resident Grown DST - Save to media failed.',
'L1': 'Hardware Error',
'L2': 'Defect List Error'}},
28: {0: {0: 'Defect list not found.',
1: 'Defect list processing error.',
2: 'Read Manufacturing Info File failure.',
3: 'Read Manufacturing Info File failure.',
4: 'Defect list not found from media backend',
50: 'Read Manufacturing Info File failure.',
52: 'Read Manufacturing Info File failure.',
129: 'Failure to read Primary Defects file for reporting.',
130: 'Invalid entry count in Plist file.',
131: 'Invalid byte extent value in Plist entry.',
132: 'Process Defect Lists - Sort error due to invalid offset.',
133: 'Process Defect Lists - Sort error due to invalid head.',
134: 'Process Defect Lists - Sort error due to invalid cylinder.',
135: 'Process Defect Lists - Unable to recover the Primary Defect files.',
136: 'Failed to seek to defect files for reassign.',
137: 'Failed to seek to defect files for undo-reassign.',
138: 'Failure to write defects report lists file to media.',
139: 'Read of defects report file from media failed.',
140: 'An invalid defects report file is encountered 01.',
141: 'An invalid defects report file is encountered 02',
142: 'Restore of R/W User Operating Parameters file failed.',
143: 'Invalid Primary Servo Flaws data encountered.',
144: 'Failed to save defect files due to miscompare error.',
146: 'PList overflow error while merging PSFT and PList for reporting.',
147: 'Maximum certify passes of a zone exceeded.',
148: 'Maximum write passes of a zone exceeded.',
149: 'Primary Servo Flaws data retrieval - Unable to read file on disc.',
150: 'Primary Servo Flaws data retrieval - Invalid entry count in file.',
151: 'Defective Sectors List data retrieval - Unable to read file on disc.',
152: 'Defective Sectors List data retrieval - Invalid file header data.',
153: 'PList data retrieval - Invalid entry count in Plist file.',
154: 'PList data retrieval - Unable to read Plist file on disc.',
155: 'System Format - invalid entry count.',
156: 'Primary TA data retrieval - Unable to read file on disc.',
157: 'Primary TA data retrieval - Invalid count.',
158: 'Primary TA data retrieval - Invalid sort.',
159: "Process Defect Lists - Defect doesn't exist in audit space.",
160: 'Retrieve Defects Report List - Not All Entries Available',
161: 'Format - Invalid LBA range in PVT before update of dirty blocks.',
162: 'Format - Invalid Parity Validity Table after clean of dirty blocks.',
163: 'Format - Clean of dirty blocks failed.',
164: 'Format - Save of Parity Validity Table to media failed.',
'L1': 'Hardware Error',
'L2': 'Defect List Not Found'}},
38: {48: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Writing to the Flash Failed'},
49: {0: 'Failed to program PIC code with new firmware',
'L1': 'Hardware Error',
'L2': 'Writing to the PIC Failed'}},
41: {0: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Flashing LED occurred.'}},
50: {0: {8: 'No defect spare location available',
9: 'No defect spare location available for reassign block.',
128: 'Processing of pending reallocation failed.',
129: 'Failed to insert defect to DST.',
130: 'Failed to insert Plist defect to DST.',
131: 'Grown DST file full 01.',
132: 'Grown DST file full 02.',
133: 'Resident DST file full.',
134: 'Failed to insert defective sectors associated w/grown servo flaw.',
135: 'Failure to invalidate Defects Report disc files.',
136: 'Format System Partition \xe2\x80\x93 Failed to insert defective system sectors associated w/ grown servo flaw.',
137: 'Format System Partition \xe2\x80\x93 Failed to insert defective system sectors.',
138: 'Format System Partition \xe2\x80\x93 System Defects file full.',
139: 'Process Defect Lists \xe2\x80\x93 Failed to insert a client specified defect in the defect file.',
140: 'ASFT \xe2\x80\x93 Max # of servo flaws per track exceeded (path #1).',
141: 'ASFT \xe2\x80\x93 Max # of servo flaws per track exceeded (path #2).',
142: 'ASFT full (path #1).',
143: 'ASFT full (path #2).',
144: 'Addition to Reassign Pending List failed.',
145: 'Resource is not available for a new reallocation.',
146: 'No alternates available (path #1).',
147: 'Failed to insert defective sectors associated w/grown servo flaw.',
148: 'Failed to deallocate compromised defects.',
149: 'Format System Partition \xe2\x80\x93 Failed to deallocate compromised.',
150: 'Insertion of DDT entry failed.',
151: 'Compressed DDT file full.',
152: 'Format \xe2\x80\x93 Failed to insert defective sectors associated w/primary servo flaw.',
153: 'Defective Tracks List \xe2\x80\x93 Failed to insert grown defective sectors associated with defective track.',
154: 'Defective Tracks List \xe2\x80\x93 Failed to insert primary defective sectors associated with defective track.',
155: 'Defective Tracks List \xe2\x80\x93 Failed to add new entry to list.',
156: 'Reallocate Block \xe2\x80\x93 Resource is not available for a partial reallocation.',
157: 'Resource is not available for a partial reallocation.',
158: 'Not enough non-defective sectors to allocate for BIPS parity Sectors.',
159: 'BIPS defect table operation failed \xe2\x80\x93 case 1.',
160: 'BIPS defect table operation failed \xe2\x80\x93 case 2.',
161: 'Format \xe2\x80\x93 Failed to add defective track to DST.',
162: 'Format \xe2\x80\x93 Failed to allocate spare sectors.',
163: 'Pad and Fill Defects \xe2\x80\x93 Max number of skipped tracks exceeded.',
164: 'Format \xe2\x80\x93 Failed to allocate spare sectors.',
165: 'Format \xe2\x80\x93 More LBAs than PBAs.',
166: 'Format \xe2\x80\x93 Failed to allocate spare sectors.',
167: 'Format \xe2\x80\x93 Failed to allocate spare sectors.',
168: 'Format \xe2\x80\x93 Failed to allocate spare sectors.',
169: 'Format \xe2\x80\x93 Excessive number of slips not supported by hardware.',
170: 'Invalid HW parity data for parity sector reallocation.',
171: 'Format \xe2\x80\x93 Could not allocate required guard/pad around media cache area on disc',
172: 'Format \xe2\x80\x93 Will not be able to save ISP/MC metadata after the format (a mis-configuration problematic to try to address before this point)',
173: 'Format - Failed to allocate spare sectors.',
174: 'Format - Failed to allocate spare sectors.',
175: 'Format - Failed to allocate spare sectors.',
176: 'Format - Failed to allocate spare sectors.',
177: 'Format - Failed to update parity sectors slip list.',
178: 'Format - Invalid track sector range encountered.',
179: 'Format \xe2\x80\x93 Could not allocate required guard/pad around intermediate super parity area on disc',
180: 'Format \xe2\x80\x93 Media Cache starting DDT entry not found.',
193: 'Format - Attempt to add pad between user area and start/end of Distributed Media Cache failed',
'L1': 'Hardware Error',
'L2': 'DMC area padding failed'},
1: {0: 'Defect list update failure.',
23: 'Saving of the ASFT during idle time failed',
129: 'Plist file overflow error.',
130: 'PSFT file overflow error.',
131: 'Unable to write defect files.',
132: 'Unable to update operating parms file.',
133: 'Plist file overflow error.',
134: 'Plist file overflow error.',
'L1': 'Hardware Error',
'L2': 'Defect List Update Error'}},
53: {0: {8: 'LIP occurred during discovery.',
9: 'LIP occurred during an 8067 command.',
10: 'LIP occurred during an 8045 read.',
11: 'LIP occurred during an 8067 read.',
12: 'LIP occurred during an 8067 write.',
13: 'Parallel ESI deasserted during discovery.',
14: 'Parallel ESI deasserted during an 8067 command.',
15: 'Parallel ESI deasserted during an 8045 read.',
16: 'Parallel ESI deasserted during an 8067 read.',
17: 'Parallel ESI deasserted during an 8067 write.',
'L1': 'Hardware Error',
'L2': 'Unspecified Enclosure Services Failure'},
3: {2: 'Enclosure found but not ready \xe2\x80\x93 No Encl_Ack Negate.',
4: 'Read Data Transfer Enclosure Timeout.',
5: 'Write Data Transfer Enclosure Timeout.',
13: 'Read Data Transfer Bad Checksum.',
14: 'Write Data Transfer Enclosure Timeout.',
15: 'Read Data Transfer Enclosure Timeout.',
'L1': 'Hardware Error',
'L2': 'Enclosure Transfer Failure'},
4: {4: 'Read Data Transfer Refused by Enclosure.',
5: 'Write Data Transfer Refused by Enclosure.',
'L1': 'Hardware Error',
'L2': 'Enclosure Transfer Refused'}},
62: {3: {0: 'No Specific FRU code.',
1: 'Logical Unit Failed Self Test \xe2\x80\x93 TestWriteRead',
2: 'Logical Unit Failed Self Test \xe2\x80\x93 TestRandomRead',
3: 'Logical Unit Failed Self Test \xe2\x80\x93 ScanOuterDiameter',
4: 'Logical Unit Failed Self Test \xe2\x80\x93 ScanInnerDiameter',
5: 'Logical Unit Failed Self Test from media backend',
'L1': 'Hardware Error',
'L2': 'Logical Unit Failed Self Test'},
4: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'H2SAT Foreground Test Failure'}},
64: {0: {128: 'Format \xe2\x80\x93 Exceeded max number of track rewrites during certify retries.',
'L1': 'Hardware Error',
'L2': 'Miscellaneous Error'},
1: {0: 'Buffer memory parity error.',
1: 'Buffer FIFO parity error.',
2: 'IOEDC error.',
3: 'VBM parity error.',
'L1': 'Hardware Error',
'L2': 'DRAM Parity Error'},
145: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Cryptographic Hardware Power-On Self-Test Failure'},
146: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Cryptographic Algorithm Power-On Self-Test Failure'},
147: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Conditional Random Number Generation Self-Test Failure'},
148: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Hidden Root Key Error During Command Execution'},
149: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Entropy Power-On Self-Test Failure'},
150: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Conditional Entropy Self-Test Failure'},
151: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Boot Firmware SHA-256 Power-On Self-Test Failure'},
152: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Boot Firmware RSA Power-On Self-Test Failure'}},
66: {0: {0: 'Power-on or self-test failure.',
1: 'DST failure.',
2: 'SIM Spinning-up state transition failure.',
3: 'SIM Drive Initialization state transition failure.',
4: 'Read/write thread initialization failed.',
20: 'DIC exceeds the time limits consecutively over count limit',
'L1': 'Hardware Error',
'L2': 'Power-On or Self-Test Failure'},
10: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Port A failed loopback test'},
11: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Port B failed loopback test'}},
68: {0: {0: 'Internal target failure',
1: 'Self test buffer test failure.',
2: 'Write read test failure.',
3: 'Data sync timeout error.',
4: 'SSD Test access error (DST)',
5: 'Backend (SSD) DRAM failure (DST)',
6: 'Backend (SSD) SRAM failure (DST)',
7: 'Internal target failure META data test',
8: 'Internal target failure System Area Check',
9: 'wait for ACP to get ready for reset took too long',
10: 'wait for AMP to get ready for reset took too long',
11: 'FDBMotor Leakage detected.',
12: 'wait for I2C to be available took too long',
128: 'Write during preamp unsafe fault.',
129: 'Read write channel fault.',
130: 'Small form factor fault.',
131: 'Write during servo field fault.',
132: 'Media Manager\xe2\x80\x99s TPBA FIFO Counter error.',
133: 'Media Manager\xe2\x80\x99s TPBA FIFO Under-run error.',
134: 'Media Manager\xe2\x80\x99s DDT FIFO Counter error.',
135: 'Media Manager\xe2\x80\x99s DDT FIFO Under-run error.',
136: 'Media Manager\xe2\x80\x99s Parity error.',
137: 'Media Manager\xe2\x80\x99s TDT FIFO Under-run error.',
138: 'Media Manager\xe2\x80\x99s Skip Mask Under-run error.',
139: 'Get Temperature request resulted in invalid temperature.',
140: 'Detected unsupported H/W in a Set Voltage Margin request.',
141: 'Unused Error Code.',
142: 'SMART Initial buffer not ready',
143: 'Formatter EDAC correction memory parity error.',
144: 'NX \xe2\x80\x93 RLL1 error.',
145: 'Disc Buffer parity error.',
146: 'Sequencer encountered an EXE/SGATE overlap error.',
147: 'Formatter Correction Buffer underrun error.',
148: 'Formatter Correction Buffer overrun error.',
149: 'Formatted detected NRZ interface protocol error.',
150: 'Media Manager\xe2\x80\x99s MX Overrun error.',
151: 'Media Manager\xe2\x80\x99s NX Overrun error.',
152: 'Media Manager\xe2\x80\x99s TDT Request error.',
153: 'Media Manager\xe2\x80\x99s SST Overrun error.',
154: 'Servo PZT calibration failed.',
155: 'Fast I/O- Servo Data Update Timeout error.',
156: 'Fast I/O- First wedge Servo data Timeout error.',
157: 'Fast I/O- Max samples per collection exceeded.',
158: 'CR memory EDC error',
159: 'SP block detected an EDC error',
160: 'Preamp heater open/short fault.',
161: 'RW Channel fault- Memory buffer overflow or underflow or parity error during write.',
162: 'RW Channel fault- Memory buffer overflow or read data path FIFO underflow in legacy NRZ mode.',
163: 'RW Channel fault- Preamp fault during R/W.',
164: 'RW Channel fault- SGATE, RGATE, or WGATE overlap.',
165: 'RW Channel fault- Mismatch in split sector controls or sector size controls.',
166: 'RW Channel fault- Write clock or NRZ clock is not running.',
167: 'RW Channel fault- SGATE, RGATE, or WGATE asserted during calibration.',
168: 'RW Channel fault- RWBI changed during a read or write event.',
169: 'RW Channel fault- Mode overlap flag.',
170: 'RW Channel fault- Inappropriate WPLO or RPLO behavior.',
171: 'RW Channel fault- Write aborted.',
172: 'RW Channel fault- Bit count late.',
173: 'RW Channel fault- Servo overlap error',
174: 'RW Channel fault- Last data fault',
176: 'PES threshold in field is too far from the same value calculated in the factory.',
177: 'Not enough Harmonic Ratio samples were gathered',
178: 'Sigma of Harmonic Ratio samples after all discards exceeded the limit',
179: 'No EBMS contact fault, even at lowest threshold value',
180: 'EBMS fault still detected at highest threshold value',
181: 'Formatter detected BFI error.',
182: 'Formatter FIFO Interface error.',
183: 'Media sequencer- Disc sequencer Data transfer size mismatch.',
184: 'Correction buffer active while disc sequencer timeout error (this error code is used to fix the hardware skip mask read transfer issue).',
185: 'Seagate Iterative Decoder \xe2\x80\x93 Channel RSM fault',
186: 'Seagate Iterative Decoder \xe2\x80\x93 Channel WSM fault',
187: 'Seagate Iterative Decoder \xe2\x80\x93 Channel BCI fault',
188: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SRC fault',
189: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SAB fault',
190: 'Seagate Iterative Decoder \xe2\x80\x93 Channel read gate overflow error',
192: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SMB Bus B parity error',
193: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SMB buffer error on write',
194: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SOB buffer error on write',
195: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SOB parity error',
196: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SAB buffer error',
197: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SAB bend error',
198: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI buffer sync error',
199: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI data length error on write',
200: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI framing error on write',
201: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI write status error',
202: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI pipe state error (Bonanza), - Channel RSM Gross Error (Caribou- Luxor)',
203: 'Seagate Iterative Decoder \xe2\x80\x93 Channel decoder microcode error',
204: 'Seagate Iterative Decoder \xe2\x80\x93 Channel encoder microcode error',
205: 'Seagate Iterative Decoder \xe2\x80\x93 Channel NRZ parity error',
206: 'Seagate Iterative Decoder \xe2\x80\x93 Symbols per Sector mismatch error',
207: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SMB Bus A parity error',
208: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SMB NRZ parity error',
209: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SOB Buffer error on read',
210: 'Seagate Iterative Decoder \xe2\x80\x93 Channel SMB Buffer error on read',
211: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI data length error on read',
212: 'Seagate Iterative Decoder \xe2\x80\x93 Channel LLI framing error on read',
217: 'Seagate Iterative Decoder \xe2\x80\x93 Channel WSM Gross error',
218: 'Seagate Iterative Decoder \xe2\x80\x93 Channel ERF buffer error',
224: 'Preamp low voltage error',
225: 'Preamp low write data frequency at common point error',
226: 'Preamp write head open error',
227: 'Preamp write head shorted to ground error',
228: 'Preamp TA sensor open error',
229: 'Preamp temperature error',
230: 'Preamp write without heat error',
231: 'Preamp writer off in write error',
232: 'Preamp writer output buffer error',
233: 'Preamp low write data frequency at the head error',
234: 'Preamp FOS error',
235: 'Preamp TA or contact detect error',
236: 'Preamp SWOT error',
237: 'Preamp serial port communication error',
238: 'HSC magnitude overflow error',
240: 'RW Channel \xe2\x80\x93 RDATA valid overlap fault',
241: 'RW Channel \xe2\x80\x93 RD valid gap fault',
244: 'RW Channel \xe2\x80\x93 W Parity not ready',
247: 'RW Channel \xe2\x80\x93 Wrong sector length',
248: 'RW Channel \xe2\x80\x93 Encoder overflow error',
249: 'RW Channel \xe2\x80\x93 Encoder early termination fault',
250: 'RW Channel \xe2\x80\x93 Iteration parameter error',
251: 'RW Channel \xe2\x80\x93 MXP write fault',
252: 'RW Channel \xe2\x80\x93 Symbol count error',
253: 'RW Channel \xe2\x80\x93 RD Incomplete error',
254: 'RW Channel \xe2\x80\x93 RD Data VGA error',
255: 'RW Channel \xe2\x80\x93 RD Data TA error',
'L1': 'Hardware Error',
'L2': 'Internal Target Failure'},
1: {2: 'RW Channel \xe2\x80\x93 RFM Wrong sector length',
3: 'RW Channel \xe2\x80\x93 RFM FIFO underflow',
4: 'RW Channel \xe2\x80\x93 RFM FIFO Overflow',
5: 'RW Channel \xe2\x80\x93 Vector flow errors',
32: 'HSC - An error occurred when attempting to open the file to be used for Harmonic Sensor Circuitry data collection.',
33: 'HSC - The Standard Deviation of the VGAS data collected by the Harmonic Sensor Circuitry was zero.',
34: 'HSC - The Standard Deviation of the 3rd Harmonic data collected by the Harmonic Sensor Circuitry was zero.',
35: 'HSC - The Servo Loop Code returned at the completion of Harmonic Sensor Circuitry data collection was not 0.',
36: 'HSC - An invalid write pattern was specified Harmonic Sensor Circuitry data collection.',
37: 'AR Sensor - The AR Sensor DAC to Target calculation encountered the need to take the square root of a negative value.',
38: 'AR Sensor - The AR Sensor encountered an error when attempting to open the Background Task file.',
39: 'AR Sensor - The AR Sensor encountered an error when attempting to open the General Purpose Task file.',
40: "AR Sensor - The size of the Background Task file is inadequate to satisfy the AR Sensor's requirements.",
41: "AR Sensor - The size of the General Purpose Task file is inadequate to satisfy the AR Sensor's requirements.",
42: 'AR Sensor - The FAFH Parameter File revision is incompatible with the AR Sensor.',
43: 'AR Sensor - The AR Sensor Descriptor in the FAFH Parameter File is invalid.',
44: 'AR Sensor - The Iterative Call Index specified when invoking the AR Sensor exceeds the maximum supported value.',
45: 'AR Sensor - The AR Sensor encountered an error when performing a Track Position request.',
46: 'AR Sensor - The Servo Data Sample Count specified when invoking the AR Sensor exceeds the maximum supported value.',
47: 'AR Sensor - The AR Sensor encountered an error when attempting to set the read channel frequency.',
48: 'AR Sensor - The 3rd Harmonic value measured by the AR Sensor was 0.',
96: 'RW Channel - LOSSLOCKR fault',
97: 'RW Channel - BLICNT fault',
98: 'RW Channel - LLI ABORT fault',
99: 'RW Channel - WG FILLR fault',
100: 'RW Channel - WG FILLW fault',
101: 'RW Channel - CHAN fault',
102: 'RW Channel - FRAG NUM fault',
103: 'RW Channel - WTG fault',
104: 'RW Channel - CTG fault',
105: 'RW Channel -\xc2\xa0NZRCLR fault',
106: 'RW Channel - \xc2\xa0Read synthesizer prechange fail fault',
107: 'RW Channel -\xc2\xa0Servo synthesizer prechange fail fault',
108: 'RW Channel - Servo Error detected prior to halting Calibration Processor',
109: 'RW Channel -\xc2\xa0Unable to Halt Calibration Processor',
110: 'RW Channel -\xc2\xa0ADC Calibrations already disabled',
111: 'RW Channel -\xc2\xa0Calibration Processor Registers have already been saved',
112: 'RW Channel -\xc2\xa0Address where Calibration Processor Registers are to be saved is invalid',
113: 'RW Channel -\xc2\xa0Array for saving Calibration Processor Register values is too small',
114: 'RW Channel -\xc2\xa0Calibration Processor Register values to be used for AR are invalid',
115: 'RW Channel -\xc2\xa0Synchronous abort complete fault',
116: 'RW Channel -\xc2\xa0Preamble length fault',
117: 'RW Channel -\xc2\xa0TA or media defect event fault',
118: 'RW Channel -\xc2\xa0DPLL frequency overflow/underflow fault',
119: 'RW Channel -\xc2\xa0Zero gain threshold exceeded fault',
120: 'RW Channel -\xc2\xa0DPLL frequency deviation fault',
121: 'RW Channel -\xc2\xa0Extended EVGA overflow/underflow fault',
128: 'RW Channel -\xc2\xa0\xc2\xa0Read VGA gain fault',
129: 'RW Channel -\xc2\xa0Acquire Peak Amplitude flag fault',
130: 'RW Channel -\xc2\xa0Massive drop-out fault',
131: 'RW Channel -\xc2\xa0Low Quality sync mark fault',
132: 'RW Channel -\xc2\xa0NPLD load error fault',
133: 'RW Channel -\xc2\xa0Write path memory fault status bit fault',
134: 'RW Channel -\xc2\xa0WRPO disabled fault',
135: 'RW Channel -\xc2\xa0Preamble quality monitor fault',
136: 'RW Channel -\xc2\xa0Reset detection flag fault',
137: 'RW Channel -\xc2\xa0Packet write fault',
138: 'RW Channel -\xc2\xa0Gate command queue overflow fault',
139: 'RW Channel -\xc2\xa0Gate command queue underflow fault',
140: 'RW Channel -\xc2\xa0Ending write splice fault status fault',
141: 'RW Channel -\xc2\xa0Write-through gap servo collision fault',
142: 'RW Channel - Read Gate Fault',
143: 'Error reading the Preamp Gain register during an HSC operation',
144: 'Error writing the Preamp Gain register during an HSC operation',
145: 'RW Channel - Calibration Processor not halted',
146: 'RW Channel - Background Calibrations already stopped',
147: 'RW Channel -\xc2\xa0Background Calibrations not stopped',
148: 'RW Channel - Calibration Processor halt error',
149: 'RW Channel - Save AR Calibration Processor registers error',
150: 'RW Channel - Load AR Calibration Processor registers error',
151: 'RW Channel - Restore AR Calibration Processor registers error',
152: 'RW Channel - Write Markov Modulation Code Failure Type 0',
153: 'RW Channel - Write Markov Modulation Code Failure Type 1',
154: 'RW Channel - Write Markov Modulation Code Failure Type 2',
155: 'RW Formatter - NRZ Interface Parity Randomizer Nyquist Error',
156: 'RW Formatter - NRZ Interface Parity Randomizer Run Error',
157: 'RW Formatter - DLT Fifo Underrun Error',
158: 'RW Formatter - WDT Fifo Underrun Error',
159: 'RW Formatter - M2 MI error',
'L1': 'Hardware Error',
'L2': 'Internal Target Failure'},
224: {0: 'Failure writing firmware to disc.',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
225: {0: 'Failed to reinitialize the NVC Host',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
226: {0: 'Failed to erase the NVC Header',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
227: {0: 'Failed to write NVC client data to disc',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
228: {0: 'Failed to initialize the NVC header',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
229: {0: 'Failed to initialize the NVC Host',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
230: {0: 'Failed to write MCMT during initialization',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
231: {0: 'Failed to write the ISPT during format',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
232: {0: 'Failed to clear logs during format',
'L1': 'Hardware Error',
'L2': 'Writing to Disc Failed'},
242: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Data Integrity Check Failed on verify'},
246: {0: 'FRU 00 \xe2\x80\x93 09 stand for error on head 0 \xe2\x80\x93 9.',
16: 'Power-on self-test failed.',
'L1': 'Hardware Error',
'L2': 'Data Integrity Check Failed during write'},
251: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Failed to enter Raid Partial Copy Diagnostic mode'},
255: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'XOR CDB check error'}},
93: {0: {1: 'Number of Command Timeouts Exceeded',
'L1': 'Hardware Error',
'L2': 'Command Timeout'}},
101: {0: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Voltage fault.'}},
128: {134: {0: 'Host IOEDC Error on Read detected by host',
1: 'IOEDC error on read.',
2: 'IOECC error on read.',
3: 'FDE IOECC error on read.',
4: 'SSD IOEDC error on read',
5: 'SSD Erased Page Error',
6: 'FDE Sector-bypass datatype mismatch',
'L1': 'Hardware Error',
'L2': 'IOEDC - DataType Error on Read'},
135: {0: 'Host IOEDC Error on Write, this is unused',
1: 'FDE IOEDC Error on Write detected by the FDE logic',
2: 'SSD IOEDC Error on Write',
128: 'Disk IOEDC parity error on write detected by formatter',
129: 'IOECC and IOEDC errors occurred, which is highly probable (when IOECC is enabled) for multiple or single bit corruption.',
130: 'IOECC parity error on write.',
131: 'IOECC error (correctable).',
'L1': 'Hardware Error',
'L2': 'IOEDC Error on Write'},
136: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Host Parity Check Failed'},
137: {128: 'IOEDC parity error on read detected by formatter',
'L1': 'Hardware Error',
'L2': 'IOEDC error on read detected by formatter'},
138: {'L1': 'Hardware Error',
'L2': 'Host FIFO Parity Error detected by Common Buffer',
'fru': ['xx is 00, 01, 02 or 03 ( channel number )']},
139: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Host FIFO Parity Error detected by frame buffer logic'},
140: {0: 'No Specific FRU code.',
1: 'For Read Host Data Frame Buffer Parity Error.',
2: 'For Write Host Data Frame Buffer Parity Error.',
3: 'SSD Buffer Memory Parity Error',
4: 'Host Data Frame Buffer Uncorrectable ECC Error.',
'L1': 'Hardware Error',
'L2': 'Host Data Frame Buffer Uncorrectable ECC Error'},
141: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Host Data Frame Buffer Protection Error'},
142: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Host FIFO overrun or underrun rrror'},
143: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'Host FIFO unknown error'}},
129: {0: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'LA Check Error, LCM bit = 0'}},
130: {0: {0: 'No Specific FRU code.',
1: 'Insufficient return buffer.',
'L1': 'Hardware Error',
'L2': 'Diag internal client detected insufficient buffer'}},
131: {0: {0: 'No Specific FRU code.',
'L1': 'Hardware Error',
'L2': 'DOS Scalars are out of range'}}},
5: {26: {0: {0: 'Parameter list length error.',
1: 'Format Parameter list length error.',
2: 'Mode Select command Parameter list length error.',
3: 'Extended Mode select command Parameter list length error.',
4: 'Mode Select operation Parameter list length error.',
5: 'Check mode page Parameter list length error.',
6: 'Reassign Block command Parameter list length error.',
7: 'Parameter list length error.',
8: 'Parameter data list length error.',
'L1': 'Illegal Request',
'L2': 'Parameter List Length Error'}},
32: {0: {0: 'Invalid Command Operation Code',
1: 'Primary Invalid Command Operation Code',
2: 'Unique command not unlocked code.',
7: 'Glist to Plist Unlock command not unlocked code.',
8: 'Invalid Command Operation Code for SSD Backend',
'L1': 'Illegal Request',
'L2': 'Invalid Command Operation Code'},
243: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Invalid linked command operation code'}},
33: {0: {1: 'Logical block address out of range',
2: 'Invalid LBA in synchronize cache',
3: 'Invalid LBA in read capacity',
4: 'Invalid LB in write same',
5: 'Invalid LBA in read/write long',
6: 'Invalid LBA in seek',
7: 'Logical block address out of range from media backend',
'L1': 'Illegal Request',
'L2': 'Logical Block Address Out of Range'}},
36: {0: {0: 'Invalid field in CDB',
1: 'Bad CDB error.',
2: 'Invalid field in CDB. (Format command)',
3: 'Invalid field in CDB. (Setup R/W Long command)',
4: 'Invalid field in CDB. (Log sense page)',
5: 'Invalid field in CDB. (Log sense parameter)',
6: 'Invalid field in CDB. (Log select command)',
7: 'Invalid Field in CDB \xe2\x80\x93 UDS trigger command.',
8: 'Invalid Field in CDB \xe2\x80\x93 buffer overflow check.',
9: 'Invalid power transition request',
10: 'Invalid power transition mode bit',
11: 'Invalid power transition PCM',
12: 'Invalid page and subpage combination (Log sense)',
13: 'Invalid field in CDB. (Report Zones command- SMR)',
22: 'Invalid field in CDB. (Skip Mask)',
48: 'Invalid Combination of CDB.',
49: 'Change Definition Illegal Parameter.',
50: 'Change Definition Illegal Password',
51: 'Change Definition Unlock Command Error',
52: 'Change Definition Not Supported',
53: 'Change Definition Mismatch in port mode (Single/Dual port: SAS only)',
54: 'Invalid field in CDB from media backend',
'L1': 'Illegal Request',
'L2': 'Invalid Field in CDB'},
1: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Illegal Queue Type for CDB (Low priority commands must be SIMPLE queue)'},
46: {0: 'The Byte Offset exceeds the length of the SMART frame data.',
'L1': 'Illegal Request',
'L2': 'Invalid field in CDB for E6 SMART Dump command, unique to NetApp.'},
240: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Invalid LBA in linked command'},
242: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Invalid linked command operation code'},
243: {128: 'G->P operation requested while drive was formatted w/o PLIST.',
129: 'Servo Flaw already exists in ASFT or PSFT.',
130: 'G->P operation encountered a G-list entry that overlaps an existing P-list entry.',
131: 'G->P operation encountered a Growth Servo Flaw which overlapped an existing Primary defect Servo Flaw.',
132: 'Defects report lists not available for retrieval.',
133: "Servo Flaw doesn't exist in ASFT.",
'L1': 'Illegal Request',
'L2': 'Illegal Servo Flaw operation request'}},
37: {0: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Logical unit not supported'}},
38: {0: {0: 'Invalid Field in Parameter List.',
1: 'Invalid Field in Format Parameter List.',
2: 'Field in RetrieveThirdPartyID Parameter List.',
3: 'Invalid Field in ModeSelectOperation Parameter List.',
4: 'Invalid Field in CheckModePage Parameter List.',
5: 'Invalid Field in ReassignBlocksCmd Parameter List.',
6: 'Invalid Field in PersistentReserveOutCmd Parameter List.',
7: 'Invalid Field Invalid in LogSelectCmd Parameter List.',
8: 'Invalid LogSelectCmd Parameter List length.',
9: 'Invalid Field in WriteBufferCmd Parameter List.',
10: 'Invalid Field in SendDiagnosticCmd Parameter List.',
11: 'Invalid Field in BuildTranslateAddrPage Parameter List.',
12: 'An E0 packet to UDS was too small',
13: 'Invalid FCN ID in E0 packet to UDS.',
14: 'Invalid Field in Retrieved Trace Information packet to UDS (E0).',
15: 'Cannot clear UDS trace, because UDS was not allowed to return it all',
16: 'Cannot enable/disable UDS trace, drive not ready',
17: 'Unsupported block size.',
18: 'UDS trigger command.',
19: 'Invalid remanufacturing command.',
20: 'Invalid command while SMART reporting disabled.',
21: 'Invalid field in parameter list from media backend',
128: 'Invalid input cylinder.',
129: 'Invalid input head.',
130: 'Invalid input sector.',
131: 'Input user LBA is invalid 01.',
132: 'Input user LBA is invalid 02.',
133: 'Input user LBA is invalid 03.',
134: 'Input system LBA is invalid.',
135: 'Client defect list size is invalid.',
136: 'Sort error due to invalid offset.',
137: 'Sort error due to invalid head.',
138: 'Sort error due to invalid cylinder.',
139: 'Failed to validate a client specified byte extent info.',
140: 'Failed to validate a client specified sector extent info.',
141: 'Invalid track in client defect list entry.',
142: 'Input track is invalid.',
143: 'First LBA of input track is invalid.',
144: 'Invalid servo data block length.',
145: 'Invalid servo program block length.',
146: 'Address translation \xe2\x80\x93 input PBA is invalid',
147: 'Address translation \xe2\x80\x93 input symbol extent is invalid.',
148: 'Super sector transfer \xe2\x80\x93 invalid wedge transfer size.',
149: 'Track ZLR Transfer \xe2\x80\x93 Invalid partition.',
150: 'Track ZLR Transfer \xe2\x80\x93 Invalid LBA range on target track.',
151: 'Track ZLR Transfer \xe2\x80\x93 Reallocated LBA found on target track.',
152: 'Input user LBA is invalid 04.',
153: 'Input user LBA is invalid 05.',
154: 'Convert Sector to RLL Data \xe2\x80\x93 Unsupported sector size.',
155: 'Add Servo Flaw \xe2\x80\x93 Invalid input specified.',
156: 'Invalid condition for enabling servo free fall protection (drive not spinning).',
157: 'Invalid condition for disabling servo free fall protection (drive not spinning).',
158: 'Invalid condition for disabling servo free fall protection (protection already disabled).',
159: 'Invalid condition for disabling servo free fall protection (protection already de-activated).',
160: 'Invalid condition for disabling servo free fall protection (free-fall condition is currently active).',
161: 'Invalid drive free-fall control option specified.',
162: 'Check free-fall event failed \xe2\x80\x93 protection not functional.',
163: 'Invalid sector range specified.',
164: 'Invalid count value specified for update.',
165: 'Invalid channel memory select specified for access.',
166: 'Invalid buffer index specified for read channel memory access.',
167: 'Invalid start address specified for read channel memory access.',
168: 'Invalid transfer length specified for read channel memory access.',
169: 'Invalid sector extent info',
175: 'Band translation - invalid input type specified',
176: 'Band translation - invalid output type specified',
177: 'Band translation - invalid input Band ID',
178: 'Band translation - invalid input Band ID',
179: 'Band translation - invalid input track position',
180: 'Band translation - invalid input RAP zone, head',
185: 'Invalid band number.',
186: 'Invalid band lba offset.',
187: 'Invalid user lba.',
189: 'Invalid parameter',
193: 'DITS Buffer ( Dummy Cache ) too small',
'L1': 'Illegal Request',
'L2': 'DITS Buffer ( Dummy Cache ) too small'},
1: {0: 'No Specific FRU code.',
1: 'Log pages unavailable for inclusion in UDS dump.',
'L1': 'Illegal Request',
'L2': 'Parameter Not Supported'},
2: {0: 'No Specific FRU code.',
1: 'DIAG: Invalid input cylinder.',
2: 'DIAG: Invalid input head.',
3: 'DIAG: Invalid input sector.',
4: 'DIAG: Invalid Wedge.',
5: 'DIAG: Invalid LBA.',
6: 'DIAG: Invalid file selection.',
7: 'DIAG: Invalid file length.',
8: 'DIAG: Invalid start offset.',
9: 'DIAG: Write Overflow.',
10: 'DIAG: Backplane Bypass selection invalid.',
11: 'DIAG: Invalid serial number.',
12: 'DIAG: Incomplete DFB.',
13: 'DIAG: Unsupported DFB revision.',
14: 'DIAG: Invalid Temperature selection.',
15: 'DIAG: Invalid Transfer Length.',
16: 'DIAG: Unsupported memory area.',
17: 'DIAG: Invalid command.',
18: 'DIAG: File copy invalid.',
19: 'DIAG: Insufficient data sent from initiator.',
20: 'DIAG: Unsupported DIAG command.',
21: 'DIAG: Flash segment invalid.',
22: 'DIAG: Req flash segment copy invalid.',
23: 'DIAG: Flash access failed.',
24: 'DIAG: Flash segment length invalid.',
25: 'DIAG: File checksum invalid.',
26: 'DIAG: Host DFB Length Invalid',
27: 'DIAG: Unaligned transfer.',
28: 'DIAG: Unsupported operation.',
29: 'DIAG: Backend invalid.',
30: 'DIAG: Flash plane invalid.',
31: 'DIAG: ISP node not found.',
32: 'DIAG: Invalid parameter.',
33: 'DIAG: Format corrupt condition required.',
34: 'DIAG: Clear all scan unit counts not allowed',
35: 'DIAG:\xc2\xa0 Unsupported Flash Device',
36: 'DIAG:\xc2\xa0 Raw flash blocks in MList',
37: 'DIAG:\xc2\xa0 Raw flash format table mismatch',
38: 'DIAG:\xc2\xa0 Raw flash Unused format slot',
39: 'DIAG:\xc2\xa0 Raw flash cannot decide format table',
40: 'DIAG:\xc2\xa0 Raw flash invalid error code',
41: 'DIAG:\xc2\xa0 Write protect condition',
42: 'DIAG: Requested for a Pre-erased block in Nor flash',
57: 'Parameter Data out of range.',
58: 'Parameter Data over write.',
64: 'DIAG: Diag write failed',
65: 'DIAG: DIAG_DST_IS_IN_PROGRESS',
66: 'DIAG: DIAG_TEST_RANGE_IN_SET',
68: 'DIAG: DIAG_BMS_IS_ENABLED',
72: 'DIAG: DIAG_INVALID_START_LBA',
73: 'DIAG: DIAG_INVALID_END_LBA',
'L1': 'Illegal Request',
'L2': 'Parameter Value Invalid'},
3: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Threshold Parameter not supported'},
4: {0: 'Invalid Release of Active Persistent Reserve',
1: 'Invalid release of persistent reservation. (reservation type mismatch)',
'L1': 'Illegal Request',
'L2': 'Invalid Release of Active Persistent Reserve'},
5: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Fail to read valid log dump data'},
152: {0: 'No Specific FRU code.',
1: 'FDE checksum error.',
2: 'Failed Flash Verification on Newly Downloaded Component.',
'L1': 'Illegal Request',
'L2': 'Invalid Field Parameter \xe2\x80\x93 Check Sum'},
153: {1: 'Segment type mismatch.',
2: 'Customer ID mismatch.',
3: 'Drive type mismatch.',
4: 'HW configuration mismatch.',
5: 'Compatibility configuration mismatch.',
6: 'Servo firmware product family mismatch.',
7: 'QNR download is not supported.',
8: 'CAP product family mismatch.',
9: 'RAP product family mismatch.',
10: 'Download segment length too large.',
11: 'Download length invalid.',
12: 'CTPM missing.',
13: 'CFW and CAP mismatch 01.',
14: 'CFW and CAP mismatch 02.',
15: 'CFW and CAP mismatch 03.',
16: 'CFW and RAP mismatch 01.',
17: 'CFW and RAP mismatch 02.',
18: 'CFW and RAP mismatch 03.',
19: 'CFW and SAP mismatch 01.',
20: 'CFW and SAP mismatch 02.',
21: 'CFW and SAP mismatch 03.',
22: 'CFW and SFW mismatch 01.',
23: 'SAP Product family mistmatch',
24: 'CFW and SFW mismatch 03.',
25: 'Download buffer offset invalid.',
26: 'Address translation invalid.',
27: 'CFW and IAP mismatch.',
28: 'Quick Download in Progress.',
29: 'Invalid unlock tags \xe2\x80\x93 customer does not match',
30: 'Invalid unlock tags \xe2\x80\x93 customer does not match',
31: 'Invalid unlock tags \xe2\x80\x93 checksum failure',
32: 'Firmware not backward compatible.',
33: 'Download overlay incompatible.',
34: 'Overlay download failure 1.',
35: 'Overlay download failure 2.',
36: 'Overlay download failure 3.',
37: 'General download failure',
38: 'Trying to download bridge code for wrong product family',
39: 'Factory flags mismatch.',
40: 'Illegal combination \xe2\x80\x93 Missing BootFW module.',
41: 'Illegal combination \xe2\x80\x93 Missing Customer FW Feature Flags module.',
42: 'Illegal combination \xe2\x80\x93 Programmable Inquiry download not supported.',
43: 'Illegal combination \xe2\x80\x93 Missing CustomerFW module.',
44: 'Download Congen header failure',
46: 'Download Congen XML failure',
47: 'Download Congen version failure',
48: 'Download Congen XML SIM MakeLocalFile failure',
49: 'Download Congen mode data failure \xe2\x80\x93 could not save mode header.',
50: 'Download Congen mode data failure \xe2\x80\x93 mode page had sent length/spec length miscompare.',
51: 'Download Congen mode data failure \xe2\x80\x93 mode page had invalid contents.',
52: 'Download Congen mode data failure \xe2\x80\x93 mode page tried to change contents not allowed by change mask.',
53: 'Download Congen mode data failure \xe2\x80\x93 save all mode pages could not write to media.',
54: 'Download Congen mode data failure \xe2\x80\x93 save partial mode pages could not write to media.',
55: 'Download Congen mode data failure \xe2\x80\x93 mode change callbacks did not complete successfully.',
56: 'Package Enforcement Failure \xe2\x80\x93 Package didn\xe2\x80\x99t contain valid SFW component',
57: 'Invalid link rate',
59: 'Unlock code not allowed to be DL if dets is locked',
60: 'DETS is locked, code download is blocked',
61: 'Code download is blocked if DETS is locked',
62: 'Download is blocked due to system area incompatibility with new code',
63: 'Invalid SD&D customer family for customer cross-market-segment downloads.',
64: 'Unlock File failed to be written to the flash',
65: 'Unlock File secuirty headers do not match',
80: 'Download header length invalid',
81: 'Download length is not a multiple of the buffer word size',
82: 'Download length and segment length mismatch',
161: 'Unknown firmware tag type.',
162: 'Attempt to R/W locked LBA band',
163: 'SSD download combined code has mismatched frontend and backend',
164: 'SSD download backend code \xe2\x80\x93 recovery required',
165: 'SSD download a firmware which is mismatched with resident firmware',
166: 'SSD download standalone (non-bundle) firmware in boot mode.',
'L1': 'Illegal Request',
'L2': 'Invalid Field Parameter \xe2\x80\x93 Firmware Tag'},
154: {0: 'Invalid Security Field Parameter in secure download packaging.',
1: 'Attempt to perform secure download with drive not spun up',
2: 'Attempt to download signed non-fde firmware in use state or fail state',
3: 'Attempt to download signed sed code onto a non-sed drive.',
4: 'Download inner signature key index does not match the outer signature key index.',
16: 'Inner firmware signature validation failure.',
18: 'Power Governor feature requires that both CFW and SFW support same number of seek profiles. This sense code indicates an attempt to download a code with mismatching seek profiles count',
20: 'DOS Table Size has been reduced',
'L1': 'Illegal Request',
'L2': 'Invalid Field Parameter \xe2\x80\x93 Firmware Tag'},
155: {0: 'SSD download code mismatched with running frontend code FRU indicate running frontend compatibility number 00-FF',
'L1': 'Illegal Request',
'L2': 'SSD Compatibility Error'}},
44: {0: {0: 'Command Sequence Error.',
1: 'Command Sequence Error. (R/W Buffer command)',
2: 'Command Sequence Error. (Retrieve SDBPP Packet)',
3: 'Command Sequence Error. (Diag Locked)',
4: 'Command Sequence Error. (Concurrent UDS service attempt)',
5: 'Command Sequence Error. (UDS retrieval: back-to-back E0)',
6: 'Command Sequence Error. (Unexpected retrieve trace packet received during non-handshaked UDS retrieval)',
7: 'Command Sequence Error. (Back-to-back E1 commands, illegal during handshaked UDS retrieval and illegal during non-handshaked when it\xe2\x80\x99s time to retrieve the last trace packet)',
8: 'Command Sequence Error. (Send Diag cmd. Before Write Buffer cmd)',
9: 'Command Sequence Error. (Channel BCI logging in online mode)',
10: 'Stop command execution disallowed when Raid Rebuild mode is active/enabled',
11: 'Foreground H2SAT operation currently not allowed.',
'L1': 'Illegal Request',
'L2': 'Command Sequence Error'},
5: {0: 'No Specific FRU code.',
1: 'Power Management frozen (OBSOLETE)',
'L1': 'Illegal Request',
'L2': 'Illegal Power Condition Request'},
128: {0: 'Command Sequence Error. (Illegal to request MC flush while cleaning is disabled.)',
'L1': 'Illegal Request',
'L2': 'Command Sequence Error'}},
50: {1: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Defect List Update Error'}},
53: {1: {3: 'No enclosure found.',
7: 'Unsupported 8045 Enclosure Request.',
'L1': 'Illegal Request',
'L2': 'Unsupported Enclosure Function'}},
71: {6: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'SAS - Physical Test in Progress'}},
73: {0: {0: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Illegal request, Invalid message error'}},
85: {4: {1: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'PRKT table is full'}}},
6: {11: {1: {0: 'No Specific FRU code.',
1: 'Temperature is lower than the low temperature threshold.',
'L1': 'Unit Attention',
'L2': 'Warning \xe2\x80\x93 Specified temperature exceeded'}},
41: {0: {0: 'Power on, reset, or bus device reset occurred. (SPI flash LED)',
1: 'CDB trigger dump and reset occurred..',
2: 'LIP trigger dump and reset occurreD',
3: 'Performing some type of logout, either N_PORT, FCP, or both.',
202: 'The Flight Recorder area in FLASH contains data',
'L1': 'Unit Attention',
'L2': 'Power-On, Reset, or Bus Device Reset Occurred'},
1: {0: 'Power-on reset occurred. (SPI)',
1: 'Power-on reset occurred. (SSI)',
6: 'Power-on reset occurred when rezero with 0xEF in byte 1',
7: 'Power-on reset occurred due to HW controller watchdog expiration',
8: 'Power-on reset initiated by firmware (e.g. to remove lockup conditions)',
9: 'Power-on reset occurred when servo watchdog timer expires',
'L1': 'Unit Attention',
'L2': 'Power-On Reset Occurred'},
2: {0: 'SCSI bus reset occurred.',
2: 'Warm reset occurred.',
'L1': 'Unit Attention',
'L2': 'SCSI Bus Reset Occurred'},
3: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Bus Device Reset'},
4: {0: 'No Specific FRU code.',
1: 'Internal Reset due to Assert Storm Threshold being exceeded.',
3: 'NVC WCD has marked corrupted sector dirty.',
'L1': 'Unit Attention',
'L2': 'Internal Reset'},
5: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Transceiver Mode Changed to SE'},
6: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Transceiver Mode Changed to LVD'},
7: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'IT Nexus Loss'},
8: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Write Log Dump data to disk fail'},
9: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Write Log Dump Entry information fail'},
10: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Reserved disc space is full'},
11: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'SDBP test service contained an error, examine status packet(s) for details'},
12: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'SDBP incoming buffer overflow (incoming packet too big)'},
205: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Flashing LED occurred. (Cold reset)'},
206: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Flashing LED occurred. (Warm reset)'}},
42: {1: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Mode Parameters Changed'},
2: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Log Parameters Changed'},
3: {0: 'Reservations preempted.',
1: 'Reservations preempted. (Clear service action)',
'L1': 'Unit Attention',
'L2': 'Reservations Preempted'},
4: {0: 'Reservations released.',
1: 'Reservations released. (Registration with reg key = 0)',
2: 'Reservations Released. (Preempt service action)',
3: 'Reservations Released. (Release service action)',
'L1': 'Unit Attention',
'L2': 'Reservations Released'},
5: {0: 'Registrations preempted.',
1: 'Registrations preempted 01.',
'L1': 'Unit Attention',
'L2': 'Registrations Preempted'},
9: {0: 'Capacity data changed',
'L1': 'Unit Attention',
'L2': 'Capacity data changed'}},
47: {0: {0: 'No Specific FRU code.',
1: 'Target is already unlocked by another initiator',
'L1': 'Unit Attention',
'L2': 'Tagged Commands Cleared By another Initiator'},
1: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Commands cleared due to power-off warning'}},
63: {0: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Target operating conditions have changed'},
1: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Download Occurred'},
2: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Changed Operating Definition'},
3: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Inquiry Data Has Changed'},
5: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Device Identifier Changed'},
145: {1: 'WWN in ETFLOG does not match CAPM WWN.',
'L1': 'Unit Attention',
'L2': 'WWN in ETFLOG does not match CAPM WWN'}},
91: {0: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Log Exception'}},
92: {0: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'RPL Status Change'}},
93: {0: {0: 'No Specific FRU code.',
4: 'Reallocation.',
5: 'Reallocation AST table.',
6: 'Reallocation DDT table.',
16: 'Hardware failure.',
20: 'Excessive reassigns.',
32: 'General failure.',
40: 'Flash Life Left Failure',
49: 'Head failure.',
50: 'Recovered data error rate.',
51: 'Recovered data error rate during early life (Xiotech SSD Only)',
55: 'Recovered TA.',
56: 'Hard TA event.',
64: 'Head flip.',
65: 'SSE (servo seek error).',
66: 'Write fault.',
67: 'Seek failure.',
69: 'Track following errors (Hit66).',
74: 'Seek performance failure.',
91: 'Spinup failure.',
107: 'Flash spinup failure',
117: 'Multiply threshold config.',
239: 'No control table on disk.(OBSOLETE)',
'L1': 'Unit Attention',
'L2': 'Failure Prediction Threshold Exceeded'},
255: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'False Failure Prediction Threshold Exceeded'}},
128: {144: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Host Read Redundancy Check Error'},
145: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Host Write Redundancy Check Error'},
146: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Disc Read Redundancy Check Error'},
147: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Disc Write Redundancy Check Error'},
148: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Xor Redundancy Check Error'}},
180: {0: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'Unreported Deferred Errors have been logged on log page 34h'}},
255: {0: {0: 'No Specific FRU code.',
'L1': 'Unit Attention',
'L2': 'FC SEL_ID changed'}}},
7: {3: {0: {0: 'No Specific FRU code.',
'L1': 'Data Protect',
'L2': 'Peripheral device write fault'}},
32: {2: {0: 'No Access Rights. Attempt to access security locked LBA.',
1: 'No Access Rights. Sanitize command pre-condition not met.',
'L1': 'Data Protect',
'L2': 'Access Denied'}},
39: {0: {0: 'Write protected.',
1: 'Write protected during ready check.',
2: 'Write protected media backend',
3: 'Write protected due to PIC failure',
4: 'Write protected due to reserved blocks exceeded threshold',
5: 'Write protected due to defects per die exceeded threshold',
6: 'Write protected due to retired blocks exceeded threshold',
7: 'Write protected due to write failure (Nand error)',
8: 'Write Protected due to Auto status command write failure',
9: 'Write protected due to spare blocks exceeded threshold',
10: 'Write protected due to GCU for system data is not available',
11: 'Write protected due to defect table read error during restore',
12: 'Zone is read only ( SMR Only)',
13: 'Write protected due to defect block list overflow',
'L1': 'Data Protect',
'L2': 'Write Protected'}}},
9: {8: {0: {0: 'No Specific FRU code.',
'L1': 'Firmware Error Constants',
'L2': 'Logical Unit Communication Failure'}},
128: {0: {0: 'General firmware error.',
1: 'Firmware error during CDB check.',
2: 'Error recovering log tables.',
3: 'UDS has no packet to send back, but it neglected to make this clear to the SDBP layer (E1 command).',
4: 'Processed unsupported UDS session (UDS should have rejected).',
5: 'Packet retrieval allowed by upper levels of UDS when no UDS trace retrieval was active.',
6: 'UDS trace retrieval is trying to include an empty finished frame trace file.',
7: 'Unexpected content split across UDS retrieved trace packets.',
8: 'UDS trace retrieval sense data without FAIL status or vice versa.',
9: 'UDS trace retrieval: Internal confusion over amount of trace.',
10: 'UDS \xe2\x80\x93 retrieval failure.',
11: '\xe2\x80\x9cDummy Cache\xe2\x80\x9d file request failed',
12: 'Failed to fix up system parameters during head depop.',
13: 'Write same command call to XOR data copy failed.',
14: 'Write same command call to create cache segment from buffer failed to allocate sufficient space.',
15: 'Request to read servo data timed out.',
16: 'Loading Disc Firmware failed',
17: 'Disc Firmware Signature Verification Failed.',
18: 'WriteAndOrVerifyCmd Xor copy failure',
19: 'WriteAndOrVerifyCmd request cache segment allocation failed',
20: 'Write Buffer detected an Unknown Error.',
21: 'Write Buffer detected a Corrupted Data Error.',
22: 'Write Buffer detected a Permanent Error.',
23: 'Write Buffer detected a Service Delivery/Target Failure Error.',
24: 'Phy Log Retrieval Failed',
25: 'failed to issue command to auxiliary processor',
26: "failed memory allocation on auxiliary processor's heap",
27: 'Loading PIC Firmware Failed',
28: 'Loading FME Firmware Failed',
29: 'LDevFormat() failed to allocate sufficient space.',
30: 'LDevFormat() call to XorCopyData() failed',
45: 'InitSurface() failed to allocate sufficient space.',
46: 'InitSurface () call to XorCopyData() failed',
61: 'Log Page cache not allocated',
62: 'Log Page cache not allocated',
63: 'Log Page not enough cache available',
64: 'SMART Frame Index corrupted on disc and not recoverable via f/w.',
65: 'NVC Disabled by error condition',
66: 'Log data save to disc failed',
67: 'Wait for phy after reset too long',
68: 'H2SAT unexpected condition occurred',
70: 'Memory allocation failed during Reassign Blocks command',
74: 'Diag command attempted to execute missing or incompatible Overlay code',
75: 'Wait for phy after reset too long',
80: 'Flash management access failed',
128: 'Invalid prime request.',
129: 'Request cannot be processed.',
130: 'Unsupported fault.',
131: 'Track address fault.',
132: 'Servo-Disc synchronization error.',
133: 'End of transfer reached prematurely.',
134: 'Unexpected sequencer timeout error.',
135: 'Unknown error in the NRZ Transfer logic.',
136: 'Unknown EDAC error.',
137: "Unknown Media Manager's error.",
138: 'Invalid disc halt.',
139: 'Unexpected sequencer halt condition.',
140: 'Unexpected sequencer halt.',
141: 'Unknown sequencer timeout error.',
142: 'Unknown NRZ interface error.',
143: 'Disc was soft halted.',
144: 'Fault condition error.',
145: 'Correct Buffer Completion timeout error.',
146: 'Maximum write passes of a zone exceeded. (Changed to 04/1C00/93)',
147: 'Maximum certify passes of a zone exceeded. (Changed to 04/1C00/94)',
148: 'Recovered seek error encountered.',
149: 'Forced to enter error recovery before error is encountered.',
150: 'Recovered servo command error.',
151: 'Partial reallocation performed.',
152: 'Transfer was truncated.',
153: 'Transfer completed.',
154: 'Track transfer completed.',
155: 'Scan Defect - Allocated scan time exceeded.',
156: 'IOEDIOECC parity error on write',
157: 'IOECC parity error on write',
158: 'IOECC error (correctable)',
159: 'EDAC stopped for FW erasure',
160: 'Reallocate Block - Input was not marked for pending reallocation.',
161: 'Input LBA was not found in the RST.',
162: 'Input PBA was not found in the resident DST 1',
163: 'Input PBA was not found in the resident DST 2',
164: 'DST Mgr - Skootch failed 1',
165: 'DST Mgr - Skootch failed 2',
166: 'DST Mgr - Insert failed',
167: 'Correction Buffer over-run, under-run, or EDC error',
168: 'Form FIFO over/under run error',
169: 'Failed to transition to active power',
170: 'Input LBA was marked as logged',
171: 'Format - Max number of servo flaws per track exceeded in servo coast',
172: 'Format - Write servo unsafe errors when the track already has multiple flaws',
173: "Formatter's parity RAM progress is not in sync with transfer.",
174: 'Disc Xfr - Conflict of R/W request resource.',
175: 'Conflict of R/W resource during write attempt of super block data.',
176: "Formatter's parity RAM progress not in sync with alt transfer.",
177: "Formatter's parity RAM is invalid for parity sectors update.",
178: "Formatter's parity RAM is invalid for parity sectors alt-update.",
179: 'Parity secs read of expected reallocated sectors not reallocated.',
180: 'Parity sectors write of expected reallocated sectors not reallocated.',
181: 'PVT not showing all super blocks valid on successful format.',
182: 'Sector Data Regen - Restart of transfer is required.',
183: 'Sector Data Regen - Restart of transfer failed on a reallocated blk.',
184: 'Sector Data Regeneration - Restart of transfer failed.',
185: 'Format - Dirty super blk on nedia not reported in PVT.',
186: 'Super Block Read - No user sectors available.',
187: 'Full R/W reallocation code support is not available.',
188: 'Full R/W reallocation code support is not available.',
189: 'Full R/W reallocation code support is not available.',
190: 'Super Block Read - Recovered Data using SuperC Block.',
191: 'ATIC DERPR Retry - Recovered Data using DERP ATIC retry.',
192: 'Unexpected Servo Response - Retry count equals zero for a non-PZT request',
193: 'Recovered Data using Intermediate Super Parity',
194: 'Overlapping Defect Blocks',
195: 'Missing Defect Blocks',
196: 'Input LBA was not protected from torn write',
197: 'Formatter transfer did not halt properly',
198: 'Servo DC calibration failed',
199: 'Invalid band LBA range encountered during dirty super blocks update attempt',
200: 'Detect Formatter FIFO pointer synchronization loss error',
201: 'Detect Formatter FIFO pointer synchronization loss error',
202: 'Full reallocation support not available',
203: 'Invalid block for unmark DART pending reallocation',
204: 'Mark pending DART skipped',
205: 'Outercode recovery scratchpad buffer size insufficient',
206: 'Recovered data using firmware Iterative OuterCode(IOC)',
207: 'AFH Heater DAC is <= 0',
208: 'AFH Calculated Heater DAC value is >= max allocated memory for heater DAC',
209: 'AFH DAC value supplied to the DAC actuation path is > -b/2a',
210: 'ATS2 Seek Error occurred along with Track address fault error',
211: 'Buffer overflow detected in Legacy mode read.',
'L1': 'Firmware Error Constants',
'L2': 'General Firmware Error Qualifier'},
82: {'L1': 'Firmware Error Constants',
'L2': 'General Firmware Error Qualifier',
'fru': ['Error byte returned by PMC code for various DITS APIs']}}},
11: {0: {30: {0: 'Invoke within a TCG session',
'L1': 'Aborted Command',
'L2': 'Sanitize command aborted'}},
8: {0: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Logical unit communication failure'},
1: {0: 'Logical Unit Communication Time-Out.',
128: 'Servo command timed out.',
129: 'Seek operation timed out.',
130: 'Seek operation has exceeded the recovery time limit.',
'L1': 'Aborted Command',
'L2': 'Logical Unit Communication Time-Out'}},
12: {16: {0: 'Write command requires initial access to a mapped out head.',
1: 'Write command attempted a seek to access a mapped out head.',
2: 'Write command encountered an alternate block mapped to a bad head.',
'L1': 'Aborted Command',
'L2': 'Command aborted due to multiple write errors'}},
14: {1: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'SAS abort command (10.2.3)'},
2: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'SAS abort command (9.2.6.3.3.8.1)'}},
16: {1: {0: 'No specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Logical Block guard check failed'},
2: {0: 'No specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Logical Block application tag check failed'},
3: {0: 'No specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Logical Block reference tag check failed'}},
17: {3: {1: 'Read command requires initial access to a mapped out head.',
2: 'Read command attempted a seek to access a mapped out head.',
3: 'Read command encountered an alternate block mapped to a bad head.',
4: 'Prefetch command for FIM has detected a failed LBA in the range',
'L1': 'Aborted Command',
'L2': 'Command aborted due to multiple read errors'}},
63: {15: {0: 'Echo buffer overwritten.',
1: 'Read buffer echo error.',
'L1': 'Aborted Command',
'L2': 'Echo buffer overwritten'}},
67: {0: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Message reject error'}},
68: {0: {0: 'Timed out while waiting in queue.',
1: 'Timed out during error recovery.',
2: 'Timed out while executing command.',
'L1': 'Aborted Command',
'L2': 'Overall Command Timeout'},
246: {0: 'FRU 00 \xe2\x80\x93 09 stand for error on head 0 \xe2\x80\x93 9.',
'L1': 'Aborted Command',
'L2': 'Data Integrity Check Failed during write'}},
69: {0: {0: 'Select/Reselection Failure.',
1: 'Select/Reselection time out.',
'L1': 'Aborted Command',
'L2': 'Select/Reselection Failure'}},
71: {0: {0: 'SCSI Parity Error in message phase.',
1: 'SCSI parity error in command phase.',
3: 'SCSI parity error in data phase.',
8: 'SCSI CRC error in data phase.',
'L1': 'Aborted Command',
'L2': 'SCSI Parity Error'},
3: {1: 'SCSI CRC error in command IU.',
8: 'SCSI CRC error in data (out) IU.',
'L1': 'Aborted Command',
'L2': 'Information Unit CRC Error'},
128: {9: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Fibre Channel Sequence Error'}},
72: {0: {1: 'Initiator detected error message received, selection path.',
2: 'Initiator detected error message received, reselection path.',
'L1': 'Aborted Command',
'L2': 'Initiator Detected Error Message Received'}},
73: {0: {1: 'Invalid message received, selection path.',
2: 'Invalid message received, reselection path.',
'L1': 'Aborted Command',
'L2': 'Invalid message received'}},
75: {0: {0: 'No Specific FRU code.',
2: 'Invalid source ID.',
3: 'Invalid destination ID.',
4: 'Running Disparity error.',
5: 'Invalid CRC.',
16: 'Invalid data frame during transfer and no xfr done.',
'L1': 'Aborted Command',
'L2': 'DATA phase error'},
1: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Invalid transfer tag'},
2: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Too much write data'},
3: {0: 'No Specific FRU code.',
1: 'Link reset occurred during transfer.',
5: 'Break received in the middle of a data frame',
6: 'Break received but unbalanced ACK/NAKs',
'L1': 'Aborted Command',
'L2': 'ACK NAK Timeout'},
4: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'NAK received'},
5: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Data offset error'},
6: {0: 'No Specific FRU code.',
1: 'Break Response Timeout',
2: 'Done Response Timeout',
3: 'SAS Credit Timeout',
'L1': 'Aborted Command',
'L2': 'Initiator Response Timeout'},
32: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'SAS Credit Timeout'},
255: {0: 'Drive type mismatch \xe2\x80\x93 download firmware',
'L1': 'Aborted Command',
'L2': 'Check FW Tags'}},
78: {0: {0: 'No Specific FRU code.',
1: 'SAS - Overlapped Commands Attempted.',
2: 'UDS trigger on non-queued cmd with outstanding NCQ cmds',
'L1': 'Aborted Command',
'L2': 'Overlapped Commands Attempted'}},
85: {4: {0: 'Cannot reassign if Media cache is not empty',
'L1': 'Aborted Command',
'L2': 'Insufficient Resources'}},
116: {8: {5: 'No Specific FRU code.',
'L1': 'Illegal Request',
'L2': 'Invalid Field Parameter \xe2\x80\x93 Check Sum'}},
128: {0: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Logical Unit Access Not Authorized.'}},
129: {0: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'LA Check Error.'},
1: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Unexpected Boot FW execution delay.'},
2: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Unexpected Customer FW execution delay.'},
3: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Unexpected FW download delay.'}},
251: {1: {0: 'No Specific FRU code.',
'L1': 'Aborted Command',
'L2': 'Command maps to a head marked as bad'}}},
13: {33: {0: {0: 'No Specific FRU code.',
1: 'UDS Trace retrieval complete.',
'L1': 'Volume Overflow Constants',
'L2': 'Logical Block Address Out of Range'}}},
14: {29: {0: {0: 'Miscompare During Verify Operation.',
128: 'Data miscompare error.',
129: 'Data miscompare error at erasure correction.',
'L1': 'Data Miscompare',
'L2': 'Miscompare During Verify Operation'}}}}
| 82.481225 | 297 | 0.40913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81,906 | 0.490625 |
f6ce2226b79bcac9a02e30d6519e6b995825bb27 | 263 | py | Python | kuwala/common/python_utils/src/time_utils.py | bmahmoudyan/kuwala | 7951ed49ac1c31c874a4446bb4661152c4d69c90 | [
"Apache-2.0"
] | 381 | 2021-04-08T13:04:57.000Z | 2022-03-29T09:49:46.000Z | kuwala/common/python_utils/src/time_utils.py | bmahmoudyan/kuwala | 7951ed49ac1c31c874a4446bb4661152c4d69c90 | [
"Apache-2.0"
] | 92 | 2021-04-20T12:28:40.000Z | 2022-03-30T17:55:36.000Z | kuwala/common/python_utils/src/time_utils.py | bmahmoudyan/kuwala | 7951ed49ac1c31c874a4446bb4661152c4d69c90 | [
"Apache-2.0"
] | 27 | 2021-04-26T17:52:32.000Z | 2022-03-21T19:36:34.000Z | import time
from time import sleep
def print_elapsed_time(exit_event):
    """Print a running elapsed-time counter until *exit_event* is set.

    Intended to run in a worker thread: rewrites the same console line
    (carriage return, no newline) roughly once per second.

    Parameters
    ----------
    exit_event : threading.Event
        When set, the loop terminates.
    """
    start_time = time.time()
    while not exit_event.is_set():
        print(f'Running for {round(time.time() - start_time)} s', end='\r')
        # wait() instead of sleep(): wakes up immediately when the event
        # is set, so shutdown is not delayed by up to a full second.
        exit_event.wait(1)
f6cea815c5270561f528bdb9b5e87bb0ee900148 | 8,218 | py | Python | adapted/adapt_cli.py | GoodTown/IDeAS-ADAPT-Client-GUI | 3b124cc1f8a9f49d1e84a90e519f4927e43ac547 | [
"Apache-2.0"
] | null | null | null | adapted/adapt_cli.py | GoodTown/IDeAS-ADAPT-Client-GUI | 3b124cc1f8a9f49d1e84a90e519f4927e43ac547 | [
"Apache-2.0"
] | null | null | null | adapted/adapt_cli.py | GoodTown/IDeAS-ADAPT-Client-GUI | 3b124cc1f8a9f49d1e84a90e519f4927e43ac547 | [
"Apache-2.0"
] | null | null | null | """ Adapt Server
This module contains all the functionality necessary to
setup an Adapt Server node (not really).
Example
-------
python3 adapt_server.py -v ingest -s testfile.txt
"""
import filesystem
from filesystem import FSOperationError
import sys
import os
import argparse
import logging
from general import hash_entity
from user import User, Keystore
from asset import Asset
from blockchain import BCDB
import config as cfg
logging.basicConfig(format='%(message)s', level=logging.INFO)
LOG = logging.getLogger(__name__)
class AdaptServer():
    """A class to represent an Adapt Server node.

    Wraps the UPSS filesystem handle, the blockchain (BigchainDB)
    connection and the local keystore, and exposes the three core ADAPT
    operations: ingest, retrieve and commit.

    Attributes
    ----------
    user_list : list
        Holds list of users. (Will probably be removed)
    keystore : Keystore or None
        Maps transaction ids to (block pointer, filename) pairs; assigned
        later via initKS() or Keystore.load() (see main()).
    fs : filesystem.UpssFs
        Handle to the underlying UPSS filesystem.
    bdb : BCDB
        Blockchain connection handle.

    Methods
    -------
    ingest(file_path, user):
        Ingests a new file (or directory of files) into ADAPT.
    retrieve(tid, user):
        Retrieves a file from ADAPT.
    commit(file_path, prev_tid, user):
        Commits a modified version of a file already existing in ADAPT.
    """
    def initKS(self, num_blocks=5000):
        # Create and persist a fresh, empty keystore.
        # NOTE(review): num_blocks is unused here -- confirm whether it was
        # meant to be forwarded to a filesystem initialization call.
        self.keystore = Keystore()
        self.keystore.save()
        print("keystore init")
    def setDebug(self):
        # Raise log verbosity for this module and the filesystem layer.
        LOG.setLevel(logging.DEBUG)
        filesystem.LOG.setLevel(logging.DEBUG)
    def __init__(self, bc_address):
        """Constructs all necessary attributes for an AdaptServer object.

        Parameters
        ----------
        bc_address : str
            Address of the blockchain node to connect to.
        """
        self.user_list = []
        # Not built here: callers use initKS() or Keystore.load() first.
        self.keystore = None
        self.fs = filesystem.UpssFs()
        self.bdb = BCDB(bc_address)
    def ingest(self, file_path, user):
        """Ingests a new file (or directory of files) into ADAPT

        Parameters
        ----------
        file_path : str
            Path of file(or directory) to ingest
        user : User
            User performing the ingest

        Returns
        -------
        Asset
            The blockchain asset recorded for this ingest.
        """
        file_path = os.path.abspath(file_path)
        if not os.path.exists(file_path):
            sys.exit("File path provided not valid.")
        try:
            self.fs.push(file_path)
        except FSOperationError:
            LOG.error("Ingest operation failed.")
            sys.exit(1)
        # Hash the ingested content so later retrievals can (in principle)
        # be checked against the on-chain record for tampering.
        fhash = hash_entity(file_path)
        LOG.debug(f"Hash for {file_path}:\n{fhash}\n")
        filename = os.path.basename(file_path)
        data = self.fs.get_data(filename)
        LOG.debug(f"Blockname: {data['bname']}\n")
        A = Asset(
            user.public_key,
            fhash,
            'PUT',
            tags={'ingest'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        # Remember where this transaction's file lives in the filesystem,
        # then persist the mapping.
        self.keystore.add(A.id, data['bpointer'], filename)
        self.keystore.save()
        return A
    def retrieve(self, tid, user):
        """Retrieves a file from ADAPT

        Parameters
        ----------
        tid : str
            Transaction id of requested file
        user : User
            User performing the retrieval

        Returns
        -------
        Asset
            The blockchain asset recorded for this retrieval.
        """
        prev_asset = Asset.from_id(tid, self.bdb)
        LOG.debug(f"RETRIEVE:\n{prev_asset}")
        try:
            (bpointer,filename) = self.keystore[tid]
            LOG.debug(f"bpointer: {bpointer}")
        except KeyError:
            LOG.error("Key does not exist in keystore. Exiting now.")
            sys.exit()
        # NOTE(review): "(unknown)" below looks like a redacted/garbled
        # filename -- presumably this should use the `filename` variable
        # from the keystore entry; confirm against upstream history.
        file_path = f"{filesystem.LOC}/(unknown)"
        try:
            self.fs.pull(bpointer, file_path)
        except FSOperationError:
            LOG.error("Retrieve operation failed.")
            sys.exit(1)
        LOG.info(f"Copied (unknown) from ADAPT-FS to ADAPT workspace")
        local_file_hash = hash_entity(file_path)
        # Tamper detection against the on-chain hash is currently disabled:
        # bc_file_hash = prev_asset.filehash
        # Check if the file has been modified without it being recorded on the blockchain
        # if local_file_hash != bc_file_hash:
        #     LOG.error(f"{file_path} has been modified or tampered with.")
        LOG.debug(f"fhash: {local_file_hash}")
        A = Asset(
            user.public_key,
            local_file_hash,
            'GET',
            parent=prev_asset.id,
            tags={'retrieve'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        return A
    def commit(self, file_path, prev_tid, user):
        """Commits a modified version of a file already existing in ADAPT

        Parameters
        ----------
        file_path : str
            Path of file to commit
        prev_tid : str
            Transaction id of file prior to modifications
        user : User
            User performing the commit

        Returns
        -------
        Asset
            The blockchain asset recorded for this commit (child of the
            asset identified by prev_tid).
        """
        file_path = os.path.abspath(file_path)
        if not os.path.exists(file_path):
            sys.exit("File path provided not valid.")
        prev_asset = Asset.from_id(prev_tid, self.bdb)
        LOG.debug(f"COMMIT: {prev_asset}")
        try:
            (bpointer,filename) = self.keystore[prev_tid]
            LOG.debug(f"bpointer: {bpointer}")
        except KeyError:
            LOG.error("Key does not exist in keystore. Exiting now.")
            sys.exit()
        # Overwrite the stored copy under the original filename.
        try:
            self.fs.push(file_path, filename)
        except FSOperationError:
            LOG.error("Commit operation failed.")
            sys.exit(1)
        try:
            data = self.fs.get_data(filename)
        except FSOperationError:
            LOG.error("Could not find information on given file.")
            sys.exit(1)
        LOG.debug(f"New Blockname: {data['bname']}")
        newfhash = hash_entity(file_path)
        LOG.debug(f"Blockpointer: {data['bpointer']}")
        A = Asset(
            user.public_key,
            newfhash,
            'PUT',
            parent=prev_asset.id,
            tags={'commit'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        self.keystore.add(A.id, data['bpointer'], filename)
        self.keystore.save()
        return A
def main():
    """Command-line entry point: parse arguments and run one ADAPT operation."""
    parser = argparse.ArgumentParser(description="Advanced Detection and Prevention of Tampering")
    parser.add_argument('-v', '--verbose', help='increase output verbosity (DEV USE ONLY)', action="store_true")
    subparser = parser.add_subparsers(dest='command')

    # One sub-command per ADAPT operation, each with its own options.
    cmd_ingest = subparser.add_parser('ingest', help="ingest a file into ADAPT")
    cmd_ingest.add_argument('-s', '--source', type=str, required=True, help="stuff")

    cmd_retrieve = subparser.add_parser('retrieve', help="retrieve a file from ADAPT")
    cmd_retrieve.add_argument('-t', '--tid', type=str, required=True)

    cmd_commit = subparser.add_parser('commit', help="commit a file into ADAPT")
    cmd_commit.add_argument('-s', '--source', type=str, required=True)
    cmd_commit.add_argument('-t', '--tid', type=str, required=True)

    cmd_init = subparser.add_parser('init', help="initialize ADAPT (only the filesystem)")
    cmd_init.add_argument('-n', '--num_blocks', type=int, required=False, default=5000, help="number of blocks")

    args = parser.parse_args()

    user = User("Foobar", "1234")
    node = AdaptServer(cfg.node_addresses['dev'])

    # With no arguments at all, show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    if args.verbose:
        LOG.setLevel(logging.DEBUG)
        filesystem.LOG.setLevel(logging.DEBUG)

    if args.command == 'init':
        # Fresh keystore plus filesystem initialization.
        node.keystore = Keystore()
        node.keystore.save()
        try:
            node.fs.initialize(args.num_blocks)
        except FSOperationError:
            LOG.error("Filesystem Initialization Failed.")
            sys.exit(1)
    else:
        # Every other command operates on an existing keystore.
        node.keystore = Keystore.load()
        if args.command == 'ingest':
            tid = node.ingest(args.source, user)
            LOG.info(f"TID: {tid}")
        elif args.command == 'retrieve':
            tid = node.retrieve(args.tid, user)
            LOG.info(f"TID: {tid}")
        elif args.command == 'commit':
            tid = node.commit(args.source, args.tid, user)
            LOG.info(f"TID: {tid}")
if __name__ == "__main__":
    main()
| 27.577181 | 112 | 0.582015 | 5,633 | 0.685447 | 0 | 0 | 0 | 0 | 0 | 0 | 3,226 | 0.392553 |
f6cf6e5adafcafd146a24c56c7720f6b81509ba4 | 1,229 | py | Python | app/classes.py | lightness/EmploymentAgency | ab9e4d339621a00a3f20a9e0e5a645e445188df0 | [
"Unlicense"
] | 1 | 2019-06-13T14:38:54.000Z | 2019-06-13T14:38:54.000Z | app/classes.py | lightness/EmploymentAgency | ab9e4d339621a00a3f20a9e0e5a645e445188df0 | [
"Unlicense"
] | null | null | null | app/classes.py | lightness/EmploymentAgency | ab9e4d339621a00a3f20a9e0e5a645e445188df0 | [
"Unlicense"
] | null | null | null | from django.core.urlresolvers import reverse, reverse_lazy
ALERT_TYPES = ("alert-success", "alert-info", "alert-warning", "alert-danger",)
BUTTON_TYPES = ("btn-default", "btn-primary", "btn-success", "btn-warning", "btn-danger", "btn-info", "btn-link",)
DEFAULT_ALERT_TYPE = ALERT_TYPES[0]
DEFAULT_BUTTON_TYPE = BUTTON_TYPES[0]
class Alert(object):
alert_class = None
text = None
button_class = None
button_text = None
button_redirect_url = None
def __init__(self, text, **kwargs):
# text
self.text = text
# alert class
alert_class = kwargs.get('alert_class', DEFAULT_ALERT_TYPE)
if alert_class in ALERT_TYPES:
self.alert_class = alert_class
else:
self.alert_class = DEFAULT_ALERT_TYPE
# button class
button_class = kwargs.get('button_class', DEFAULT_BUTTON_TYPE)
if button_class in BUTTON_TYPES:
self.button_class = button_class
else:
self.button_class = DEFAULT_BUTTON_TYPE
# button text
self.button_text = kwargs.get('button_text', None)
# button redirect view
self.button_redirect_url = reverse(kwargs.get('button_redirect_url','Home')) | 35.114286 | 114 | 0.6607 | 897 | 0.729862 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.223759 |
f6cf8e1e4cf3c4ec76807ee6ba0e6caf90f8c85f | 316 | py | Python | classes/__init__.py | OmarThinks/MoRG | fecf78e15453b0efa9223cd5196fea8176cdfdf3 | [
"MIT"
] | null | null | null | classes/__init__.py | OmarThinks/MoRG | fecf78e15453b0efa9223cd5196fea8176cdfdf3 | [
"MIT"
] | null | null | null | classes/__init__.py | OmarThinks/MoRG | fecf78e15453b0efa9223cd5196fea8176cdfdf3 | [
"MIT"
] | null | null | null | """
try:
from .NotReceived import NotReceived
from .errors import *
from .classreader import *
from .checkpoint import Checkpoint
except Exception as e:
from NotReceived import NotReceived
from errors import *
from classreader import *
from checkpoint import Checkpoint
"""
"""
import sys
print(sys.path)""" | 19.75 | 37 | 0.759494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.993671 |
f6cfba446c1b7d27bb2983b109b1c4404b84a1da | 2,084 | py | Python | project_name/project_name/settings/dev.py | aexeagmbh/django-project-template | 123c4bd8b79320b460677d8df42895600ad99393 | [
"MIT"
] | null | null | null | project_name/project_name/settings/dev.py | aexeagmbh/django-project-template | 123c4bd8b79320b460677d8df42895600ad99393 | [
"MIT"
] | null | null | null | project_name/project_name/settings/dev.py | aexeagmbh/django-project-template | 123c4bd8b79320b460677d8df42895600ad99393 | [
"MIT"
] | null | null | null | # coding=utf-8
"""Development settings and globals."""
from .base import *
# ######### DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# ######### END DEBUG CONFIGURATION
# ######### EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# ######### END EMAIL CONFIGURATION
# ######### DATABASE CONFIGURATION
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ project_name }}',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# ######### TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.cache.CacheDebugPanel',
# 'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.profiling.ProfilingDebugPanel',
]
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# boto (not use on dev):
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': '{}.show_toolbar'.format(__name__),
}
def show_toolbar(request):
return DEBUG
# ######### END TOOLBAR CONFIGURATION
BROKER_URL = 'amqp://guest@rabbitmq'
AMQP_HTTP_API_URL = 'rabbitmq:15672'
CELERY_RESULT_BACKEND = 'disabled'
ENVIRONMENT = 'dev'
try:
from .local_settings import *
except ImportError:
print('No local settings found')
| 26.717949 | 93 | 0.712092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,438 | 0.690019 |
f6d0cac352b221bf2f7870f056af97d65cf0f1e3 | 354 | py | Python | examples/unlock_antidotes.py | astitva22/Pixelate-22-Sample-Arena | 0bee7aea075d760e60456d38225bb3d6d1b68fed | [
"MIT"
] | 1 | 2022-03-01T20:39:25.000Z | 2022-03-01T20:39:25.000Z | examples/unlock_antidotes.py | astitva22/Pixelate-22-Sample-Arena | 0bee7aea075d760e60456d38225bb3d6d1b68fed | [
"MIT"
] | null | null | null | examples/unlock_antidotes.py | astitva22/Pixelate-22-Sample-Arena | 0bee7aea075d760e60456d38225bb3d6d1b68fed | [
"MIT"
] | 7 | 2022-03-01T20:37:14.000Z | 2022-03-09T06:27:38.000Z | import gym
import pixelate_arena
import time
import pybullet as p
import os
if __name__ == "__main__":
parent_path = os.path.dirname(os.getcwd())
os.chdir(parent_path)
env = gym.make("pixelate_arena-v0")
x=0
while True:
p.stepSimulation()
if x==10000:
env.unlock_antidotes()
x+=1
time.sleep(1) | 20.823529 | 46 | 0.629944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.081921 |
f6d1ef89f7ebaa279f2502614a3dee1d2c83c0b6 | 646 | py | Python | web_service/src/__init__.py | leckijakub/hipotap | 28fb20a165d0c50379828285efe18169e3ecce0f | [
"MIT"
] | null | null | null | web_service/src/__init__.py | leckijakub/hipotap | 28fb20a165d0c50379828285efe18169e3ecce0f | [
"MIT"
] | 4 | 2022-03-21T20:45:22.000Z | 2022-03-30T18:15:40.000Z | web_service/src/__init__.py | leckijakub/hipotap | 28fb20a165d0c50379828285efe18169e3ecce0f | [
"MIT"
] | null | null | null | from flask import Flask
def create_app():
    """Application factory: build and configure the Flask app.

    Registers the auth, main, offers and orders blueprints in that order.
    """
    app = Flask(__name__)
    app.config["SECRET_KEY"] = "secret-key-goes-here"

    # Imported lazily inside the factory to avoid circular imports.
    from .blue_prints.auth import auth as auth_blueprint
    from .blue_prints.main import main as main_blueprint
    from .blue_prints.offers import offers as offers_blueprint
    from .blue_prints.orders import orders as orders_blueprint

    # Registration order matches the original module order.
    for blueprint in (auth_blueprint, main_blueprint,
                      offers_blueprint, orders_blueprint):
        app.register_blueprint(blueprint)

    return app
f6d2aef8cfd625f778e471dacf0dbcddb046931b | 3,005 | py | Python | BertLibrary/models/BertModel.py | PinkDraconian/Bert-as-a-Library | 04171b439b970e9c0f4384095f9275ae5520c177 | [
"MIT"
] | 13 | 2019-08-06T14:26:57.000Z | 2021-09-20T15:05:29.000Z | BertLibrary/models/BertModel.py | PinkDraconian/Bert-as-a-Library | 04171b439b970e9c0f4384095f9275ae5520c177 | [
"MIT"
] | 1 | 2020-03-10T17:04:40.000Z | 2021-01-12T13:53:41.000Z | BertLibrary/models/BertModel.py | PinkDraconian/Bert-as-a-Library | 04171b439b970e9c0f4384095f9275ae5520c177 | [
"MIT"
] | 2 | 2021-03-16T11:40:39.000Z | 2021-07-06T19:21:50.000Z | import os
import tensorflow as tf
from BertLibrary.bert_predictor import BertPredictor
from BertLibrary.bert_trainer import BertTrainer
from BertLibrary.bert_evaluator import BertEvaluator
from tensorflow.estimator import Estimator
from tensorflow.estimator import RunConfig
from BertLibrary.bert.run_classifier import *
import BertLibrary.bert.modeling as modeling
import BertLibrary.bert.tokenization as tokenization
class BertModel:
    """Base wrapper exposing a pre-trained BERT checkpoint as an Estimator.

    Holds checkpoint configuration, builds the ``tf.estimator.Estimator``
    and the WordPiece tokenizer, and hands out predictor / trainer /
    evaluator helpers. Subclasses must implement :meth:`get_model_fn`.
    """
    def __init__(self,
                 model_dir,
                 ckpt_name,
                 do_lower_case,
                 max_seq_len,
                 batch_size,
                 labels,
                 trainable=True,
                 keep_checkpoint_max=5,
                 config=None):
        """Record model settings and locate the checkpoint files.

        Parameters
        ----------
        model_dir : str
            Directory containing ``bert_config.json``, ``vocab.txt`` and
            the checkpoint files.
        ckpt_name : str
            Checkpoint file name inside ``model_dir``.
        do_lower_case : bool
            Whether the tokenizer lower-cases its input.
        max_seq_len : int
            Maximum tokenized sequence length.
        batch_size : int
            Batch size passed to the estimator.
        labels : list
            Classification labels.
        trainable : bool
            If False, :meth:`get_trainer` refuses to build a trainer.
        keep_checkpoint_max : int
            Number of checkpoints to retain during training.
        config : tf.ConfigProto, optional
            Session configuration; a default is created lazily in
            :meth:`get_config` when omitted.
        """
        self.model_dir = model_dir
        self.bert_config, self.vocab_file, \
            self.init_checkpoint = self.get_model_configs(model_dir, ckpt_name)
        self.do_lower_case = do_lower_case
        self.max_seq_len = max_seq_len
        self.batch_size = batch_size
        self.processer = None
        self.keep_checkpoint_max = keep_checkpoint_max
        self.labels = labels
        self.config = config if config else None
        self.predictor = None
        self.trainable = trainable
    def build(self, model_fn_args, config_args):
        """Instantiate the estimator and tokenizer from stored settings."""
        config = self.get_config(**config_args)
        model_fn = self.get_model_fn(**model_fn_args)
        self.estimator = Estimator(
            model_fn=model_fn,
            config=config,
            params={'batch_size': self.batch_size})
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=self.vocab_file, do_lower_case=self.do_lower_case)
    def get_model_configs(self, base_dir, ckpt_name):
        """Return (bert_config, vocab_file, init_checkpoint) for a model dir."""
        bert_config_file = os.path.join(base_dir, 'bert_config.json')
        vocab_file = os.path.join(base_dir, 'vocab.txt')
        init_checkpoint = os.path.join(base_dir, ckpt_name)
        bert_config = modeling.BertConfig.from_json_file(bert_config_file)
        return bert_config, vocab_file, init_checkpoint
    def get_config(self, ckpt_output_dir='./output', save_check_steps=1000):
        """Build the estimator RunConfig, creating a default session config.

        The default session config allows GPU memory growth and caps the
        per-process GPU memory fraction at 0.5.
        """
        if not self.config:
            self.config = tf.ConfigProto(device_count={'GPU': 1})
            self.config.gpu_options.allow_growth = True
            self.config.gpu_options.per_process_gpu_memory_fraction = 0.5
        run_config = RunConfig(
            model_dir=ckpt_output_dir,
            session_config=self.config,
            keep_checkpoint_max=self.keep_checkpoint_max,
            save_checkpoints_steps=save_check_steps)
        return run_config
    def get_predictor(self):
        """Return a BertPredictor bound to this model's estimator."""
        return BertPredictor(self.estimator, self.processer, self.config)
    def get_trainer(self):
        """Return a BertTrainer; only valid when the model is trainable."""
        assert self.trainable, 'This model cannot be trained'
        return BertTrainer(self)
    def get_evaluator(self, iter_steps=1000):
        """Return a BertEvaluator reporting every ``iter_steps`` steps."""
        return BertEvaluator(self, iter_steps=iter_steps)
    def get_model_fn(self, *args):
        """Produce the estimator model_fn; must be overridden by subclasses."""
        # Bug fix: this previously *returned* a NotImplementedError instance
        # instead of raising it, so build() on the base class would silently
        # hand an exception object to the Estimator as its model_fn.
        raise NotImplementedError()
f6d33fa7bc783b74658c4fb6fc2bd1803a290e65 | 2,331 | py | Python | neuroscout/tasks/utils.py | effigies/BLiMP | 509192cd593e0a496a441b08ac0e2d48f9627d55 | [
"BSD-3-Clause"
] | null | null | null | neuroscout/tasks/utils.py | effigies/BLiMP | 509192cd593e0a496a441b08ac0e2d48f9627d55 | [
"BSD-3-Clause"
] | null | null | null | neuroscout/tasks/utils.py | effigies/BLiMP | 509192cd593e0a496a441b08ac0e2d48f9627d55 | [
"BSD-3-Clause"
] | null | null | null | """ utils """
import json
import tarfile
from ..utils.db import put_record, dump_pe
from ..models import Analysis, PredictorEvent
from ..schemas.analysis import AnalysisFullSchema, AnalysisResourcesSchema
def update_record(model, exception=None, **fields):
    """Persist *fields* onto *model* via put_record and return them.

    If *exception* is given, its text is appended to an existing 'traceback'
    field and the record's status defaults to 'FAILED' (unless the caller
    already set a status).
    """
    if exception is not None:
        if 'traceback' in fields:
            # NOTE: the backslash continuation embeds this line's leading
            # whitespace into the stored traceback string.
            fields['traceback'] = f"{fields['traceback']}. \
            Error:{str(exception)}"
        if 'status' not in fields:
            fields['status'] = 'FAILED'
    put_record(fields, model)
    return fields
def write_jsons(objects, base_dir):
    """Serialize each (object, base name) pair to a .json file under *base_dir*.

    Args:
        objects: iterable of (JSON-serializable object, base file name) pairs
        base_dir: Path-like directory to write into
    Returns:
        list of (full path string, file name) tuples, one per file written
    """
    written = []
    for payload, stem in objects:
        target = (base_dir / stem).with_suffix('.json')
        with target.open('w') as handle:
            json.dump(payload, handle)
        written.append((str(target), target.name))
    return written
def write_tarball(paths, filename):
    """Bundle the given files into a gzip-compressed tarball.

    Args:
        paths: iterable of (source path, archive member name) pairs
        filename: full path of the .tar.gz to create
    """
    with tarfile.open(filename, "w:gz") as archive:
        for source, member_name in paths:
            archive.add(source, arcname=member_name)
def dump_analysis(analysis_id, run_id=None):
    """Serialize an Analysis and the PredictorEvents of its (selected) runs.

    Args:
        analysis_id: hash id of the analysis to dump
        run_id: optional list of run ids; defaults to every run of the analysis
    Raises:
        ValueError: if a requested run id does not belong to the analysis.
    Returns:
        (analysis.id, analysis JSON, resources JSON, dumped events, dataset path)
    """
    analysis = Analysis.query.filter_by(hash_id=analysis_id).one()
    # Marshmallow-style dump: index [0] holds the serialized data
    analysis_json = AnalysisFullSchema().dump(analysis)[0]
    resources_json = AnalysisResourcesSchema().dump(analysis)[0]
    predictor_ids = [p['id'] for p in analysis_json['predictors']]
    all_run_ids = [r['id'] for r in analysis_json['runs']]
    selected = all_run_ids if run_id is None else run_id
    if not set(selected) <= set(all_run_ids):
        raise ValueError("Incorrect run id specified")
    events = PredictorEvent.query.filter(
        (PredictorEvent.predictor_id.in_(predictor_ids)) &
        (PredictorEvent.run_id.in_(selected)))
    return (analysis.id, analysis_json, resources_json, dump_pe(events),
            analysis.dataset.local_path)
| 31.931507 | 78 | 0.661948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 774 | 0.332046 |
f6d7d9fa5e972fea2d582789bda3bf36878133b2 | 3,673 | py | Python | test/priors/test_half_cauchy_prior.py | noamsgl/gpytorch | c3e3109c756ace47d82ff8b1f52b2d77748ff09c | [
"MIT"
] | 188 | 2017-06-09T20:42:18.000Z | 2018-02-15T11:17:09.000Z | test/priors/test_half_cauchy_prior.py | noamsgl/gpytorch | c3e3109c756ace47d82ff8b1f52b2d77748ff09c | [
"MIT"
] | 49 | 2017-07-18T02:55:17.000Z | 2018-02-15T21:23:42.000Z | test/priors/test_half_cauchy_prior.py | noamsgl/gpytorch | c3e3109c756ace47d82ff8b1f52b2d77748ff09c | [
"MIT"
] | 24 | 2017-07-12T17:29:52.000Z | 2018-02-15T19:25:07.000Z | #!/usr/bin/env python3
import unittest
import torch
from torch.distributions import HalfCauchy
from gpytorch.priors import HalfCauchyPrior
from gpytorch.test.utils import least_used_cuda_device
class TestHalfCauchyPrior(unittest.TestCase):
    """Tests for HalfCauchyPrior; CUDA variants are skipped without a GPU."""

    def test_half_cauchy_prior_to_gpu(self):
        if torch.cuda.is_available():
            # BUG FIX: HalfCauchy has no `concentration`/`rate` parameters (those
            # belong to Gamma) and plain torch distributions have no .cuda();
            # use the gpytorch prior (an nn.Module) and its `scale` parameter.
            prior = HalfCauchyPrior(1.0).cuda()
            self.assertEqual(prior.scale.device.type, "cuda")

    def test_half_cauchy_prior_validate_args(self):
        # a negative scale must be rejected when validation is on
        with self.assertRaises(ValueError):
            HalfCauchyPrior(-1, validate_args=True)
        with self.assertRaises(ValueError):
            HalfCauchyPrior(-1, validate_args=True)

    def test_half_cauchy_prior_log_prob(self, cuda=False):
        """Prior log_prob must match the underlying torch HalfCauchy."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        prior = HalfCauchyPrior(0.1)
        dist = HalfCauchy(0.1)
        t = torch.tensor(1.0, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.tensor([1.5, 0.5], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.tensor([[1.0, 0.5], [3.0, 0.25]], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))

    def test_half_cauchy_prior_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                # BUG FIX: previously delegated to test_gamma_prior_log_prob,
                # which does not exist on this test case (copy/paste slip).
                return self.test_half_cauchy_prior_log_prob(cuda=True)

    def test_half_cauchy_prior_log_prob_log_transform(self, cuda=False):
        """With transform=exp, log_prob is evaluated on the exp of the input."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        prior = HalfCauchyPrior(0.1, transform=torch.exp)
        dist = HalfCauchy(0.1)
        t = torch.tensor(0.0, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
        t = torch.tensor([-1, 0.5], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
        t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))

    def test_half_cauchy_prior_log_prob_log_transform_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_half_cauchy_prior_log_prob_log_transform(cuda=True)

    def test_half_cauchy_prior_batch_log_prob(self, cuda=False):
        """Batched scales must broadcast correctly and reject mismatched shapes."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        prior = HalfCauchyPrior(0.1)
        dist = HalfCauchy(0.1)
        t = torch.ones(2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.ones(2, 2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        scale = torch.tensor([0.1, 1.0], device=device)
        prior = HalfCauchyPrior(scale)
        dist = HalfCauchy(scale)
        t = torch.ones(2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.ones(2, 2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        with self.assertRaises(ValueError):
            prior.log_prob(torch.ones(3, device=device))
        with self.assertRaises(ValueError):
            prior.log_prob(torch.ones(2, 3, device=device))

    def test_half_cauchy_prior_batch_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_half_cauchy_prior_batch_log_prob(cuda=True)
if __name__ == "__main__":
    # Allow running this test module directly: python test_half_cauchy_prior.py
    unittest.main()
| 41.738636 | 84 | 0.665941 | 3,424 | 0.932208 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.020964 |
f6d888c56b6287070e718e5217a701c20c0ee095 | 2,303 | py | Python | Tools/decrypt_ulog.py | lgarciaos/Firmware | 26dba1407bd1fbc65c23870a22fed904afba6347 | [
"BSD-3-Clause"
] | 4,224 | 2015-01-02T11:51:02.000Z | 2020-10-27T23:42:28.000Z | Tools/decrypt_ulog.py | lgarciaos/Firmware | 26dba1407bd1fbc65c23870a22fed904afba6347 | [
"BSD-3-Clause"
] | 11,736 | 2015-01-01T11:59:16.000Z | 2020-10-28T17:13:38.000Z | Tools/decrypt_ulog.py | lgarciaos/Firmware | 26dba1407bd1fbc65c23870a22fed904afba6347 | [
"BSD-3-Clause"
] | 11,850 | 2015-01-02T14:54:47.000Z | 2020-10-28T16:42:47.000Z | #!/usr/bin/env python3
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import ChaCha20
from Crypto.Hash import SHA256
import binascii
import argparse
#from pathlib import Path
import sys
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""CLI tool to decrypt an ulog file\n""")
    parser.add_argument("ulog_file", help=".ulog file", nargs='?', default=None)
    parser.add_argument("ulog_key", help=".ulogk, encrypted key", nargs='?', default=None)
    parser.add_argument("rsa_key", help=".pem format key for decrypting the ulog key", nargs='?', default=None)
    args = parser.parse_args()
    # All three files are required to decrypt anything
    if not args.ulog_file or not args.ulog_key or not args.rsa_key:
        print('Need all arguments, the encrypted ulog file, the key and the key decryption key')
        sys.exit(1)
    # Read the private RSA key used to unwrap the ChaCha20 session key
    with open(args.rsa_key, 'rb') as f:
        r = RSA.importKey(f.read(), passphrase='')
    # Read the wrapped ChaCha20 key and the nonce from the key file
    with open(args.ulog_key, 'rb') as f:
        ulog_key_header = f.read(22)
        # Parse the header
        try:
            # magic
            if not ulog_key_header.startswith(bytearray("ULogKey".encode())):
                raise ValueError("bad magic")
            # version
            if ulog_key_header[7] != 1:
                raise ValueError("unsupported version")
            # expected key exchange algorithm (RSA_OAEP)
            if ulog_key_header[16] != 4:
                raise ValueError("unexpected key exchange algorithm")
            # little-endian sizes of the wrapped key and the nonce
            key_size = ulog_key_header[19] << 8 | ulog_key_header[18]
            nonce_size = ulog_key_header[21] << 8 | ulog_key_header[20]
            ulog_key_cipher = f.read(key_size)
            nonce = f.read(nonce_size)
        # Narrowed from a bare `except:`; a short header raises IndexError
        except (ValueError, IndexError):
            print("Keyfile format error")
            sys.exit(1)
    # Unwrap the ChaCha20 key with RSA-OAEP
    cipher_rsa = PKCS1_OAEP.new(r, SHA256)
    ulog_key = cipher_rsa.decrypt(ulog_key_cipher)
    # Read and decrypt the .ulgc into the matching .ulg file.
    # BUG FIX: rstrip(last_char) removed *every* trailing copy of that
    # character from the name; slicing drops exactly the final one.
    cipher = ChaCha20.new(key=ulog_key, nonce=nonce)
    with open(args.ulog_file, 'rb') as f:
        with open(args.ulog_file[:-1], 'wb') as out:
            out.write(cipher.decrypt(f.read()))
| 36.555556 | 111 | 0.639601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.280938 |
f6dbbab722f0cfd5200a6eb72ad30ddd4c962c78 | 1,179 | py | Python | modules/math-codes/modules/algebra/linear-equations/src/test_slope.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | 1 | 2020-09-06T22:17:19.000Z | 2020-09-06T22:17:19.000Z | modules/math-codes/modules/algebra/linear-equations/src/test_slope.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | modules/math-codes/modules/algebra/linear-equations/src/test_slope.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
import pandas as pd
# Plot the line y = (3x - 4) / 2 over x in [-10, 10] and highlight its slope.
df = pd.DataFrame ({'x': range(-10, 10+1)})
df['y'] = (3*df['x'] - 4) / 2
plt.figure(figsize=(10, 10))
plt.plot(df.x, df.y, color="grey", marker = "o")
plt.xlabel('x')
plt.ylabel('y = 3*x -4 / 2')
plt.title("Relação Linear entre as variáveis x e y = 3*x -4/2")
plt.xticks(range(-10, 10+1, 1))
plt.yticks(range(-20, 20+1, 1))
plt.axhline() # Draw a horizontal line (the x-axis)
plt.axvline() # Draw a vertical line (the y-axis)
plt.annotate('x-intercept',(1.333, 0)) # Place a text label at a fixed (x, y) coordinate.
plt.annotate('y-intercept',(0,-2)) # Place a text label at a fixed (x, y) coordinate.
m = 1.5 # Store the slope we computed.
yInt = -2 # The y-axis intercept (value of y when x is zero).
# Draw the slope from the y-intercept over one unit of x
mx = [0, 1] # Starts at x = 0 and advances one unit, i.e. x = 1.
my = [yInt, yInt + m] # y starts at the intercept -2 (x = 0) and rises by the slope m = 1.5.
plt.plot(mx, my, color='red', lw=5)
plt.grid()
plt.savefig('../images/plot-04.png', format='png')
plt.show()
| 39.3 | 110 | 0.65564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 688 | 0.573812 |
f6de464d223fc210c33e43316c1d71d4ff532268 | 190 | py | Python | website/message/admin.py | m3alamin/message-system | 44b50be1426236483ff14e2ea3ed76755ad81ea8 | [
"MIT"
] | 1 | 2018-07-23T10:51:20.000Z | 2018-07-23T10:51:20.000Z | website/message/admin.py | m3alamin/message-system | 44b50be1426236483ff14e2ea3ed76755ad81ea8 | [
"MIT"
] | null | null | null | website/message/admin.py | m3alamin/message-system | 44b50be1426236483ff14e2ea3ed76755ad81ea8 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Message, Reply, Reader
# Expose the messaging models (Message, Reply, Reader) in the Django admin site.
admin.site.register(Message)
admin.site.register(Reply)
admin.site.register(Reader)
| 23.75 | 42 | 0.805263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.147368 |
f6ded3c6b708c223c654cc059475981aefcf7444 | 2,570 | py | Python | Authors' code/Few_shot_learning/models/selector.py | onicolini/zero-shot_knowledge_transfer | 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | [
"MIT"
] | null | null | null | Authors' code/Few_shot_learning/models/selector.py | onicolini/zero-shot_knowledge_transfer | 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | [
"MIT"
] | null | null | null | Authors' code/Few_shot_learning/models/selector.py | onicolini/zero-shot_knowledge_transfer | 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | [
"MIT"
] | 1 | 2019-10-27T15:44:17.000Z | 2019-10-27T15:44:17.000Z | from models.lenet import *
from models.wresnet import *
import os
def select_model(dataset,
                 model_name,
                 pretrained=False,
                 pretrained_models_path=None):
    """Instantiate the requested backbone for *dataset*.

    For small image datasets a LeNet or Wide-ResNet is built (optionally
    loading a local checkpoint); for ImageNet a torchvision-style ResNet is
    returned. Raises NotImplementedError for any other dataset.
    """
    if dataset in ['SVHN', 'CIFAR10', 'CINIC10', 'CIFAR100']:
        n_classes = 100 if dataset == 'CIFAR100' else 10
        assert model_name in ['LeNet', 'WRN-16-1', 'WRN-16-2', 'WRN-40-1', 'WRN-40-2']
        if model_name == 'LeNet':
            model = LeNet32(n_classes=n_classes)
        else:
            # names encode the architecture: 'WRN-<depth>-<widen_factor>'
            _, depth, widen_factor = model_name.split('-')
            model = WideResNet(depth=int(depth), num_classes=n_classes,
                               widen_factor=int(widen_factor), dropRate=0.0)
        if pretrained:
            model_path = os.path.join(pretrained_models_path, dataset, model_name, "last.pth.tar")
            print('Loading Model from {}'.format(model_path))
            # NOTE(review): `torch` is only imported under __main__ in this file;
            # confirm the star imports above re-export it, else this raises NameError.
            checkpoint = torch.load(model_path, map_location='cpu')
            model.load_state_dict(checkpoint['state_dict'])
    elif dataset == 'ImageNet':
        assert model_name in ['ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152']
        constructors = {'ResNet18': resnet18, 'ResNet34': resnet34, 'ResNet50': resnet50,
                        'ResNet101': resnet101, 'ResNet152': resnet152}
        model = constructors[model_name](pretrained=pretrained)
    else:
        raise NotImplementedError
    return model
if __name__ == '__main__':
    # Smoke test: build a WRN-16-2 for CIFAR10 and time one forward pass.
    import torch
    from torchsummary import summary
    import random
    import time
    random.seed(1234)  # torch transforms use this seed
    torch.manual_seed(1234)
    torch.cuda.manual_seed(1234)
    # one batch of 64 random 32x32 RGB images in [0, 1]
    support_x_task = torch.autograd.Variable(torch.FloatTensor(64, 3, 32, 32).uniform_(0, 1))
    t0 = time.time()
    model = select_model('CIFAR10', model_name='WRN-16-2')
    output, act = model(support_x_task)
    print("Time taken for forward pass: {} s".format(time.time() - t0))
    print("\nOUTPUT SHAPE: ", output.shape)
summary(model, (3, 32, 32)) | 38.939394 | 98 | 0.633463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.163424 |
f6df43c8e47a8e2830e3c578575609c364fe0d3b | 4,818 | py | Python | navigator/auth/handlers.py | phenobarbital/navigator-api | 15a0336b570ec861bdcc9c225e6f1b5684900a9d | [
"Apache-2.0",
"BSD-3-Clause"
] | 10 | 2020-07-27T03:33:20.000Z | 2022-02-18T21:25:49.000Z | navigator/auth/handlers.py | phenobarbital/navigator-api | 15a0336b570ec861bdcc9c225e6f1b5684900a9d | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-09-07T15:20:54.000Z | 2021-05-28T00:56:45.000Z | navigator/auth/handlers.py | phenobarbital/navigator-api | 15a0336b570ec861bdcc9c225e6f1b5684900a9d | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-07-27T07:36:45.000Z | 2021-09-26T18:36:34.000Z | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
import asyncio
import base64
import json
import os
import sys
from aiohttp import web
from navigator.conf import (
DEBUG,
SESSION_PREFIX,
SESSION_URL,
SESSION_KEY,
config
)
from navigator.handlers import nav_exception_handler
from navigator.exceptions import (
NavException,
UserDoesntExists,
InvalidAuth
)
from navigator.auth.sessions import get_session, new_session
from navigator.views import BaseView, BaseHandler
from asyncdb.utils.encoders import BaseEncoder, DefaultEncoder
from navigator.auth.models import User
class UserHandler(BaseView):
    """aiohttp view exposing the current user's session (GET/DELETE/PUT)."""

    async def session(self):
        """Fetch the caller's session; on failure return a critical-error response."""
        session = None
        try:
            session = await get_session(self.request)
        except Exception as err:
            print(err)
            # NOTE(review): returns an HTTP response object here, which callers
            # then treat as a session value — confirm this is intended.
            return self.critical(
                request=self.request,
                exception=err
            )
        return session

    async def get(self):
        """ Getting Session information."""
        session = await self.session()
        try:
            if not session:
                headers = {"x-status": "Empty", "x-message": "Invalid User Session"}
                return self.no_content(headers=headers)
            else:
                try:
                    # the session id is stored under the configured SESSION_KEY
                    sessionid = session[SESSION_KEY]
                except KeyError:
                    return self.error('Invalid Session, missing Session ID')
                headers = {"x-status": "OK", "x-message": "Session OK"}
                userdata = dict(session)
                data = {
                    "session_id": sessionid,
                    **userdata
                }
                # NOTE(review): if `data` were falsy this falls through and
                # implicitly returns None — confirm that is acceptable.
                if data:
                    return self.json_response(
                        response=data,
                        headers=headers
                    )
        except Exception as err:
            return self.error(
                self.request,
                exception=err
            )

    async def delete(self):
        """ Close and Delete User Session."""
        session = await self.session()
        try:
            app = self.request.app
            router = app.router
            session.invalidate()
            print(session)
        except Exception as err:
            print(err, err.__class__.__name__)
            return self.critical(
                request=self.request,
                exception=err,
                state=501
            )
        # return a redirect to LOGIN
        return web.HTTPFound(router["login"].url_for())

    async def put(self):
        """Re-login and re-authenticate..."""
class UserInfo(BaseHandler):
    """Request handlers for user profile retrieval and logout."""

    async def session(self, request):
        """Fetch the session for *request*; on failure return a critical-error response."""
        session = None
        try:
            session = await get_session(request)
        except Exception as err:
            print(err)
            return self.critical(
                request=request,
                exception=err
            )
        return session

    async def profile(self, request):
        """Return the authenticated user's record as JSON."""
        session = await self.session(request)
        print(session)
        if not session:
            headers = {"x-status": "Empty", "x-message": "Invalid User Session"}
            return self.no_content(headers=headers)
        else:
            try:
                # NOTE(review): this reads session['id'] while UserHandler.get
                # reads session[SESSION_KEY] — confirm which key the store uses.
                sessionid = session['id']
            except KeyError:
                return self.error('Invalid Session, missing Session ID')
        # getting User information
        try:
            user_id = request["user_id"]
        except KeyError:
            # fall back to the user id recorded inside the session payload
            info = session[sessionid]
            user_id = info['user_id']
        try:
            user = await User.get(user_id=user_id)
            return web.Response(
                text=user.json(ensure_ascii=True, indent=4),
                status=200,
                content_type="application/json"
            )
        except Exception as err:
            print(err)
            return self.critical(
                request=request,
                exception=err
            )

    async def logout(self, request):
        """ Close and Delete User Session."""
        session = await self.session(request)
        try:
            app = request.app
            # NOTE(review): `router` is assigned but never used below.
            router = app.router
            session.invalidate()
        except Exception as err:
            print(err, err.__class__.__name__)
            response = {
                "message": f"Exception on: {err.__class__.__name__}",
                "error": str(err)
            }
            args = {
                "status": 501,
                "content_type": "application/json",
                "text": json.dumps(response, cls=DefaultEncoder)
            }
            return web.Response(**args)
        # return a redirect to LOGIN
        # TODO: configure the return of LOGOUT
        return web.HTTPFound('/')
| 29.558282 | 84 | 0.527397 | 4,210 | 0.873807 | 0 | 0 | 0 | 0 | 4,112 | 0.853466 | 687 | 0.14259 |
f6e068263f9613c699cad7addbef56e8d591ae52 | 868 | py | Python | images.py | krutika-bhalla/Web-Scraping | 01169cc2d031b6d0353a1ff87b6d19d26d370af8 | [
"Apache-2.0"
] | null | null | null | images.py | krutika-bhalla/Web-Scraping | 01169cc2d031b6d0353a1ff87b6d19d26d370af8 | [
"Apache-2.0"
] | null | null | null | images.py | krutika-bhalla/Web-Scraping | 01169cc2d031b6d0353a1ff87b6d19d26d370af8 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
from PIL import Image
from io import BytesIO
import requests
import os
def start_search():
    """Prompt for a search term, scrape Bing image thumbnails for it and
    save each image into a folder named after the query."""
    query = input("Enter Search Item: ")
    params = {"q": query}
    folder = query.replace(" ", "_").lower()
    if not os.path.isdir(folder):
        os.makedirs(folder)
    response = requests.get("http://www.bing.com/images/search", params=params)
    page = BeautifulSoup(response.text, "html.parser")
    for anchor in page.findAll("a", {"class": "thumb"}):
        href = anchor.attrs["href"]
        image_response = requests.get(href)
        print("Getting: ", href)
        name = href.split("/")[-1]
        try:
            image = Image.open(BytesIO(image_response.content))
            image.save("./" + folder + "/" + name, image.format)
        except:  # keep the original catch-all: any bad image is just skipped
            print("error")
start_search()
start_search() | 25.529412 | 72 | 0.599078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.162442 |
f6e0f69bab1cd6496a44232820ff0d043380ef43 | 12,822 | py | Python | repeat_cook_funcs.py | byu-imaal/dns-cookies-pam21 | a0e79a3538f2c32aa3f23d89b96ad1afe046d0f3 | [
"BSD-2-Clause"
] | null | null | null | repeat_cook_funcs.py | byu-imaal/dns-cookies-pam21 | a0e79a3538f2c32aa3f23d89b96ad1afe046d0f3 | [
"BSD-2-Clause"
] | null | null | null | repeat_cook_funcs.py | byu-imaal/dns-cookies-pam21 | a0e79a3538f2c32aa3f23d89b96ad1afe046d0f3 | [
"BSD-2-Clause"
] | null | null | null | """
Collection of functions for analyzing repeat cookie data
Designed to run a single function via CLI
"""
import argparse
import inspect
import json
import math
import subprocess
import sys
from collections import Counter
from collections import defaultdict
from shared.colors import color
from tqdm import tqdm
def scook_length(it):
lengths_cook = defaultdict(int)
lengths_ip = defaultdict(int)
for j in it:
local_lengths = set()
for q in j['queries']:
if q['scook'] is not None and len(q['scook']) > 0:
local_lengths.add(len(q['scook']))
lengths_cook[len(q['scook'])] += 1
for l in local_lengths:
lengths_ip[l] += 1
print(lengths_cook)
print(lengths_ip)
def partial_interop(it):
def is_interop(q):
return q['scook'].startswith('01000000') and q['tsdiff'] is not None
partial_users = defaultdict(set)
for j in it:
interop_count = 0
normal_count = 0
for q in j['queries']:
if q['scook'] is not None and len(q['scook']) > 0:
interop = is_interop(q)
interop_count += 1 if interop else 0
normal_count += 1 if not interop else 0
if interop_count > 1 and normal_count > 1:
partial_users[j['ip']] = set([q['scook'] for q in j['queries'] if q['scook'] is not None])
for ip, cooks in partial_users.items():
print(ip)
print(f'\t{" ".join(cooks)}')
print(f"#### {len(partial_users.keys())} ####")
def find_statics(it):
domains = defaultdict(list)
for j in it:
for q in j['queries']:
if q['scook'] is not None and len(q['scook']) > 0:
domains[j['ip'] + f" ({j['domain']})"].append(q['scook'])
num_static = 0
for d, cooks in domains.items():
if len(set(cooks)) / len(cooks) < 0.1 and len(cooks) > 10:
num_static += 1
print(f'{d} : {len(set(cooks))}/{len(cooks)}')
for c in set(cooks):
print(f'\t{c}')
print(f'#### {num_static} ####')
def print_non_bind_statics(it):
domains = defaultdict(list)
for j in it:
for q in j['queries']:
if q['scook'] is not None and len(q['scook']) > 0 and not q['isbind']:
domains[j['ip'] + f" ({j['domain']})"].append(q['scook'])
total = 0
for d, cooks in domains.items():
if len(cooks) > 20 and len(Counter(cooks)) == 1:
total += 1
print(d)
for c, num in Counter(cooks).items():
if num > 1:
print(f'\t{num}: {c}')
print('*' * 100)
print(f'#### {total} ####')
def print_nonces(it):
for j in it:
print(f"\n{'*' * 50}\n{j['domain']} - {j['ip']}")
for q in j['queries']:
if q['isbind']:
print(f'{q["scook"][:8]}\t', end='')
def nonce_bit_entropy(it):
def entropy(s):
""" Calculate entropy. Number of bits needed to represent a byte """
b = bytearray.fromhex(s)
freqs = [c / len(b) for c in Counter(b).values()]
return -sum(f * math.log2(f) for f in freqs)
data = defaultdict(str)
for j in it:
for q in j['queries']:
if q['isbind'] and not q['scook'].startswith("01000000"):
data[j['domain']] += q['scook'][:8]
for domain, combined_cooks in data.items():
ent = entropy(combined_cooks)
print(color(f'{domain} ({len(combined_cooks)}): {ent}',
fg=('red' if ent < 7 else 'white')))
def nonce_uniq(it):
data = defaultdict(list)
for j in it:
for q in j['queries']:
if q['isbind'] and not q['scook'].startswith("01000000"):
data[f"{j['domain']} {j['ip']}"].append(q['scook'][:8])
num_under_lim = 0
for domain, cook_list in data.items():
div = len(set(cook_list)) / len(cook_list)
if div < .9:
num_under_lim += 1
# print(domain.split()[1])
print(f'{domain} ({len(cook_list)}): {div:.4f}')
print('*' * 50)
print(f'Number printed: {num_under_lim} ({num_under_lim / len(data)})')
def ts_diff(it):
data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['domain']][j['ip']][q['tsdiff']] += 1
for domain, ips in data.items():
print(domain)
for ip, counts in ips.items():
print(color(f' {ip}: ', fg=('blue' if len(counts) >= 5 else None)), end='')
for diff, count in sorted(counts.items()):
c_func = lambda x: 'red' if math.fabs(x) > 60 else 'green' if math.fabs(x) < 3 else None
print(color(f'{diff}:{count}, ', fg=c_func(diff)), end='')
print()
def ts_stats(it):
ip_sets = defaultdict(set)
domain_sets = defaultdict(set)
data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['domain']][j['ip']][q['tsdiff']] += 1
for domain, ips in data.items():
domain_sets['total'].add(domain)
for ip, counts in ips.items():
ip_sets['total'].add(ip)
if len(counts) >= 10:
for d, c in sorted(counts.items()):
print(f'{d}:{c} ', end='')
print('\n' + ('#' * 50))
ip_sets['slow'].add(ip)
domain_sets['slow'].add(domain)
if not any([math.fabs(t) > 60 for t in counts.keys()]):
ip_sets['all_accurate'].add(ip)
if all([math.fabs(t) > 60 for t in counts.keys()]):
ip_sets['none_accurate'].add(ip)
if any([math.fabs(t) > 60 for t in counts.keys()]) and any([math.fabs(t) <= 5 for t in counts.keys()]):
ip_sets['some_off_in_ip'].add(ip)
domain_sets['some_off_in_ip'].add(ip)
if all([i in ip_sets['all_accurate'] for i in ips.keys()]):
domain_sets['all_accurate'].add(domain)
if not all([i in ip_sets['all_accurate'] for i in ips.keys()]) and any(
[i in ip_sets['all_accurate'] for i in ips.keys()]):
print(domain)
for i in ips.keys():
print(f'\t{i}: {i in ip_sets["all_accurate"]}')
domain_sets['some_off_ip'].add(domain)
ip_counts = {ip: len(s) for ip, s in ip_sets.items()}
domain_counts = {domain: len(s) for domain, s in domain_sets.items()}
print(f'ips: {ip_counts}')
print(f'domains: {domain_counts}')
print(domain_sets['some_off_ip'])
def _print_ts(ip: str, d: dict):
# d should be a mapping of method -> list of ts diffs
print('*' * 50)
print(ip)
for m, ts in d.items():
print(f'{m:>8} {len(set(ts)):>4}|', end='')
for i, t in enumerate(ts):
if i == 10:
print('|', end='')
print(f'{t:>8}', end='')
print()
def print_mixed_ip(it):
""" mix of accurate and out of sync """
data = defaultdict(lambda: defaultdict(list))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['ip']][q['method']].append(q['tsdiff'])
users = 0
for ip, d in data.items():
# more than 8 unique diffs for none/repeat but less than 3 for follow
# (ignore 1st since it doesn't use new cookie)
# also ensure we had at least 5 follow cookies
combined = set(d['follow'] + d['none'] + d['repeat'])
if len(combined) < 10 and any([math.fabs(t) > 60 for t in combined]) and any(
[math.fabs(t) <= 5 for t in combined]):
users += 1
_print_ts(ip, d)
print(f'mixed ips: {users}')
def print_slow_ts(it):
""" """
diff_data = defaultdict(lambda: defaultdict(list))
cookie_data = defaultdict(lambda: defaultdict(list))
recv_data = defaultdict(lambda: defaultdict(list))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
diff_data[j['ip']][q['method']].append(q['tsdiff'])
cookie_data[j['ip']][q['method']].append(q['tscook'])
recv_data[j['ip']][q['method']].append(q['tsrecv'])
users = 0
for ip, d in diff_data.items():
# more than 8 unique diffs for none/repeat but less than 3 for follow
# (ignore 1st since it doesn't use new cookie)
# also ensure we had at least 5 follow cookies
combined = set(d['follow'] + d['none'] + d['repeat'])
if len(combined) >= 10:
users += 1
print('*' * 50)
print(ip)
for m, ts in d.items():
print(f'{m:>8} {len(set(ts)):>4}|', end='')
for i, t in enumerate(ts):
if i == 10:
print('|', end='')
print(f'{str(cookie_data[ip][m][i])[-4:]},{t:<8}', end='')
print()
print(f'mixed ips: {users}')
def classify_hold_impl(it):
""" pattern where the cookies is semi-static during none/repeat, but live in follow """
data = defaultdict(lambda: defaultdict(list))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['ip']][q['method']].append(q['tsdiff'])
users = 0
for ip, d in data.items():
# more than 8 unique diffs for none/repeat but less than 3 for follow
# (ignore 1st since it doesn't use new cookie)
# also ensure we had at least 5 follow cookies
if (len(set(d['none'])) > 8 or len(set(d['repeat'])) > 8) and \
len(set(d['follow'][1:])) < 3 and len(d['follow']) > 5:
users += 1
_print_ts(ip, d)
print(f'users: {users}')
def classify_opposite_hold(it):
""" pattern where the cookies is live during none then static afterwards """
data = defaultdict(lambda: defaultdict(list))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['ip']][q['method']].append(q['tsdiff'])
users = 0
for ip, d in data.items():
if (len(set(d['follow'])) > 8 or len(set(d['repeat'])) > 8) and \
len(set(d['none'])) < 3 and len(d['none']) > 5:
users += 1
_print_ts(ip, d)
print(f'users: {users}')
def classify_no_hold(it):
""" """
data = defaultdict(lambda: defaultdict(list))
for j in it:
for q in j['queries']:
if q['tsdiff'] is not None:
data[j['ip']][q['method']].append(q['tsdiff'])
users = 0
for ip, d in data.items():
if len(set(d['follow'] + d['none'] + d['repeat'])) >= 10:
if not (len(set(d['follow'][1:])) <= 4 or len(set(d['none'])) <= 4 or len(set(d['repeat'])) <= 4):
if (d['follow'][:10] == sorted(d['follow'][:10], reverse=True) and
d['follow'][10:] == sorted(d['follow'][10:], reverse=True) and
d['repeat'][:10] == sorted(d['repeat'][:10], reverse=True) and
d['repeat'][10:] == sorted(d['repeat'][10:], reverse=True) and
d['none'][:10] == sorted(d['none'][:10], reverse=True) and
d['none'][10:] == sorted(d['none'][10:], reverse=True)) or \
(d['follow'][:10] == sorted(d['follow'][:10]) and
d['follow'][10:] == sorted(d['follow'][10:]) and
d['repeat'][:10] == sorted(d['repeat'][:10]) and
d['repeat'][10:] == sorted(d['repeat'][10:]) and
d['none'][:10] == sorted(d['none'][:10]) and
d['none'][10:] == sorted(d['none'][10:])):
users += 1
_print_ts(ip, d)
print(f'users: {users}')
if __name__ == "__main__":
possible_funcs = []
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isfunction(obj) and not name.startswith('_'):
possible_funcs.append(obj)
parser = argparse.ArgumentParser(description="")
parser.add_argument('input', help="Input file")
parser.add_argument('func', help="Function to run", choices=[f.__name__ for f in possible_funcs])
args = parser.parse_args()
lc = int(subprocess.run(args=['wc', '-l', args.input], check=True, encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.split()[0])
for f in possible_funcs:
if f.__name__ == args.func:
with open(args.input, 'r') as in_file:
f(tqdm(map(json.loads, in_file), total=lc))
| 36.016854 | 115 | 0.522461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,962 | 0.231009 |
f6e1f7647b60fef3a5175db89e79a4186e1503a5 | 816 | py | Python | data/count_office31.py | mo6zes/Reproducing-Deep-Fair-Clustering | 91f915436821eb05cdd021d3e9eb050a248fe993 | [
"Unlicense"
] | 4 | 2021-01-30T12:36:18.000Z | 2022-03-23T10:10:45.000Z | data/count_office31.py | mo6zes/Reproducing-Deep-Fair-Clustering | 91f915436821eb05cdd021d3e9eb050a248fe993 | [
"Unlicense"
] | 1 | 2021-11-05T09:16:36.000Z | 2021-11-05T15:27:25.000Z | data/count_office31.py | mo6zes/Reproducing-Deep-Fair-Clustering | 91f915436821eb05cdd021d3e9eb050a248fe993 | [
"Unlicense"
] | 1 | 2021-03-21T19:44:45.000Z | 2021-03-21T19:44:45.000Z | from office31 import office31
from office31 import download_and_extract_office31
from pathlib import Path
import os
#Hacky script to count how many images are in each folder/cluster in both sources
out_name = "./data/office31/office31_count.txt"

def count_items(src="amazon"):
    """Count the images in each class folder of one Office-31 source domain.

    Appends to ``out_name`` a line with the domain name followed by the repr
    of a dict mapping each non-empty class-folder name to its file count.

    :param src: domain sub-folder under ``./data/office31`` ("amazon", "webcam", ...)
    """
    images_root = "./data/office31/" + src + "/images/"
    count = {}
    for class_dir in os.listdir(images_root):
        # Count files directly instead of iterating one by one; empty folders
        # are skipped, matching the original accumulate-on-sight behaviour.
        n_files = len(os.listdir(os.path.join(images_root, class_dir)))
        if n_files:
            count[class_dir] = n_files
    # "with" closes (and flushes) the handle; the original leaked it open.
    with open(out_name, "a") as report:
        report.write('\n' + src + '\n')
        report.write(str(count))
# Truncate any report left over from a previous run, then regenerate the
# counts for both source domains (each call appends its own section).
file_open=open(out_name, "w")
file_open.write('')
count_items("amazon")
count_items("webcam")
f6e28a13700fedee22ada39a3fa8792554af7720 | 11,213 | py | Python | mm2s5/mm2s5.py | scottkirkwood/mm2s5 | 05160ead97b28a82e3d6f529672c8cd2067ff27c | [
"Apache-2.0"
] | null | null | null | mm2s5/mm2s5.py | scottkirkwood/mm2s5 | 05160ead97b28a82e3d6f529672c8cd2067ff27c | [
"Apache-2.0"
] | null | null | null | mm2s5/mm2s5.py | scottkirkwood/mm2s5 | 05160ead97b28a82e3d6f529672c8cd2067ff27c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: latin1 -*-
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Patched by Iceberg Luo to support FreeMind 0.9.x 2009
"""Convert a Memory Map File into an S5 presentation
If you create a mind map with FreeMind the title of the mind-map (center circle)
will be the title of the slide.
A top level node called "__meta__" can be used to set the metadata for
the presentation. The immediate children are keys and it's first child is a value.
title: Title of presentation, not needed since I get it from the top node
subtitle: Witty subtitle of the presentation
author: You probably want to change this.
company: And this
template: which subdirectory to use under the "ui" directory, 'default' is default
presdate: Date of the presentation
content_type: defaults to 'application/xhtml+xml; charset=utf-8'
header:
footer:
If the first character of the first line is a '<' then we won't add
the the <ul> list to the markup.
The icons can have special meaning:
The "Not OK" icon the slide will be skipped.
The "OK' icon will have no additional markup on the text (i.e. no <ul>)
The "Stop" icon will build the slide one line at a time.
The "Priority 1" icon will use an ordered list
"""
__author__ = 'scott@forusers.com (Scott Kirkwood)'
__version__ = '0.2.3'
import sys
import optparse
from xml.etree import ElementTree
import codecs
class Mm2S5:
    """Convert a FreeMind mind map into an S5 slide show.

    Typical use::

        conv = Mm2S5()
        lines = conv.open('talk.mm')   # parse .mm, return XHTML lines
        conv.write('talk.html', lines)

    The top node becomes the title slide; each first-level node becomes a
    slide.  A first-level "__meta__" node overrides presentation metadata
    (see the module docstring).
    """
    def __init__(self):
        # Parsed ElementTree of the input .mm file (set by open()/callers).
        self.et_in = None
        # Presentation metadata defaults; any key may be overridden by a
        # "__meta__" node in the mind map.
        self.meta = {
            'title' : 'Title',
            'subtitle': '',
            'author' : """The Author.
You don't need to change this code.
Instead, you should write a __meta__ node in your mm file.
Refer to the docstring for details.""",
            'company' : 'See above',
            'template' : 'default',
            'presdate' : 'Today',
            'content_type' : 'application/xhtml+xml; charset=utf-8',
            'header' : '',
            'footer' : None,
            'generator' : 'mm2s5.py',
        }

    def open(self, infilename):
        """ Open the .mm file and create a S5 file as a list of lines """
        # "with" closes the handle even on parse errors; the original used
        # the Python-2-only file() builtin and leaked the handle.
        with open(infilename) as mm_file:
            infile = mm_file.read()
        self.et_in = self.xmlparse(infile)
        lines = self.convert()
        return lines

    def write(self, outfilename, lines):
        """ Write out the lines, written as a convenience function

        Writing out the HTML in correct UTF-8 format is a little tricky."""
        with codecs.open(outfilename, 'w', 'utf-8') as outfile:
            outfile.write(u'\n'.join(lines))

    def xmlparse(self, text):
        """ Parse the XML text and return the root Element """
        return ElementTree.XML(text)

    def convert(self):
        """ Convert self.et_in to HTML as a list of lines in S5 format """
        self._grab_meta()
        lines = []
        lines.append("""<?xml version="1.0" encoding="UTF-8"?>""")
        lines.append("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">""")
        lines.append('<head>')
        # NOTE: the original emitted a stray ` x` attribute after the
        # outline.css type= attribute; removed (invalid XHTML).
        lines.append("""
    <title>%(title)s</title>
    <meta name="version" content="S5 1.1" />
    <meta name="generator" content="%(generator)s" />
    <meta name="presdate" content="%(presdate)s" />
    <meta name="author" content="%(author)s" />
    <meta name="company" content="%(company)s" />
    <meta http-equiv="Content-type" content="%(content_type)s" />
    <!-- S5 format see Eric A. Meyer, http://meyerweb.com/eric/tools/s5/ -->
    <link rel="stylesheet" href="ui/%(template)s/slides.css" type="text/css"
      media="projection" id="slideProj" />
    <link rel="stylesheet" href="ui/%(template)s/outline.css" type="text/css"
      media="screen" id="outlineStyle" />
    <link rel="stylesheet" href="ui/%(template)s/print.css" type="text/css"
      media="print" id="slidePrint" />
    <link rel="stylesheet" href="ui/%(template)s/opera.css" type="text/css"
      media="projection" id="operaFix" />
    <script src="ui/%(template)s/slides.js" type="text/javascript"></script>
""" % self.meta)
        lines.append('</head>')
        lines.append('<body>')
        lines.append("""<div class="layout">
<div id="controls"><!-- DO NOT EDIT --></div>
<div id="currentSlide"><!-- DO NOT EDIT --></div>
<div id="header">%(header)s</div>
<div id="footer">%(footer)s</div>
</div>""" % self.meta)
        lines.append('<div class="presentation">')
        presentation = self.et_in.find('node')
        # Title slide built from the metadata.
        lines.append('  <div class="slide">')
        lines.append('  <h1>%s</h1>' % (self.meta['title']))
        lines.append('  <h2>%s</h2>' % (self.meta['subtitle']))
        lines.append('  <h3>%s</h3>' % (self.meta['author']))
        lines.append('  <h4>%s</h4>' % (self.meta['company']))
        lines.append('  </div>')
        for page in presentation.findall('node'):
            # Skip the __meta__ node, if any (.get(): FreeMind 0.9 nodes may
            # lack a TEXT attribute, which used to raise KeyError here).
            if page.attrib.get('TEXT') == '__meta__':
                continue
            attribs = self._get_list_attributes(page)
            if 'skip' in attribs:
                continue
            lines.append('  <div class="slide">')
            lines.append('  <h1>%s</h1>' % (page.attrib.get('TEXT', '')))
            lines.append('  <div class="slidecontent">')
            self._doList(lines, page, 0)
            lines.append('  </div>') # content
            lines.append('  </div>') # slide
        lines.append('</div>') # Presentation
        lines.append('</body>')
        lines.append('</html>')
        return lines

    def _get_list_attributes(self, page):
        """ If there's a special icon, return some attributes

        'button_ok' -> raw markup (no <ul>); 'stop' -> incremental build;
        'button_cancel' -> skip slide; 'full-1' -> ordered list.
        Also, handle HTML markup a bit differently.
        """
        ret = {}
        for icon in page.findall('icon'):
            icon_type = icon.attrib['BUILTIN']
            if icon_type == 'button_ok':
                ret['no_ul'] = True
            elif icon_type == "stop": # Stop light icon
                ret['ul_class'] = "incremental"
            elif icon_type == 'button_cancel':
                ret['skip'] = True
            elif icon_type == 'full-1':
                ret['ol'] = True
        # Special case, if the first node starts with '<' (or is a table)
        # then we'll assume markup and not emit a <ul> etc.
        node = page.find('node')
        if node is not None and \
                (node.attrib.get('TEXT', '').startswith('<') or
                    node.attrib.get('TEXT') == '__table__'):
            ret['no_ul'] = True
        return ret

    def _grab_meta(self):
        """ Grab a "page" called __meta__, if any, and merge it into self.meta """
        titles = self.et_in.find('node').attrib['TEXT'].split('\n')
        self.meta['title'] = titles[0]
        if len(titles) > 1:
            self.meta['subtitle'] = titles[1]
        # .iter() replaces the deprecated/removed getiterator().
        for cur_node in self.et_in.iter('node'):
            if cur_node.attrib.get('TEXT') == '__meta__': # FreeMind 0.9 may omit TEXT
                for sub_attrib in cur_node.findall('node'):
                    key = sub_attrib.attrib['TEXT']
                    sub_value = sub_attrib.find('node')
                    # BUG FIX: "if sub_value:" tested Element truthiness,
                    # which is False for childless (leaf) value nodes, so
                    # meta values were silently ignored.
                    if sub_value is not None:
                        value = sub_value.attrib['TEXT']
                        self.meta[key] = value
        if self.meta['footer'] is None:
            # BUG FIX: closing tag was </h2> for the <h1> element.
            self.meta['footer'] = '<h1>%(company)s</h1><h2>%(title)s</h2>' % self.meta

    def _doList(self, lines, sub, depth):
        """ Recurse this list of items

        Code is a little messier than I would like """
        if sub is None or len(sub) == 0:
            return
        attribs = self._get_list_attributes(sub)
        if 'ul_class' in attribs:
            ul_class = ' class="%s"' % (attribs['ul_class'])
        else:
            ul_class = ''
        indent = ' ' * (depth + 2)
        if 'no_ul' not in attribs:
            if 'ol' in attribs:
                lines.append('%s<ol%s>' % (indent, ul_class,))
                end = '%s</ol>' % (indent)
            else:
                lines.append('%s<ul%s>' % (indent, ul_class,))
                end = '%s</ul>' % (indent)
        else:
            end = None
        for line in sub.findall('node'):
            text = line.attrib.get('TEXT') # FreeMind 0.9 may omit the TEXT attribute
            if not text: # FreeMind 0.9's HTML node stores text in html format
                # NOTE: relies on ElementTree's private _children attribute
                # (pure-Python implementation only) to dig into richcontent.
                p = (line
                        ._children[0]#Element richcontent
                        ._children[0]#Element html
                        ._children[1]#Element body
                        ._children[0])#Element p
                if p.text:
                    text=p.text
                elif p.tag == 'img':
                    text='<img src="%s">' % p.get('src')
            if text == '__table__':
                lines += self._insert_table(text, line, depth)
            else:
                lines += self._insert_line_item(text, line, depth, attribs)
                self._doList(lines, line, depth + 1)
        if end:
            lines.append(end)

    def _insert_line_item(self, text, line, depth, attribs):
        """ Insert a line item <li></li> (or raw markup when no_ul is set) """
        indent = ' ' * (depth + 3)
        lines = []
        if not text:
            return lines
        text = text.replace('<html>', '')
        if 'LINK' in line.attrib:
            text = '<a href="%s">%s</a>' % (line.attrib['LINK'], text)
        if 'no_ul' not in attribs:
            text = text.replace('\n', '<br/>\n')
            lines.append('%s<li>%s</li>' % (indent, text))
        else:
            lines.append('%s' % (text))
        return lines

    def _insert_table(self, unused_text, line, depth):
        """ If we get a special node called __table__ insert the children
        as rows in a table (descendants are columns in that row) """
        lines = []
        indent = ' ' * (depth + 2)
        table = line
        lines.append('%s<table>' % (indent))
        for row in table.findall('node'):
            lines.append('%s  <tr>' % (indent))
            # .iter() includes the row node itself, matching the old
            # getiterator() semantics (row text is the first column).
            for col in row.iter('node'):
                lines.append('%s    <td>%s</td>' % (indent, col.attrib['TEXT']))
            lines.append('%s  </tr>' % (indent))
        lines.append('%s</table>' % (indent))
        return lines
def show_version():
print 'mm2s5 version %s.' % __version__
print 'Written by %s' % __author__
def parse_command_line():
usage = """%prog <mmfile> [<htmloutput>]
Create a FreeMind (.mm) document (see http://freemind.sourceforge.net/wiki/index.php/Main_Page)
the main node will be the title page and the lower nodes will be pages.
"""
parser = optparse.OptionParser(usage)
parser.add_option('-v', '--version', dest='version', action='store_true',
help='Show version information and exit.')
(options, args) = parser.parse_args()
if options.version:
show_version()
sys.exit(0)
if len(args) == 0:
parser.print_usage()
sys.exit(-1)
infile = args[0]
if not infile.endswith('.mm'):
print "Input file must end with '.mm'"
parser.print_usage()
sys.exit(-1)
if len(args) == 1:
outfile = infile.replace('.mm', '.html')
elif len(args) == 2:
outfile = args[1]
else:
parser.print_usage()
sys.exit(-1)
mm2s5 = Mm2S5()
lines = mm2s5.open(infile)
mm2s5.write(outfile, lines)
if __name__ == "__main__":
parse_command_line()
| 33.471642 | 116 | 0.605725 | 8,090 | 0.721484 | 0 | 0 | 0 | 0 | 0 | 0 | 6,064 | 0.540801 |
f6e2d912d61b57a1bc1a9fc88cfe7298ffc1b85b | 480 | py | Python | covid19sweden/__init__.py | martinbenes1996/covid19sweden | 100993e41c302ffd96fa39857dd8ddd21267f1ed | [
"MIT"
] | 1 | 2020-06-06T21:02:10.000Z | 2020-06-06T21:02:10.000Z | covid19sweden/__init__.py | martinbenes1996/covid19sweden | 100993e41c302ffd96fa39857dd8ddd21267f1ed | [
"MIT"
] | 1 | 2020-06-25T00:18:21.000Z | 2020-06-26T02:34:19.000Z | covid19sweden/__init__.py | martinbenes1996/covid19sweden | 100993e41c302ffd96fa39857dd8ddd21267f1ed | [
"MIT"
] | 1 | 2020-06-06T21:04:22.000Z | 2020-06-06T21:04:22.000Z | # -*- coding: utf-8 -*-
"""Webscraper for Swedish data.
Reference: https://www.scb.se/hitta-statistik/statistik-efter-amne/befolkning/befolkningens-sammansattning/befolkningsstatistik/pong/tabell-och-diagram/preliminar-statistik-over-doda/
Todo:
* caching
"""
import pkg_resources
from .main import *
from . import fohm
from .scb import *
from .backup import *
try:
    __version__ = pkg_resources.get_distribution("covid19sweden").version
except Exception:
    # A bare "except:" would also swallow SystemExit/KeyboardInterrupt. The
    # only expected failure here is missing package metadata (e.g. running
    # from a source checkout), in which case no version is known.
    __version__ = None
f6e364a4d8935e2990712e28b098a07099b88272 | 395 | py | Python | public/ViPER/modules/head.py | severnake/ViPER | e788f73bfe894f7fd03081782c87778de38a8df2 | [
"MIT"
] | 23 | 2016-11-26T06:55:44.000Z | 2021-07-27T19:28:05.000Z | public/ViPER/modules/head.py | severnake/ViPER | e788f73bfe894f7fd03081782c87778de38a8df2 | [
"MIT"
] | 2 | 2021-03-11T04:35:03.000Z | 2021-05-11T22:03:33.000Z | public/ViPER/modules/head.py | severnake/ViPER | e788f73bfe894f7fd03081782c87778de38a8df2 | [
"MIT"
] | 7 | 2017-08-12T10:44:41.000Z | 2022-03-22T05:49:49.000Z | import requests
from termcolor.termcolor import colored, cprint
class header:
    """
    Class for extracting headers
    """

    def __init__(self):
        pass

    def get_headers(self, target):
        """Issue a HEAD request to *target* and pretty-print each response header in blue."""
        response_headers = requests.head(target).headers
        for name, value in response_headers.items():
            cprint(name.ljust(60) + value.rjust(50), 'blue')
f6e4193df3aa52bc4f5ce439f49c5d54acb0e849 | 1,190 | py | Python | src/airflow_actionproject/callables/action.py | actionprojecteu/airflow-actionproject | 6518e73e5709ebce927e0b463b5e36d80194491c | [
"MIT"
] | null | null | null | src/airflow_actionproject/callables/action.py | actionprojecteu/airflow-actionproject | 6518e73e5709ebce927e0b463b5e36d80194491c | [
"MIT"
] | null | null | null | src/airflow_actionproject/callables/action.py | actionprojecteu/airflow-actionproject | 6518e73e5709ebce927e0b463b5e36d80194491c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
# ---------------
# Airflow imports
# ---------------
#--------------
# local imports
# -------------
from airflow_actionproject.hooks.action import ActionDatabaseHook
# -----------------------
# Module global variables
# -----------------------
# ----------------
# Module constants
# ----------------
def check_number_of_entries(conn_id, start_date, n_entries, project, true_task_id, false_task_id, obs_type='observation'):
    '''Callable to use with BranchPythonOperator.

    Returns true_task_id when the ACTION database holds more than n_entries
    entries for the project since start_date, false_task_id otherwise.
    '''
    threshold = n_entries + 1
    with ActionDatabaseHook(conn_id) as hook:
        # Ask for one entry more than the threshold so we only need to know
        # whether the threshold was reached, not the exact total.
        fetched = list(
            hook.download(
                start_date = start_date,
                end_date   = '2999-12-31T23:59:59.99999Z', # far away,
                n_entries  = threshold,
                project    = project,
                obs_type   = obs_type,
            )
        )
    return true_task_id if len(fetched) >= threshold else false_task_id
| 24.285714 | 122 | 0.512605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.540336 |
f6e5c99d7184da2fbbd07b8e09ae981f7738a398 | 5,838 | py | Python | stp_raet/test/test_communication.py | ArtObr/indy-plenum | c568eefb0042b3ec3aec84e9241cb1b5df419365 | [
"Apache-2.0"
] | null | null | null | stp_raet/test/test_communication.py | ArtObr/indy-plenum | c568eefb0042b3ec3aec84e9241cb1b5df419365 | [
"Apache-2.0"
] | null | null | null | stp_raet/test/test_communication.py | ArtObr/indy-plenum | c568eefb0042b3ec3aec84e9241cb1b5df419365 | [
"Apache-2.0"
] | null | null | null | from ioflo.base.consoling import getConsole
from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer
from raet.raeting import AutoMode, Acceptance
from raet.road.estating import RemoteEstate
from raet.road.stacking import RoadStack
from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote
from stp_core.common.log import getlogger
from stp_core.network.port_dispenser import genHa
logger = getlogger()
def testPromiscuousConnection(tdir):
    """Two auto-accepting (promiscuous) stacks can join, handshake and exchange messages."""
    joiner = RoadStack(name='alpha',
                       ha=genHa(),
                       auto=AutoMode.always,
                       basedirpath=tdir)
    acceptor = RoadStack(name='beta',
                         ha=genHa(),
                         main=True,
                         auto=AutoMode.always,
                         basedirpath=tdir)
    try:
        remote = RemoteEstate(stack=joiner, ha=acceptor.ha)
        joiner.addRemote(remote)
        joiner.join(uid=remote.uid, cascade=True)
        handshake(joiner, acceptor)
        sendMsgs(joiner, acceptor, remote)
    finally:
        # Always tear the stacks down so ports/files are released.
        cleanup(joiner, acceptor)
def testRaetPreSharedKeysPromiscous(tdir):
    """Stacks with explicit signing keys, still in promiscuous (auto-accept) mode.

    Alpha pre-loads beta's verification key on the remote, so allow() (not
    join()) is used to establish the session.
    """
    alphaSigner = NaclSigner()
    betaSigner = NaclSigner()

    logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
    logger.debug("Beta's verkey {}".format(betaSigner.verhex))

    alpha = RoadStack(name='alpha',
                      ha=genHa(),
                      sigkey=alphaSigner.keyhex,
                      auto=AutoMode.always,
                      basedirpath=tdir)

    beta = RoadStack(name='beta',
                     ha=genHa(),
                     sigkey=betaSigner.keyhex,
                     main=True,
                     auto=AutoMode.always,
                     basedirpath=tdir)

    try:
        # The remote carries beta's verkey, so alpha can skip join() and go
        # straight to the allow (key-based) handshake.
        betaRemote = RemoteEstate(stack=alpha, ha=beta.ha,
                                  verkey=betaSigner.verhex)

        alpha.addRemote(betaRemote)

        alpha.allow(uid=betaRemote.uid, cascade=True)

        handshake(alpha, beta)

        sendMsgs(alpha, beta, betaRemote)
    finally:
        # Always release ports/files, even when the handshake fails.
        cleanup(alpha, beta)
def testRaetPreSharedKeysNonPromiscous(tdir):
    """Fully non-promiscuous stacks: both sides must know each other's keys.

    Each stack's keep store is primed (dumpRemoteRoleData) with the peer's
    verification and public encryption keys *before* the allow() handshake,
    so no automatic acceptance is needed (AutoMode.never).
    """
    alphaSigner = NaclSigner()
    betaSigner = NaclSigner()

    alphaPrivateer = Privateer()
    betaPrivateer = Privateer()

    logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
    logger.debug("Beta's verkey {}".format(betaSigner.verhex))

    alpha = RoadStack(name='alpha',
                      ha=genHa(),
                      sigkey=alphaSigner.keyhex,
                      prikey=alphaPrivateer.keyhex,
                      auto=AutoMode.never,
                      basedirpath=tdir)

    beta = RoadStack(name='beta',
                     ha=genHa(),
                     sigkey=betaSigner.keyhex,
                     prikey=betaPrivateer.keyhex,
                     main=True,
                     auto=AutoMode.never,
                     basedirpath=tdir)

    # Pre-share keys: each side records the peer role as already accepted.
    alpha.keep.dumpRemoteRoleData({
        "acceptance": Acceptance.accepted.value,
        "verhex": betaSigner.verhex,
        "pubhex": betaPrivateer.pubhex
    }, "beta")

    beta.keep.dumpRemoteRoleData({
        "acceptance": Acceptance.accepted.value,
        "verhex": alphaSigner.verhex,
        "pubhex": alphaPrivateer.pubhex
    }, "alpha")

    try:

        betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)

        alpha.addRemote(betaRemote)

        alpha.allow(uid=betaRemote.uid, cascade=True)

        handshake(alpha, beta)

        sendMsgs(alpha, beta, betaRemote)
    finally:
        # Always release ports/files, even when the handshake fails.
        cleanup(alpha, beta)
def testConnectionWithHaChanged(tdir):
    """A rebuilt alpha with a *new* host address can reconnect to beta.

    Beta is created with mutable=True so it accepts the changed HA of an
    already-known remote after alpha is torn down and recreated.
    """
    console = getConsole()
    console.reinit(verbosity=console.Wordage.verbose)
    alphaSigner = NaclSigner()
    betaSigner = NaclSigner()

    alphaPrivateer = Privateer()
    betaPrivateer = Privateer()

    logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
    logger.debug("Beta's verkey {}".format(betaSigner.verhex))

    alpha = None

    def setupAlpha(ha):
        # Re-creates the alpha stack (same keys) bound to the given HA and
        # re-primes its keep store with beta's keys.
        nonlocal alpha
        alpha = RoadStack(name='alpha',
                          ha=ha,
                          sigkey=alphaSigner.keyhex,
                          prikey=alphaPrivateer.keyhex,
                          auto=AutoMode.never,
                          basedirpath=tdir)

        alpha.keep.dumpRemoteRoleData({
            "acceptance": Acceptance.accepted.value,
            "verhex": betaSigner.verhex,
            "pubhex": betaPrivateer.pubhex
        }, "beta")

    oldHa = genHa()
    setupAlpha(oldHa)

    beta = RoadStack(name='beta',
                     ha=genHa(),
                     sigkey=betaSigner.keyhex,
                     prikey=betaPrivateer.keyhex,
                     main=True,
                     auto=AutoMode.never,
                     basedirpath=tdir, mutable=True)

    beta.keep.dumpRemoteRoleData({
        "acceptance": Acceptance.accepted.value,
        "verhex": alphaSigner.verhex,
        "pubhex": alphaPrivateer.pubhex
    }, "alpha")

    try:
        # First session on the original HA.
        betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
        alpha.addRemote(betaRemote)
        alpha.join(uid=betaRemote.uid, cascade=True)
        handshake(alpha, beta)
        sendMsgs(alpha, beta, betaRemote)
        logger.debug("beta knows alpha as {}".
                     format(getRemote(beta, "alpha").ha))
        cleanup(alpha)

        # Rebuild alpha on a new HA and repeat; beta (mutable) should update
        # its stored address for the "alpha" role.
        newHa = genHa()
        logger.debug("alpha changing ha to {}".format(newHa))
        setupAlpha(newHa)
        betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
        alpha.addRemote(betaRemote)
        alpha.join(uid=betaRemote.uid, cascade=True)
        handshake(alpha, beta)
        sendMsgs(alpha, beta, betaRemote)
        logger.debug("beta knows alpha as {}".
                     format(getRemote(beta, "alpha").ha))
    finally:
        cleanup(alpha, beta)
f6e6fadb85ba7a38ec23f0b2f12e5f4cc663ece0 | 14,393 | py | Python | RedYoshiBot/server/CTGP7ServerDatabase.py | mariohackandglitch/RedYoshiBot | 436d4d22b75673f1b10e1ac478e797f8544e388d | [
"Apache-2.0"
] | 4 | 2018-03-12T17:02:57.000Z | 2021-03-26T18:44:21.000Z | RedYoshiBot/server/CTGP7ServerDatabase.py | mariohackandglitch/RedYoshiBot | 436d4d22b75673f1b10e1ac478e797f8544e388d | [
"Apache-2.0"
] | null | null | null | RedYoshiBot/server/CTGP7ServerDatabase.py | mariohackandglitch/RedYoshiBot | 436d4d22b75673f1b10e1ac478e797f8544e388d | [
"Apache-2.0"
] | null | null | null | import threading
import sqlite3
from enum import Enum
import time
import datetime
from ..CTGP7Defines import CTGP7Defines
current_time_min = lambda: int(round(time.time() / 60))
class ConsoleMessageType(Enum):
    # Codes for messages delivered to a console. SINGLE_* messages are
    # consumed on first read; TIMED_* messages remain active for a number of
    # minutes. *_KICKMESSAGE variants also kick the player, except for admin
    # consoles (get_console_message downgrades them to plain messages).
    SINGLE_MESSAGE = 0
    TIMED_MESSAGE = 1
    SINGLE_KICKMESSAGE = 2
    TIMED_KICKMESSAGE = 3
class CTGP7ServerDatabase:
    """Thread-safe accessor for the CTGP-7 server SQLite database.

    A single shared connection (opened with ``check_same_thread=False``) is
    serialized through ``self.lock``. Changes persist only when commit() (or
    disconnect(), which commits first) is called. ``cID`` throughout is a
    console identifier; cID 0 acts as a broadcast target for messages.
    """

    def __init__(self):
        # Lazily connected; connect() creates the sqlite3 connection.
        self.isConn = False
        self.conn = None
        self.lock = threading.Lock()
        # Optional observer fired by set_console_message().
        self.kickCallback = None

    def setKickLogCallback(self, callback):
        """Register callback(cID, messageType, message, amountMin, isSilent)."""
        self.kickCallback = callback

    def connect(self):
        """Open the database connection if not already open."""
        if not self.isConn:
            self.conn = sqlite3.connect('RedYoshiBot/server/data/data.sqlite', check_same_thread=False)
            self.isConn = True

    def disconnect(self):
        """Commit pending changes and close the connection."""
        if (self.isConn):
            self.commit()
            with self.lock:
                self.isConn = False
                self.conn.close()
                self.conn = None

    def commit(self):
        """Flush pending writes to disk."""
        if (self.isConn):
            with self.lock:
                self.conn.commit()

    def set_database_config(self, field, value):
        """Set a key/value pair in the config table (row must already exist)."""
        with self.lock:
            c = self.conn.cursor()
            c.execute("UPDATE config SET value = ? WHERE field = ?", (str(value), str(field)))

    def get_database_config(self, field):
        """Return the config value for ``field`` as stored, or None if absent."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM config WHERE field = ?", (str(field),))
            for row in rows:
                return row[1]

    def get_online_region(self):
        return int(self.get_database_config("onlregion"))

    def get_debugonline_region(self):
        # Debug region is offset by 2 from the normal online region.
        return int(self.get_database_config("onlregion")) + 2

    def set_online_region(self, value):
        self.set_database_config("onlregion", value)

    def get_track_freq_split(self):
        # Track-frequency stats are partitioned into numbered "splits";
        # this is the currently active split.
        return int(self.get_database_config("trackfreqsplit"))

    def set_track_freq_split(self, value):
        self.set_database_config("trackfreqsplit", value)

    def get_ctww_version(self):
        return int(self.get_database_config("ctwwver"))

    def set_ctww_version(self, value):
        self.set_database_config("ctwwver", value)

    def get_beta_version(self):
        return int(self.get_database_config("betaver"))

    def set_beta_version(self, value):
        self.set_database_config("betaver", value)

    def get_stats_dirty(self):
        return int(self.get_database_config("stats_dirty")) == 1

    def set_stats_dirty(self, isDirty):
        self.set_database_config("stats_dirty", 1 if isDirty else 0)

    def get_most_played_tracks(self, course_type, amount):
        """Top ``amount`` tracks of ``course_type`` in the current split.

        Returns a list of [szsName, freq_in_current_split, freq_in_all_prior_splits].
        """
        currsplit = self.get_track_freq_split()
        with self.lock:
            c = self.conn.cursor()
            c2 = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_tracksfreq WHERE split = ? AND type = ? ORDER BY freq DESC", (int(currsplit), int(course_type)))
            i = 0
            ret = []
            for row in rows:
                if (i >= amount): break
                # Sum of this track's plays in every earlier split (None when
                # the track never appeared before).
                prevValue = c2.execute("SELECT SUM(freq) FROM stats_tracksfreq WHERE id = ? AND split < ?", (str(row[0]), int(currsplit))).fetchone()[0]
                ret.append([row[0], row[2], 0 if prevValue is None else prevValue])
                i += 1
            return ret

    def increment_track_frequency(self, szsName, value):
        """Add ``value`` plays to a track in the current split (insert on first play)."""
        currsplit = self.get_track_freq_split()
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_tracksfreq WHERE id = ? AND split = ?", (str(szsName),int(currsplit)))
            for _ in rows:
                # Row exists: bump the counter in place. NOTE(review): value
                # is interpolated via format() rather than bound - safe only
                # because it is forced through int() first.
                c.execute("UPDATE stats_tracksfreq SET freq = freq + {} WHERE id = ? AND split = ?".format(str(int(value))), (str(szsName),int(currsplit)))
                return
            # First play this split; skip tracks with unknown course type (-1).
            courseType = CTGP7Defines.getTypeFromSZS(szsName)
            if (courseType != -1):
                c.execute('INSERT INTO stats_tracksfreq VALUES (?,?,?,?)', (str(szsName), int(currsplit), int(value), int(courseType)))

    def get_stats(self):
        """Return the single stats_general row as a {column_name: value} dict."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_general WHERE 1=1")
            ret = {}
            i = 0
            names = [description[0] for description in rows.description]
            for row in rows:
                for val in row:
                    ret[names[i]] = val
                    i += 1
                # Only the first row is meaningful (table holds one row).
                break
            return ret

    def increment_general_stats(self, param, value):
        """Add ``value`` to the stats_general column named ``param``.

        NOTE(review): ``param`` is interpolated into the SQL; callers must
        pass trusted, internal column names only.
        """
        with self.lock:
            c = self.conn.cursor()
            c.execute("UPDATE stats_general SET {} = {} + {} WHERE 1=1".format(param, param, str(int(value))))

    def fetch_stats_seqid(self, cID):
        """Increment and return the console's sequence ID (starts at 1)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
            for row in rows:
                newSeqID = row[1] + 1
                c.execute("UPDATE stats_seqid SET seqID = ? WHERE cID = ?", (int(newSeqID), int(cID)))
                return newSeqID
            # Unknown console: create its counter at 1.
            c.execute('INSERT INTO stats_seqid VALUES (?,?)', (int(cID), int(1)))
            return 1

    def get_stats_seqid(self, cID):
        """Return the console's current sequence ID without incrementing (0 if unknown)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
            for row in rows:
                return row[1]
            return 0

    def get_unique_console_count(self):
        """Number of distinct consoles that ever reported a sequence ID."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT COUNT(*) FROM stats_seqid")
            for row in rows:
                return row[0]
            return 0

    def delete_console_message(self, cID):
        """Remove any pending message for the console."""
        with self.lock:
            c = self.conn.cursor()
            c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))

    def set_console_message(self, cID, messageType, message, amountMin=None, isSilent=False):
        """Set (replacing any previous) the pending message for a console.

        ``amountMin`` gives the lifetime in minutes for TIMED_* messages; the
        registered kick callback (if any) is notified outside the lock.
        """
        currTime = current_time_min() if amountMin is not None else None
        with self.lock:
            c = self.conn.cursor()
            # At most one message per console: replace instead of appending.
            c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))
            c.execute('INSERT INTO console_message VALUES (?,?,?,?,?)', (int(cID), str(message), int(messageType), currTime, amountMin))
        if (self.kickCallback):
            self.kickCallback(cID, messageType, message, amountMin, isSilent)

    def get_console_message(self, cID, realConsoleID): # Real console ID is to keep track if cID is 0
        """Fetch the pending message for a console (falling back to broadcast cID 0).

        SINGLE_* messages are consumed on read; expired TIMED_* messages are
        purged. KICK variants are downgraded to plain messages for admin
        consoles (checked against ``realConsoleID``).
        Returns (messageType, messageText, startTime, amountTime) or None.
        """
        ret = None
        startTime = None
        amountTime = None
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_message WHERE cID = ?", (int(cID),))
            for row in rows:
                messageText = row[1]
                messageType = row[2]
                startTime = row[3]
                amountTime = row[4]
                ret = [messageType, messageText, startTime, amountTime]
        if (ret is not None):
            # Admin consoles see kick messages as informational only.
            if (ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
                ret[0] = ConsoleMessageType.SINGLE_MESSAGE.value
            elif (ret[0] == ConsoleMessageType.TIMED_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
                ret[0] = ConsoleMessageType.TIMED_MESSAGE.value
            if ret[0] == ConsoleMessageType.SINGLE_MESSAGE.value or ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value:
                # One-shot messages are removed once read.
                self.delete_console_message(cID)
            elif (startTime is not None and amountTime is not None and startTime + amountTime < current_time_min()):
                # Timed message past its lifetime: purge it.
                self.delete_console_message(cID)
        if (ret is None and cID != 0):
            # No per-console message: fall back to the broadcast slot (cID 0).
            ret = self.get_console_message(0, realConsoleID)
        return tuple(ret) if ret is not None else None

    def set_console_is_verified(self, cID, isVerified):
        """Add or remove the console from the verified set (no-op if unchanged)."""
        wasVerified = self.get_console_is_verified(cID)
        if (wasVerified == isVerified):
            return
        with self.lock:
            c = self.conn.cursor()
            if (isVerified):
                c.execute('INSERT INTO verified_consoles VALUES (?)', (int(cID),))
            else:
                c.execute("DELETE FROM verified_consoles WHERE cID = ?", (int(cID),))

    def get_console_is_verified(self, cID):
        """True if the console is in the verified set."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM verified_consoles WHERE cID = ?", (int(cID),))
            for row in rows:
                return True
            return False

    def set_console_is_admin(self, cID, isAdmin):
        """Add or remove the console from the admin set (no-op if unchanged)."""
        wasAdmin = self.get_console_is_admin(cID)
        if (wasAdmin == isAdmin):
            return
        with self.lock:
            c = self.conn.cursor()
            if (isAdmin):
                c.execute('INSERT INTO admin_consoles VALUES (?)', (int(cID),))
            else:
                c.execute("DELETE FROM admin_consoles WHERE cID = ?", (int(cID),))

    def get_console_is_admin(self, cID):
        """True if the console is in the admin set."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM admin_consoles WHERE cID = ?", (int(cID),))
            for row in rows:
                return True
            return False

    def set_console_last_name(self, cID, lastName):
        """Store (upsert) the last known player name for a console."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE console_name SET name = ? WHERE cID = ?", (str(lastName), int(cID)))
                return
            c.execute('INSERT INTO console_name VALUES (?,?)', (int(cID), str(lastName)))

    def get_console_last_name(self, cID):
        """Last known player name, or "(Unknown)" if none was recorded."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
            for row in rows:
                return str(row[1])
            return "(Unknown)"

    def set_console_vr(self, cID, vr):
        """Store (upsert) a console's rating pair ``vr`` = (ctvr, cdvr).

        NOTE(review): presumably CTWW and countdown VR ratings — confirm.
        """
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE console_vr SET ctvr = ?, cdvr = ? WHERE cID = ?", (int(vr[0]), int(vr[1]), int(cID)))
                return
            c.execute('INSERT INTO console_vr VALUES (?,?,?)', (int(cID), int(vr[0]), int(vr[1])))

    def get_console_vr(self, cID):
        """Return the console's (ctvr, cdvr) pair; default is (1000, 1000)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
            for row in rows:
                return (row[1], row[2])
            return (1000, 1000)

    def get_unique_console_vr_count(self):
        """Number of consoles with a stored VR entry."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT COUNT(*) FROM console_vr")
            for row in rows:
                return row[0]
            return 0

    def get_most_users_vr(self, mode, amount):
        """Top ``amount`` consoles by rating: mode 0 sorts by ctvr, else cdvr.

        Returns a list of [cID, rating].
        """
        with self.lock:
            c = self.conn.cursor()
            # Column name comes from a fixed two-way choice, not user input.
            rows = c.execute("SELECT * FROM console_vr ORDER BY {} DESC".format("ctvr" if mode == 0 else "cdvr"))
            i = 0
            ret = []
            for row in rows:
                if (i >= amount): break
                ret.append([row[0], row[1] if mode == 0 else row[2]])
                i += 1
            return ret

    def increment_today_launches(self):
        """Bump the launch counter for today's UTC date (insert on first launch)."""
        with self.lock:
            now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (now,))
            for row in rows:
                c.execute("UPDATE launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
                return
            c.execute('INSERT INTO launch_times VALUES (?,?)', (now, 1))

    def get_daily_launches(self, date: datetime.datetime):
        """Launch count recorded for the given date (0 if none)."""
        with self.lock:
            d = date.strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (d,))
            for row in rows:
                return row[1]
            return 0

    def increment_today_unique_consoles(self):
        """Bump today's (UTC) count of first-time consoles (insert on first)."""
        with self.lock:
            now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (now,))
            for row in rows:
                c.execute("UPDATE new_launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
                return
            c.execute('INSERT INTO new_launch_times VALUES (?,?)', (now, 1))

    def get_daily_unique_consoles(self, date: datetime.datetime):
        """First-time-console count recorded for the given date (0 if none)."""
        with self.lock:
            d = date.strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (d,))
            for row in rows:
                return row[1]
            return 0

    def set_discord_link_console(self, discordID, cID):
        """Link (upsert) a Discord user ID to a console ID."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE discord_link SET discordID = ? WHERE cID = ?", (int(discordID), int(cID)))
                return
            c.execute('INSERT INTO discord_link VALUES (?,?)', (int(cID), int(discordID)))

    def get_discord_link_console(self, cID):
        """Discord user ID linked to the console, or None."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
            for row in rows:
                return row[1]
            return None

    def get_discord_link_user(self, discordID):
        """Console ID linked to the Discord user, or None."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM discord_link WHERE discordID = ?", (int(discordID),))
            for row in rows:
                return row[0]
            return None

    def delete_discord_link_console(self, cID):
        """Remove any Discord link for the console."""
        with self.lock:
            c = self.conn.cursor()
            c.execute("DELETE FROM discord_link WHERE cID = ?", (int(cID),))
f6e7d67ad18d6db3685d0903fe664237cb666d24 | 299 | py | Python | autographql/apps.py | ehsu0407/django-autographql | 95147501b2e542ee2a4b7903546665fb84d5115e | [
"MIT"
] | 1 | 2021-11-20T03:52:51.000Z | 2021-11-20T03:52:51.000Z | autographql/apps.py | ehsu0407/django-autographql | 95147501b2e542ee2a4b7903546665fb84d5115e | [
"MIT"
] | null | null | null | autographql/apps.py | ehsu0407/django-autographql | 95147501b2e542ee2a4b7903546665fb84d5115e | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class AutographqlConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'autographql'
def ready(self):
import autographql.converters
import autographql.filters.converters
import autographql.monkeypatch
| 24.916667 | 56 | 0.735786 | 262 | 0.876254 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.147157 |
f6ea9490bd3489c0fea713bbe5332c6ac5c901a3 | 982 | py | Python | FD_wave_example.py | miaoziemm/Seismic_Forward_Engine | 163a589254389772820744452e406f947e29e085 | [
"MIT"
] | 1 | 2021-11-05T13:17:02.000Z | 2021-11-05T13:17:02.000Z | FD_wave_example.py | miaoziemm/Seismic_Forward_Engine | 163a589254389772820744452e406f947e29e085 | [
"MIT"
] | null | null | null | FD_wave_example.py | miaoziemm/Seismic_Forward_Engine | 163a589254389772820744452e406f947e29e085 | [
"MIT"
] | null | null | null | import taichi as ti
# Project imports: the 2-D finite-difference wave solver, the receiver
# recorder and the visualisation helpers.
from FD_wave.wave_module_2d4d import wave
from FD_wave.receiver_module import receiver
import Visualization.SFE_visual as vis
# Run the taichi kernels on the GPU backend.
ti.init(arch=ti.gpu)
frame = 1  # current time-step index
# Display buffers: 600x600 for the wave-field snapshot and 1000x1000
# for the receiver gather image.
c = ti.field(dtype=ti.f32, shape=(600,600))
c_s = ti.field(dtype=ti.f32, shape=(1000, 1000))
# Build the solver on a 600x600 grid with 10.0 spacing; the remaining
# positional arguments are solver parameters -- see
# FD_wave.wave_module_2d4d.wave for their meaning (TODO confirm).
wave_cs = wave(300, 400, 600, 600, 10.0, 10.0, 1, 50, 1, 1e-3, 3, 20)
wave_cs.mod_default()  # presumably installs the default model -- verify
wave_cs.PML_cal()  # precompute the absorbing (PML) boundary terms
gui = ti.GUI("wave", (600, 600))
receiver_cs = receiver('PIC', 1000, 1000)
receiver_cs.rec_init(600, 600)
gui_rec = ti.GUI("rec", (1000, 1000))
# Time-stepping loop: advance the wave field, record it at the
# receivers, and render both the snapshot and the gather each step.
while frame < 1000:
    wave_cs.wave_field_cal(frame)
    receiver_cs.rec_gather(wave_cs.p, int(frame/1))
    receiver_cs.rec_dynamic(wave_cs.dt, int(frame/1), 12.0)
    vis.SFE_2mix_show(c, wave_cs.p, wave_cs.model_v)
    vis.SFE_gray_show(c_s, receiver_cs.rec_value)
    gui.set_image(c)
    gui_rec.set_image(c_s)
    gui.show()
    gui_rec.show()
    frame += 1
# Export the recorded gather to disk.
path='./data/rec_seis.txt'
receiver_cs.export(receiver_cs.rec_value, path)
| 23.380952 | 69 | 0.704684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.037678 |
f6eaf7e5bb35cf8e7466e792deece88534700ba4 | 3,425 | py | Python | ENGR 102.py | jemmypotter/Python | c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32 | [
"bzip2-1.0.6"
] | null | null | null | ENGR 102.py | jemmypotter/Python | c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32 | [
"bzip2-1.0.6"
] | null | null | null | ENGR 102.py | jemmypotter/Python | c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32 | [
"bzip2-1.0.6"
] | null | null | null | critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'The Night Listener': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
# a dictionary for movie critics and their ratings
# Sanity-check lookups into the nested critics dictionary.
print (critics['Lisa Rose'])#output is {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.5}
print (critics['Lisa Rose']['Lady in the Water']) #output is 2.5
from math import sqrt
#returns a distance based similarity score for person1 and person2
def sim_distance(prefs, person1, person2):
    """Return a Euclidean-distance-based similarity score for two people.

    The score is ``1 / (1 + d)`` where ``d`` is the Euclidean distance
    between the two people's ratings over the items they have both
    rated: identical ratings give 1.0 and the score tends towards 0 as
    the ratings diverge. Returns 0 when no items are shared.

    :Parameters:
        prefs: dict mapping person -> {item: rating}
        person1, person2: keys into *prefs*

    :Returns:
        float (or the int 0 when there are no shared items)
    """
    # Items rated by both people.
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        # No ratings in common: no basis for a similarity score.
        return 0

    # Bug fix: the original built the shared-item set and then fell
    # through (returning None); the score computation had been left
    # commented out.  Compute and return the actual similarity.
    sum_of_squares = sum(
        pow(prefs[person1][item] - prefs[person2][item], 2)
        for item in shared
    )
    return 1 / (1 + sqrt(sum_of_squares))
def distance(dict, per1, per2):
    """Euclidean-distance similarity: ``1 / (1 + d)`` over shared items.

    Returns 0 when the two people have rated no items in common.
    """
    ratings1 = dict[per1]
    ratings2 = dict[per2]

    common = [item for item in ratings1 if item in ratings2]
    if not common:
        return 0

    # Squared Euclidean distance between the two rating vectors,
    # restricted to the items both people rated.
    squared_diffs = sum(
        (ratings1[item] - ratings2[item]) ** 2 for item in common
    )
    return 1 / (1 + sqrt(squared_diffs))
print (distance(critics,'Lisa Rose','Toby'))
print (distance(critics, 'Lisa Rose', 'Gene Seymour'))
# Pearson correlation score
def sim_pearson(dict, pers1, pers2):
    """Pearson correlation coefficient between two people's ratings.

    Only items rated by both people are considered. Returns 0 when
    there are no shared items, or when either person's shared ratings
    have zero variance (the correlation is undefined then).
    """
    shared = [item for item in dict[pers1] if item in dict[pers2]]
    n = len(shared)
    if n == 0:
        return 0

    xs = [dict[pers1][item] for item in shared]
    ys = [dict[pers2][item] for item in shared]

    sum_x = sum(xs)
    sum_y = sum(ys)
    sum_x_sq = sum(x ** 2 for x in xs)
    sum_y_sq = sum(y ** 2 for y in ys)
    sum_xy = sum(x * y for x, y in zip(xs, ys))

    numerator = sum_xy - sum_x * sum_y / n
    denominator = sqrt(
        (sum_x_sq - sum_x ** 2 / n) * (sum_y_sq - sum_y ** 2 / n)
    )
    if denominator == 0:
        # Zero variance in at least one rating vector.
        return 0

    return numerator / denominator
print (sim_pearson(critics,'Lisa Rose','Toby'))
#returns the best matches for person from the critics dict
#number of results and similarity function are optional params
| 38.483146 | 191 | 0.637664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,743 | 0.508905 |
f6ec3cd9ae8ae57a80bd9ab3c56c8873b3588b8f | 110 | py | Python | src/applications/core/admin.py | sleonvaz/rindus-task | fc3fc1454d5b29bb8df6194a86612ad6462ee08c | [
"MIT"
] | null | null | null | src/applications/core/admin.py | sleonvaz/rindus-task | fc3fc1454d5b29bb8df6194a86612ad6462ee08c | [
"MIT"
] | null | null | null | src/applications/core/admin.py | sleonvaz/rindus-task | fc3fc1454d5b29bb8df6194a86612ad6462ee08c | [
"MIT"
] | null | null | null | from django.contrib import admin
from applications.core.models import Clients
admin.site.register(Clients)
| 15.714286 | 44 | 0.827273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f6ecd0aafed1e62eb63756a09afea3562d8bef9e | 99 | py | Python | api/newsCategory/apps.py | jhonatantft/ckl | 8a0d533922fa091ac5f2dbe50caee3920ec2b90d | [
"MIT"
] | null | null | null | api/newsCategory/apps.py | jhonatantft/ckl | 8a0d533922fa091ac5f2dbe50caee3920ec2b90d | [
"MIT"
] | 2 | 2021-05-08T21:26:43.000Z | 2022-02-19T00:26:17.000Z | api/newsCategory/apps.py | jhonatantft/ckl | 8a0d533922fa091ac5f2dbe50caee3920ec2b90d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class NewscategoryConfig(AppConfig):
name = 'newsCategory'
| 16.5 | 36 | 0.777778 | 62 | 0.626263 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.141414 |
f6ed93bc4cbe3c8880ecf1e3e007e8f0fa6018d9 | 77,326 | py | Python | cfdm/data/data.py | NCAS-CMS/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 22 | 2018-11-07T18:16:22.000Z | 2022-03-16T16:05:21.000Z | cfdm/data/data.py | davidhassell/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 119 | 2019-04-08T08:00:24.000Z | 2022-03-22T08:21:22.000Z | cfdm/data/data.py | davidhassell/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 8 | 2019-04-09T10:12:26.000Z | 2021-07-22T02:41:15.000Z | import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import Container
from ..mixin.netcdf import NetCDFHDF5
from . import NumpyArray, abstract
logger = logging.getLogger(__name__)
class Data(Container, NetCDFHDF5, core.Data):
"""An orthogonal multidimensional array with masking and units.
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(
self,
array=None,
units=None,
calendar=None,
fill_value=None,
source=None,
copy=True,
dtype=None,
mask=None,
_use_array=True,
**kwargs,
):
"""**Initialisation**
:Parameters:
array: data_like, optional
The array of values.
{{data_like}}
Ignored if the *source* parameter is set.
*Parameter example:*
``array=[34.6]``
*Parameter example:*
``array=[[1, 2], [3, 4]]``
*Parameter example:*
``array=numpy.ma.arange(10).reshape(2, 1, 5)``
units: `str`, optional
The physical units of the data. Ignored if the *source*
parameter is set.
The units may also be set after initialisation with the
`set_units` method.
*Parameter example:*
``units='km hr-1'``
*Parameter example:*
``units='days since 2018-12-01'``
calendar: `str`, optional
The calendar for reference time units. Ignored if the
*source* parameter is set.
The calendar may also be set after initialisation with the
`set_calendar` method.
*Parameter example:*
``calendar='360_day'``
fill_value: optional
The fill value of the data. By default, or if set to
`None`, the `numpy` fill value appropriate to the array's
data type will be used (see
`numpy.ma.default_fill_value`). Ignored if the *source*
parameter is set.
The fill value may also be set after initialisation with
the `set_fill_value` method.
*Parameter example:*
``fill_value=-999.``
dtype: data-type, optional
The desired data-type for the data. By default the
data-type will be inferred form the *array* parameter.
The data-type may also be set after initialisation
with the `dtype` attribute.
*Parameter example:*
``dtype=float``
*Parameter example:*
``dtype='float32'``
*Parameter example:*
``dtype=numpy.dtype('i2')``
mask: data_like, optional
Apply this mask to the data given by the *array*
parameter. By default, or if *mask* is `None`, no mask
is applied. May be any data_like object that
broadcasts to *array*. Masking will be carried out
where mask elements evaluate to `True`.
{{data_like}}
This mask will applied in addition to any mask already
defined by the *array* parameter.
source: optional
Initialise the array, units, calendar and fill value
from those of *source*.
{{init source}}
copy: `bool`, optional
If False then do not deep copy input parameters prior
to initialisation. By default arguments are deep
copied.
kwargs: ignored
Not used. Present to facilitate subclassing.
"""
if dtype is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = array.astype(dtype)
array = NumpyArray(array)
if mask is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = numpy.ma.array(array, mask=mask)
array = NumpyArray(array)
super().__init__(
array=array,
units=units,
calendar=calendar,
fill_value=fill_value,
source=source,
copy=copy,
_use_array=_use_array,
)
self._initialise_netcdf(source)
def __array__(self, *dtype):
"""The numpy array interface.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
dtype: optional
Typecode or data-type to which the array is cast.
:Returns:
`numpy.ndarray`
An independent numpy array of the data.
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3])
>>> a = numpy.array(d)
>>> print(type(a))
<class 'numpy.ndarray'>
>>> a[0] = -99
>>> d
<{{repr}}{{class}}(3): [1, 2, 3]>
>>> b = numpy.array(d, float)
>>> print(b)
[1. 2. 3.]
"""
array = self.array
if not dtype:
return array
else:
return array.astype(dtype[0], copy=False)
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
try:
shape = self.shape
except AttributeError:
shape = ""
else:
shape = str(shape)
shape = shape.replace(",)", ")")
return f"<{ self.__class__.__name__}{shape}: {self}>"
def __format__(self, format_spec):
"""Interpret format specifiers for size 1 arrays.
**Examples:**
>>> d = {{package}}.{{class}}(9, 'metres')
>>> f"{d}"
'9 metres'
>>> f"{d!s}"
'9 metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(): 9 metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([[9]], 'metres')
>>> f"{d}"
'[[9]] metres'
>>> f"{d!s}"
'[[9]] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(1, 1): [[9]] metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([9, 10], 'metres')
>>> f"{d}"
>>> '[9, 10] metres'
>>> f"{d!s}"
>>> '[9, 10] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(2): [9, 10] metres>'
>>> f"{d:.3f}"
Traceback (most recent call last):
...
ValueError: Can't format Data array of size 2 with format code .3f
"""
if not format_spec:
return super().__format__("")
n = self.size
if n == 1:
return "{x:{f}}".format(x=self.first_element(), f=format_spec)
raise ValueError(
f"Can't format Data array of size {n} with "
f"format code {format_spec}"
)
def __getitem__(self, indices):
"""Return a subspace of the data defined by indices.
d.__getitem__(indices) <==> d[indices]
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__setitem__`, `_parse_indices`
:Returns:
`{{class}}`
The subspace of the data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1].shape
(1, 10, 1)
>>> d[:, 0].shape
(1, 1, 9)
>>> d[..., 6:3:-1, 3:6].shape
(1, 3, 3)
>>> d[0, [2, 9], [4, 8]].shape
(1, 2, 2)
>>> d[0, :, -2].shape
(1, 10, 1)
"""
indices = self._parse_indices(indices)
array = self._get_Array(None)
if array is None:
raise ValueError("No array!!")
array = array[tuple(indices)]
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def __int__(self):
"""Called by the `int` built-in function.
x.__int__() <==> int(x)
"""
if self.size != 1:
raise TypeError(
"only length-1 arrays can be converted to "
f"Python scalars. Got {self}"
)
return int(self.array)
def __iter__(self):
"""Called when an iterator is required.
x.__iter__() <==> iter(x)
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3], 'metres')
>>> for e in d:
... print(repr(e))
...
1
2
3
>>> d = {{package}}.{{class}}([[1, 2], [4, 5]], 'metres')
>>> for e in d:
... print(repr(e))
...
<{{repr}}Data(2): [1, 2] metres>
<{{repr}}Data(2): [4, 5] metres>
>>> d = {{package}}.{{class}}(34, 'metres')
>>> for e in d:
... print(repr(e))
Traceback (most recent call last):
...
TypeError: Iteration over 0-d Data
"""
ndim = self.ndim
if not ndim:
raise TypeError(f"Iteration over 0-d {self.__class__.__name__}")
if ndim == 1:
i = iter(self.array)
while 1:
try:
yield next(i)
except StopIteration:
return
else:
# ndim > 1
for n in range(self.shape[0]):
out = self[n, ...]
out.squeeze(0, inplace=True)
yield out
def __setitem__(self, indices, value):
"""Assign to data elements defined by indices.
d.__setitem__(indices, x) <==> d[indices]=x
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
**Broadcasting**
The value, or values, being assigned must be broadcastable to the
shape defined by the indices, using the numpy broadcasting rules.
**Missing data**
Data array elements may be set to missing values by assigning them
to `masked`. Missing values may be unmasked by assigning them to
any other value.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__getitem__`, `_parse_indices`
:Returns:
`None`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1] = -10
>>> d[:, 0] = range(9)
>>> d[..., 6:3:-1, 3:6] = numpy.arange(-18, -9).reshape(3, 3)
>>> d[0, [2, 9], [4, 8]] = {{package}}.{{class}}([[-2, -3]])
>>> d[0, :, -2] = {{package}}.masked
"""
indices = self._parse_indices(indices)
array = self.array
if value is cfdm_masked or numpy.ma.isMA(value):
# The data is not masked but the assignment is masking
# elements, so turn the non-masked array into a masked
# one.
array = array.view(numpy.ma.MaskedArray)
self._set_subspace(array, indices, numpy.asanyarray(value))
self._set_Array(array, copy=False)
    def __str__(self):
        """Called by the `str` built-in function.

        x.__str__() <==> str(x)

        Produces a short summary of the data: up to the first, middle
        and last elements in brackets, followed by the units and/or
        calendar. Reference-time values are converted to date-times
        for display.

        """
        units = self.get_units(None)
        calendar = self.get_calendar(None)

        # Reference-time units contain the word "since"
        isreftime = False
        if units is not None:
            if isinstance(units, str):
                isreftime = "since" in units
            else:
                units = "??"

        try:
            first = self.first_element()
        except Exception:
            # There is no array, so only show units/calendar
            out = ""
            if units and not isreftime:
                out += f" {units}"
            if calendar:
                out += f" {calendar}"

            return out

        size = self.size
        shape = self.shape
        ndim = self.ndim
        open_brackets = "[" * ndim
        close_brackets = "]" * ndim

        # Masked-element flags for the (first, middle, last) elements
        mask = [False, False, False]

        if size == 1:
            if isreftime:
                # Convert reference time to date-time
                if first is numpy.ma.masked:
                    first = 0
                    mask[0] = True

                try:
                    first = type(self)(
                        numpy.ma.array(first, mask=mask[0]), units, calendar
                    ).datetime_array
                except (ValueError, OverflowError):
                    first = "??"

            out = f"{open_brackets}{first}{close_brackets}"
        else:
            last = self.last_element()
            if isreftime:
                if last is numpy.ma.masked:
                    last = 0
                    mask[-1] = True

                # Convert reference times to date-times
                try:
                    first, last = type(self)(
                        numpy.ma.array(
                            [first, last], mask=(mask[0], mask[-1])
                        ),
                        units,
                        calendar,
                    ).datetime_array
                except (ValueError, OverflowError):
                    first, last = ("??", "??")

            if size > 3:
                # Elide the middle of the data
                out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
            elif shape[-1:] == (3,):
                # Exactly three elements along the last axis: show all
                middle = self.second_element()
                if isreftime:
                    # Convert reference time to date-time
                    if middle is numpy.ma.masked:
                        middle = 0
                        mask[1] = True

                    try:
                        middle = type(self)(
                            numpy.ma.array(middle, mask=mask[1]),
                            units,
                            calendar,
                        ).datetime_array
                    except (ValueError, OverflowError):
                        middle = "??"

                out = (
                    f"{open_brackets}{first}, {middle}, {last}{close_brackets}"
                )
            elif size == 3:
                out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
            else:
                out = f"{open_brackets}{first}, {last}{close_brackets}"

        # For reference times show the calendar; otherwise the units
        if isreftime:
            if calendar:
                out += f" {calendar}"
        elif units:
            out += f" {units}"

        return out
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
def _item(self, index):
"""Return an element of the data as a scalar.
It is assumed, but not checked, that the given index selects
exactly one element.
:Parameters:
index:
:Returns:
The selected element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]], 'km')
>>> x = d._item((0, -1))
>>> print(x, type(x))
3 <class 'int'>
>>> x = d._item((0, 1))
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> d._item((slice(None), slice(1, 2)))
masked
"""
array = self[index].array
if not numpy.ma.isMA(array):
return array.item()
mask = array.mask
if mask is numpy.ma.nomask or not mask.item():
return array.item()
return numpy.ma.masked
def _parse_axes(self, axes):
"""Parses the data axes and returns valid non-duplicate axes.
:Parameters:
axes: (sequence of) `int`
The axes of the data.
{{axes int examples}}
:Returns:
`tuple`
**Examples:**
>>> d._parse_axes(1)
(1,)
>>> e._parse_axes([0, 2])
(0, 2)
"""
if axes is None:
return axes
ndim = self.ndim
if isinstance(axes, int):
axes = (axes,)
axes2 = []
for axis in axes:
if 0 <= axis < ndim:
axes2.append(axis)
elif -ndim <= axis < 0:
axes2.append(axis + ndim)
else:
raise ValueError(f"Invalid axis: {axis!r}")
# Check for duplicate axes
n = len(axes2)
if n > len(set(axes2)) >= 1:
raise ValueError(f"Duplicate axis: {axes2}")
return tuple(axes2)
def _set_Array(self, array, copy=True):
"""Set the array.
.. seealso:: `_set_CompressedArray`
:Parameters:
array: `numpy` array_like or `Array`, optional
The array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_Array(a)
"""
if not isinstance(array, abstract.Array):
if not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = NumpyArray(array)
super()._set_Array(array, copy=copy)
    def _set_CompressedArray(self, array, copy=True):
        """Set the compressed array.

        .. versionadded:: (cfdm) 1.7.11

        .. seealso:: `_set_Array`

        :Parameters:

            array: subclass of `CompressedArray`
                The compressed array to be inserted.

            copy: `bool`, optional
                If False then do not copy the array prior to
                insertion.

        :Returns:

            `None`

        **Examples:**

        >>> d._set_CompressedArray(a)

        """
        # A compressed array is stored exactly like any other Array.
        self._set_Array(array, copy=copy)
    @classmethod
    def _set_subspace(cls, array, indices, value):
        """Set a subspace of the data array defined by indices.

        :Parameters:

            array: `numpy.ndarray`
                The array to be assigned to, in-place.

            indices: `list`
                The parsed indices, one per dimension, each a `slice`
                or a sequence of integers.

            value: array_like
                The value(s) being assigned, broadcastable to the
                shape defined by *indices*.

        :Returns:

            `None`

        """
        # Positions of the dimensions indexed by integer sequences
        # rather than slices.
        axes_with_list_indices = [
            i for i, x in enumerate(indices) if not isinstance(x, slice)
        ]

        if len(axes_with_list_indices) < 2:
            # --------------------------------------------------------
            # At most one axis has a list-of-integers index so we can
            # do a normal numpy assignment
            # --------------------------------------------------------
            array[tuple(indices)] = value
        else:
            # --------------------------------------------------------
            # At least two axes have list-of-integers indices so we
            # can't do a normal numpy assignment
            # --------------------------------------------------------
            # Convert each list of integers to a list of slices that
            # cover the same positions, pairing consecutive integers.
            indices1 = indices[:]
            for i, x in enumerate(indices):
                if i in axes_with_list_indices:
                    # This index is a list of integers
                    y = []
                    args = [iter(x)] * 2
                    for start, stop in itertools.zip_longest(*args):
                        # NOTE(review): a stop of 0 is treated like
                        # the zip_longest fill value (odd-length
                        # list); presumably the integer indices are
                        # ascending and positive -- confirm.
                        if not stop:
                            y.append(slice(start, start + 1))
                        else:
                            step = stop - start
                            stop += 1
                            y.append(slice(start, stop, step))

                    indices1[i] = y
                else:
                    indices1[i] = (x,)

            if numpy.size(value) == 1:
                # A scalar value can be assigned to every position.
                for i in itertools.product(*indices1):
                    array[i] = value

            else:
                # Build matching slices into the value array, two
                # elements at a time along the list-indexed axes.
                indices2 = []
                ndim_difference = array.ndim - numpy.ndim(value)
                for i, n in enumerate(numpy.shape(value)):
                    if n == 1:
                        # Size 1 dimensions broadcast.
                        indices2.append((slice(None),))
                    elif i + ndim_difference in axes_with_list_indices:
                        y = []
                        start = 0
                        while start < n:
                            stop = start + 2
                            y.append(slice(start, stop))
                            start = stop

                        indices2.append(y)
                    else:
                        indices2.append((slice(None),))

                for i, j in zip(
                    itertools.product(*indices1), itertools.product(*indices2)
                ):
                    array[i] = value[j]
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def compressed_array(self):
"""Returns an independent numpy array of the compressed data.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_compressed_axes`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`numpy.ndarray`
An independent numpy array of the compressed data.
**Examples:**
>>> a = d.compressed_array
"""
ca = self._get_Array(None)
if not ca.get_compression_type():
raise ValueError("not compressed: can't get compressed array")
return ca.compressed_array
    @property
    def datetime_array(self):
        """Returns an independent numpy array of datetimes.

        Specifically, returns an independent numpy array containing
        the date-time objects corresponding to times since a reference
        date.

        Only applicable for reference time units.

        If the calendar has not been set then the CF default calendar of
        'standard' (i.e. the mixed Gregorian/Julian calendar as defined by
        Udunits) will be used.

        Conversions are carried out with the `netCDF4.num2date` function.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `array`, `datetime_as_string`

        :Returns:

            `numpy.ndarray`
                An independent numpy array of the date-time objects.

        """
        array = self.array

        mask = None
        if numpy.ma.isMA(array):
            # num2date has issues if the mask is nomask
            mask = array.mask
            if mask is numpy.ma.nomask or not numpy.ma.is_masked(array):
                # Nothing is actually masked, so hand num2date a plain
                # ndarray view.
                mask = None
                array = array.view(numpy.ndarray)

        if mask is not None and not array.ndim:
            # Fix until num2date copes with scalar arrays containing
            # missing data
            return array

        array = netCDF4.num2date(
            array,
            units=self.get_units(None),
            calendar=self.get_calendar("standard"),
            only_use_cftime_datetimes=True,
        )

        if mask is None:
            # There is no missing data
            array = numpy.array(array, dtype=object)
        else:
            # There is missing data
            array = numpy.ma.masked_where(mask, array)
            if not numpy.ndim(array):
                # Scalar result: rebuild as a fully-masked 0-d object
                # array.
                array = numpy.ma.masked_all((), dtype=object)

        return array
@property
def datetime_as_string(self):
"""Returns an independent numpy array with datetimes as strings.
Specifically, returns an independent numpy array containing
string representations of times since a reference date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
"standard" (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `array`, `datetime_array`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time strings.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> print(d.datetime_as_string)
['2019-01-01 00:00:00' '2019-02-01 00:00:00' '2019-03-01 00:00:00']
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> print(d.datetime_as_string)
['2019-01-02 00:00:00' '2019-02-03 00:00:00' '2019-03-01 00:00:00']
"""
return self.datetime_array.astype(str)
@property
def mask(self):
"""The Boolean missing data mask of the data array.
The Boolean mask has True where the data array has missing data
and False otherwise.
:Returns:
`{{class}}`
The Boolean mask as data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.ma.array(
... [[280.0, -99, -99, -99],
... [281.0, 279.0, 278.0, 279.5]],
... mask=[[0, 1, 1, 1], [0, 0, 0, 0]]
... ))
>>> d
<{{repr}}Data(2, 4): [[280.0, ..., 279.5]]>
>>> print(d.array)
[[280.0 -- -- --]
[281.0 279.0 278.0 279.5]]
>>> d.mask
<{{repr}}Data(2, 4): [[False, ..., False]]>
>>> print(d.mask.array)
[[False True True True]
[False False False False]]
"""
return type(self)(numpy.ma.getmaskarray(self.array))
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def any(self):
"""Test whether any data array elements evaluate to True.
Performs a logical or over the data array and returns the
result. Masked values are considered as False during computation.
:Returns:
`bool`
`True` if any data array elements evaluate to True,
otherwise `False`.
**Examples:**
>>> d = {{package}}.{{class}}([[0, 0, 0]])
>>> d.any()
False
>>> d[0, 0] = {{package}}.masked
>>> print(d.array)
[[-- 0 0]]
>>> d.any()
False
>>> d[0, 1] = 3
>>> print(d.array)
[[-- 3 0]]
>>> d.any()
True
>>> d[...] = {{package}}.masked
>>> print(d.array)
[[-- -- --]]
>>> d.any()
False
"""
masked = self.array.any()
if masked is numpy.ma.masked:
masked = False
return masked
@_inplace_enabled(default=False)
def apply_masking(
self,
fill_values=None,
valid_min=None,
valid_max=None,
valid_range=None,
inplace=False,
):
"""Apply masking.
Masking is applied according to the values of the keyword
parameters.
Elements that are already masked remain so.
.. versionadded:: (cfdm) 1.8.2
.. seealso:: `get_fill_value`, `mask`
:Parameters:
fill_values: `bool` or sequence of scalars, optional
Specify values that will be set to missing data. Data
elements exactly equal to any of the values are set to
missing data.
If True then the value returned by the `get_fill_value`
method, if such a value exists, is used.
Zero or more values may be provided in a sequence of
scalars.
*Parameter example:*
Specify a fill value of 999: ``fill_values=[999]``
*Parameter example:*
Specify fill values of 999 and -1.0e30:
``fill_values=[999, -1.0e30]``
*Parameter example:*
Use the fill value already set for the data:
``fill_values=True``
*Parameter example:*
Use no fill values: ``fill_values=False`` or
``fill_value=[]``
valid_min: number, optional
A scalar specifying the minimum valid value. Data elements
strictly less than this number will be set to missing
data.
valid_max: number, optional
A scalar specifying the maximum valid value. Data elements
strictly greater than this number will be set to missing
data.
valid_range: (number, number), optional
A vector of two numbers specifying the minimum and maximum
valid values, equivalent to specifying values for both
*valid_min* and *valid_max* parameters. The *valid_range*
parameter must not be set if either *valid_min* or
*valid_max* is defined.
*Parameter example:*
``valid_range=[-999, 10000]`` is equivalent to setting
``valid_min=-999, valid_max=10000``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with masked values. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(12).reshape(3, 4), 'm')
>>> d[1, 1] = {{package}}.masked
>>> print(d.array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking().array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0, 11]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 --]]
>>> print(d.apply_masking(valid_min=3).array)
[[-- -- -- 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(valid_max=6).array)
[[0 1 2 3]
[4 -- 6 --]
[-- -- -- --]]
>>> print(d.apply_masking(valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 7]
[8 -- -- --]]
>>> d.set_fill_value(7)
>>> print(d.apply_masking(fill_values=True).array)
[[0 1 2 3]
[4 -- 6 --]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=True,
... valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 --]
[8 -- -- --]]
"""
if valid_range is not None:
if valid_min is not None or valid_max is not None:
raise ValueError(
"Can't set 'valid_range' parameter with either the "
"'valid_min' nor 'valid_max' parameters"
)
try:
if len(valid_range) != 2:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
except TypeError:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
valid_min, valid_max = valid_range
d = _inplace_enabled_define_and_cleanup(self)
if fill_values is None:
fill_values = False
if isinstance(fill_values, bool):
if fill_values:
fill_value = self.get_fill_value(None)
if fill_value is not None:
fill_values = (fill_value,)
else:
fill_values = ()
else:
fill_values = ()
else:
try:
_ = iter(fill_values)
except TypeError:
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
else:
if isinstance(fill_values, str):
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
mask = None
if fill_values:
array = self.array
mask = array == fill_values[0]
for fill_value in fill_values[1:]:
mask |= array == fill_value
if valid_min is not None:
if mask is None:
array = self.array
mask = array < valid_min
else:
mask |= array < valid_min
if valid_max is not None:
if mask is None:
array = self.array
mask = array > valid_max
else:
mask |= array > valid_max
if mask is not None:
array = numpy.ma.where(mask, cfdm_masked, array)
d._set_Array(array, copy=False)
return d
    def copy(self, array=True):
        """Return a deep copy.

        ``d.copy()`` is equivalent to ``copy.deepcopy(d)``.

        :Parameters:

            array: `bool`, optional
                If False then do not copy the array. By default the
                array is copied.

        :Returns:

            `{{class}}`
                The deep copy.

        **Examples:**

        >>> e = d.copy()
        >>> e = d.copy(array=False)

        """
        # Delegates entirely to the parent class's deep copy.
        return super().copy(array=array)
    def creation_commands(
        self, name="data", namespace=None, indent=0, string=True
    ):
        """Return the commands that would create the data object.

        .. versionadded:: (cfdm) 1.8.7.0

        :Parameters:

            name: `str` or `None`, optional
                Set the variable name of the `Data` object that the
                commands create.

            {{namespace: `str`, optional}}

            {{indent: `int`, optional}}

            {{string: `bool`, optional}}

        :Returns:

            {{returns creation_commands}}

        **Examples:**

        >>> d = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]],
        ...                           units='degrees_east')
        >>> print(d.creation_commands())
        data = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]], units='degrees_east', dtype='f8')

        >>> d = {{package}}.{{class}}(['alpha', 'beta', 'gamma', 'delta'],
        ...                           mask = [1, 0, 0, 0])
        >>> d.creation_commands(name='d', namespace='', string=False)
        ["d = Data(['', 'beta', 'gamma', 'delta'], dtype='U5', mask=Data([True, False, False, False], dtype='b1'))"]

        """
        # Remember the caller's namespace, as the mask's commands are
        # generated with the same one
        namespace0 = namespace
        if namespace is None:
            namespace = self._package() + "."
        elif namespace and not namespace.endswith("."):
            namespace += "."

        mask = self.mask
        if mask.any():
            # The data has missing values, so a separate 'mask'
            # variable will also be created; the data values are
            # written out with missing values filled.
            if name == "mask":
                raise ValueError(
                    "When the data is masked, the 'name' parameter "
                    "can not have the value 'mask'"
                )
            masked = True
            array = self.filled().array.tolist()
        else:
            masked = False
            array = self.array.tolist()

        # Each optional keyword becomes either an empty string or a
        # ", key=value" fragment of the constructor call
        units = self.get_units(None)
        if units is None:
            units = ""
        else:
            units = f", units={units!r}"
        calendar = self.get_calendar(None)
        if calendar is None:
            calendar = ""
        else:
            calendar = f", calendar={calendar!r}"
        fill_value = self.get_fill_value(None)
        if fill_value is None:
            fill_value = ""
        else:
            fill_value = f", fill_value={fill_value}"

        # E.g. "<f8" -> "f8"
        dtype = self.dtype.descr[0][1][1:]

        if masked:
            # Generate the commands for the mask, then turn the
            # "mask = ..." assignment into a "mask=..." keyword
            mask = mask.creation_commands(
                name="mask", namespace=namespace0, indent=0, string=True
            )
            mask = mask.replace("mask = ", "mask=", 1)
            mask = f", {mask}"
        else:
            mask = ""

        if name is None:
            name = ""
        else:
            name = name + " = "

        out = []
        out.append(
            f"{name}{namespace}{self.__class__.__name__}({array}{units}"
            f"{calendar}, dtype={dtype!r}{mask}{fill_value})"
        )

        if string:
            indent = " " * indent
            out[0] = indent + out[0]
            out = ("\n" + indent).join(out)

        return out
@_inplace_enabled(default=False)
def filled(self, fill_value=None, inplace=False):
"""Replace masked elements with the fill value.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
fill_value: scalar, optional
The fill value. By default the fill returned by
`get_fill_value` is used, or if this is not set then the
netCDF default fill value for the data type is used (as
defined by `netCDF.fillvals`).
{{inplace: `bool`, optional}}
:Returns:
`Data` or `None`
The filled data, or `None` if the operation was in-place.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]])
>>> print(d.filled().array)
[[1 2 3]]
>>> d[0, 0] = {{package}}.masked
>>> print(d.filled().array)
[[-9223372036854775806 2 3]]
>>> d.set_fill_value(-99)
>>> print(d.filled().array)
[[-99 2 3]]
>>> print(d.filled(1e10).array)
[[10000000000 2 3]]
"""
d = _inplace_enabled_define_and_cleanup(self)
if fill_value is None:
fill_value = d.get_fill_value(None)
if fill_value is None:
default_fillvals = netCDF4.default_fillvals
fill_value = default_fillvals.get(d.dtype.str[1:], None)
if fill_value is None and d.dtype.kind in ("SU"):
fill_value = default_fillvals.get("S1", None)
if fill_value is None: # should not be None by this stage
raise ValueError(
"Can't determine fill value for "
f"data type {d.dtype.str!r}"
) # pragma: no cover
array = self.array
if numpy.ma.isMA(array):
array = array.filled(fill_value)
d._set_Array(array, copy=False)
return d
@_inplace_enabled(default=False)
def insert_dimension(self, position=0, inplace=False):
"""Expand the shape of the data array.
Inserts a new size 1 axis, corresponding to a given position in
the data array shape.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `squeeze`, `transpose`
:Parameters:
position: `int`, optional
Specify the position that the new axis will have in the
data array. By default the new axis has position 0, the
slowest varying position. Negative integers counting from
the last position are allowed.
*Parameter example:*
``position=2``
*Parameter example:*
``position=-1``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with expanded axes. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.insert_dimension('domainaxis3').shape
(1, 96, 73, 19)
>>> d.insert_dimension('domainaxis3', position=3).shape
(19, 73, 96, 1)
>>> d.insert_dimension('domainaxis3', position=-1, inplace=True)
>>> d.shape
(19, 73, 1, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
# Parse position
ndim = d.ndim
if -ndim - 1 <= position < 0:
position += ndim + 1
elif not 0 <= position <= ndim:
raise ValueError(
f"Can't insert dimension: Invalid position: {position!r}"
)
array = numpy.expand_dims(self.array, position)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def get_count(self, default=ValueError()):
"""Return the count variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_index`, `get_list`
:Parameters:
default: optional
Return the value of the *default* parameter if a count
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The count variable.
**Examples:**
>>> c = d.get_count()
"""
try:
return self._get_Array().get_count()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no count variable"
)
    def get_index(self, default=ValueError()):
        """Return the index variable for a compressed array.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `get_count`, `get_list`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if an
                index variable has not been set. If set to an
                `Exception` instance then it will be raised instead.

        :Returns:

                The index variable.

        **Examples:**

        >>> i = d.get_index()

        """
        # NOTE(review): presumably `_get_Array()` raises ValueError
        # when no underlying array is set, and AttributeError covers
        # arrays without an index variable — confirm against
        # `_get_Array`'s implementation.
        try:
            return self._get_Array().get_index()
        except (AttributeError, ValueError):
            return self._default(
                default, f"{self.__class__.__name__!r} has no index variable"
            )
def get_list(self, default=ValueError()):
"""Return the list variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_count`, `get_index`
:Parameters:
default: optional
Return the value of the *default* parameter if an index
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The list variable.
**Examples:**
>>> l = d.get_list()
"""
try:
return self._get_Array().get_list()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no list variable"
)
def get_compressed_dimension(self, default=ValueError()):
"""Returns the compressed dimension's array position.
That is, returns the position of the compressed dimension
in the compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_axes`,
`get_compression_type`
:Parameters:
default: optional
Return the value of the *default* parameter there is no
compressed dimension. If set to an `Exception` instance
then it will be raised instead.
:Returns:
`int`
The position of the compressed dimension in the compressed
array.
**Examples:**
>>> d.get_compressed_dimension()
2
"""
try:
return self._get_Array().get_compressed_dimension()
except (AttributeError, ValueError):
return self._default(
default,
f"{ self.__class__.__name__!r} has no compressed dimension",
)
def _parse_indices(self, indices):
"""Parse indices of the data and return valid indices in a list.
:Parameters:
indices: `tuple` (not a `list`!)
:Returns:
`list`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d._parse_indices((slice(None, None, None), 1, 2))
[slice(None, None, None), slice(1, 2, 1), slice(2, 3, 1)]
>>> d._parse_indices((1,))
[slice(1, 2, 1), slice(None, None, None), slice(None, None, None)]
"""
shape = self.shape
parsed_indices = []
if not isinstance(indices, tuple):
indices = (indices,)
# Initialise the list of parsed indices as the input indices
# with any Ellipsis objects expanded
length = len(indices)
n = len(shape)
ndim = n
for index in indices:
if index is Ellipsis:
m = n - length + 1
parsed_indices.extend([slice(None)] * m)
n -= m
else:
parsed_indices.append(index)
n -= 1
length -= 1
len_parsed_indices = len(parsed_indices)
if ndim and len_parsed_indices > ndim:
raise IndexError(
f"Invalid indices for data with shape {shape}: "
f"{parsed_indices}"
)
if len_parsed_indices < ndim:
parsed_indices.extend([slice(None)] * (ndim - len_parsed_indices))
if not ndim and parsed_indices:
raise IndexError(
"Scalar data can only be indexed with () or Ellipsis"
)
for i, (index, size) in enumerate(zip(parsed_indices, shape)):
if isinstance(index, slice):
continue
if isinstance(index, int):
# E.g. 43 -> slice(43, 44, 1)
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
if getattr(getattr(index, "dtype", None), "kind", None) == "b":
# E.g. index is [True, False, True] -> [0, 2]
#
# Convert Booleans to non-negative integers. We're
# assuming that anything with a dtype attribute also
# has a size attribute.
if index.size != size:
raise IndexError(
"Invalid indices for data "
f"with shape {shape}: {parsed_indices}"
)
index = numpy.where(index)[0]
if not numpy.ndim(index):
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
len_index = len(index)
if len_index == 1:
# E.g. [3] -> slice(3, 4, 1)
index = index[0]
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
# E.g. [1, 3, 4] -> [1, 3, 4]
pass
parsed_indices[i] = index
return parsed_indices
def maximum(self, axes=None):
"""Return the maximum of an array or the maximum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `minimum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the maximum. By default the
maximum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Maximum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.max()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[23]]]]>
>>> print(e.array)
[[[[23]]]]
>>> e = d.max(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[8, ..., 23]]]]>
>>> print(e.array)
[[[[ 8 9 10 11]]
[[20 21 22 23]]]]
>>> e = d.max([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[11, 23]]]]>
>>> print(e.array)
[[[[11]]
[[23]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find maximum of data: {error}")
array = self.array
array = numpy.amax(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def minimum(self, axes=None):
"""Return the minimum of an array or minimum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `maximum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the minimum. By default the
minimum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Minimum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.min()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[0]]]]>
>>> print(e.array)
[[[[0]]]]
>>> e = d.min(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[0, ..., 15]]]]>
>>> print(e.array)
[[[[ 0 1 2 3]]
[[12 13 14 15]]]]
>>> e = d.min([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[0, 12]]]]>
>>> print(e.array)
[[[[ 0]]
[[12]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find minimum of data: {error}")
array = self.array
array = numpy.amin(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
@_inplace_enabled(default=False)
def squeeze(self, axes=None, inplace=False):
"""Remove size 1 axes from the data.
By default all size 1 axes are removed, but particular axes may be
selected with the keyword arguments.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
The positions of the size one axes to be removed. By
default all size one axes are removed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The data with removed data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(1, 73, 1, 96)
>>> f.squeeze().shape
(73, 96)
>>> d.squeeze(0).shape
(73, 1, 96)
>>> d.squeeze([-3, 2]).shape
(73, 96)
>>> d.squeeze(2, inplace=True)
>>> d.shape
(1, 73, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't squeeze data: {error}")
shape = d.shape
if axes is None:
axes = tuple([i for i, n in enumerate(shape) if n == 1])
else:
# Check the squeeze axes
for i in axes:
if shape[i] > 1:
raise ValueError(
"Can't squeeze data: "
f"Can't remove axis of size {shape[i]}"
)
if not axes:
return d
array = self.array
array = numpy.squeeze(array, axes)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def sum(self, axes=None):
"""Return the sum of an array or the sum along axes.
Missing data array elements are omitted from the calculation.
.. seealso:: `max`, `min`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to calculate the sum. By default the
sum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
The sum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.sum()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[276]]]]>
>>> print(e.array)
[[[[276]]]]
>>> e = d.sum(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[12, ..., 57]]]]>
>>> print(e.array)
[[[[12 15 18 21]]
[[48 51 54 57]]]]
>>> e = d.sum([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[66, 210]]]]>
>>> print(e.array)
[[[[ 66]]
[[210]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't sum data: {error}")
array = self.array
array = numpy.sum(array, axis=axes, keepdims=True)
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
@_inplace_enabled(default=False)
def transpose(self, axes=None, inplace=False):
"""Permute the axes of the data array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `squeeze`
:Parameters:
axes: (sequence of) `int`
The new axis order. By default the order is reversed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with permuted data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.transpose().shape
(96, 73, 19)
>>> d.transpose([1, 0, 2]).shape
(73, 19, 96)
>>> d.transpose([-1, 0, 1], inplace=True)
>>> d.shape
(96, 19, 73)
"""
d = _inplace_enabled_define_and_cleanup(self)
ndim = d.ndim
# Parse the axes. By default, reverse the order of the axes.
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't transpose data: {error}")
if axes is None:
if ndim <= 1:
return d
axes = tuple(range(ndim - 1, -1, -1))
elif len(axes) != ndim:
raise ValueError(
f"Can't transpose data: Axes don't match array: {axes}"
)
# Return unchanged if axes are in the same order as the data
if axes == tuple(range(ndim)):
return d
array = self.array
array = numpy.transpose(array, axes=axes)
d._set_Array(array, copy=False)
return d
def get_compressed_axes(self):
"""Returns the dimensions that are compressed in the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`list`
The dimensions of the data that are compressed to a single
dimension in the underlying array. If the data are not
compressed then an empty list is returned.
**Examples:**
>>> d.shape
(2, 3, 4, 5, 6)
>>> d.compressed_array.shape
(2, 14, 6)
>>> d.get_compressed_axes()
[1, 2, 3]
>>> d.get_compression_type()
''
>>> d.get_compressed_axes()
[]
"""
ca = self._get_Array(None)
if ca is None:
return []
return ca.get_compressed_axes()
def get_compression_type(self):
"""Returns the type of compression applied to the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `compression_axes`,
`get_compressed_dimension`
:Returns:
`str`
The compression type. An empty string means that no
compression has been applied.
**Examples:**
>>> d.get_compression_type()
''
>>> d.get_compression_type()
'gathered'
>>> d.get_compression_type()
'ragged contiguous'
"""
ma = self._get_Array(None)
if ma is None:
return ""
return ma.get_compression_type()
@classmethod
def empty(cls, shape, dtype=None, units=None, calendar=None):
"""Create a new data array without initialising the elements.
Note that the mask of the returned empty data is hard.
.. seealso:: `full`, `ones`, `zeros`
:Parameters:
shape: `int` or `tuple` of `int`
The shape of the new array.
dtype: `numpy.dtype` or any object convertible to `numpy.dtype`
The data-type of the new array. By default the
data-type is ``float``.
units: `str` or `Units`
The units for the empty data array.
calendar: `str`, optional
The calendar for reference time units.
:Returns:
`{{class}}`
**Examples:**
>>> d = {{package}}.{{class}}.empty((96, 73))
"""
return cls(
numpy.empty(shape=shape, dtype=dtype),
units=units,
calendar=calendar,
)
    @_manage_log_level_via_verbosity
    def equals(
        self,
        other,
        rtol=None,
        atol=None,
        verbose=None,
        ignore_data_type=False,
        ignore_fill_value=False,
        ignore_compression=True,
        ignore_type=False,
        _check_values=True,
    ):
        """Whether two data arrays are the same.

        Equality is strict by default. This means that for data
        arrays to be considered equal:

        * the units and calendar must be the same,

        ..

        * the fill value must be the same (see the
          *ignore_fill_value* parameter), and

        ..

        * the arrays must have same shape and data type, the same
          missing data mask, and be element-wise equal (see the
          *ignore_data_type* parameter).

        {{equals tolerance}}

        Any compression is ignored by default, with only the arrays
        in their uncompressed forms being compared. See the
        *ignore_compression* parameter.

        Any type of object may be tested but, in general, equality is
        only possible with another data array, or a subclass of one.
        See the *ignore_type* parameter.

        .. versionadded:: (cfdm) 1.7.0

        :Parameters:

            other:
                The object to compare for equality.

            {{atol: number, optional}}

            {{rtol: number, optional}}

            ignore_fill_value: `bool`, optional
                If True then the fill value is omitted from the
                comparison.

            {{ignore_data_type: `bool`, optional}}

            {{ignore_compression: `bool`, optional}}

            {{ignore_type: `bool`, optional}}

            {{verbose: `int` or `str` or `None`, optional}}

        :Returns:

            `bool`
                Whether the two data arrays are equal.

        **Examples:**

        >>> d.equals(d)
        True
        >>> d.equals(d.copy())
        True
        >>> d.equals('not a data array')
        False

        """
        # Generic preprocessing shared by all `equals` methods: the
        # result is either a definitive True/False answer, or else
        # the (possibly converted) object to carry on comparing
        # against.
        pp = super()._equals_preprocess(
            other, verbose=verbose, ignore_type=ignore_type
        )
        if pp is True or pp is False:
            return pp

        other = pp

        # Check that each instance has the same shape
        if self.shape != other.shape:
            logger.info(
                f"{self.__class__.__name__}: Different shapes: "
                f"{self.shape} != {other.shape}"
            )  # pragma: no cover

            return False

        # Check that each instance has the same fill value
        if not ignore_fill_value and self.get_fill_value(
            None
        ) != other.get_fill_value(None):
            logger.info(
                f"{self.__class__.__name__}: Different fill value: "
                f"{self.get_fill_value(None)} != {other.get_fill_value(None)}"
            )  # pragma: no cover

            return False

        # Check that each instance has the same data type
        if not ignore_data_type and self.dtype != other.dtype:
            logger.info(
                f"{self.__class__.__name__}: Different data types: "
                f"{self.dtype} != {other.dtype}"
            )  # pragma: no cover

            return False

        # Return now if we have been asked to not check the array
        # values
        if not _check_values:
            return True

        # Check that each instance has the same units
        for attr in ("units", "calendar"):
            x = getattr(self, "get_" + attr)(None)
            y = getattr(other, "get_" + attr)(None)
            if x != y:
                logger.info(
                    f"{self.__class__.__name__}: Different {attr}: "
                    f"{x!r} != {y!r}"
                )  # pragma: no cover

                return False

        if not ignore_compression:
            # --------------------------------------------------------
            # Check for equal compression types
            # --------------------------------------------------------
            compression_type = self.get_compression_type()
            if compression_type != other.get_compression_type():
                logger.info(
                    f"{self.__class__.__name__}: Different compression types: "
                    f"{compression_type} != {other.get_compression_type()}"
                )  # pragma: no cover

                return False

            # --------------------------------------------------------
            # Check for equal compressed array values
            # --------------------------------------------------------
            if compression_type:
                if not self._equals(
                    self.compressed_array,
                    other.compressed_array,
                    rtol=rtol,
                    atol=atol,
                ):
                    logger.info(
                        f"{self.__class__.__name__}: Different compressed "
                        "array values"
                    )  # pragma: no cover

                    return False

        # ------------------------------------------------------------
        # Check for equal (uncompressed) array values
        # ------------------------------------------------------------
        if not self._equals(self.array, other.array, rtol=rtol, atol=atol):
            logger.info(
                f"{self.__class__.__name__}: Different array values "
                f"(atol={atol}, rtol={rtol})"
            )  # pragma: no cover

            return False

        # ------------------------------------------------------------
        # Still here? Then the two data arrays are equal.
        # ------------------------------------------------------------
        return True
def get_filenames(self):
"""Return the name of the file containing the data array.
:Returns:
`set`
The file name in normalised, absolute form. If the
data is are memory then an empty `set` is returned.
**Examples:**
>>> f = {{package}}.example_field(0)
>>> {{package}}.write(f, 'temp_file.nc')
>>> g = {{package}}.read('temp_file.nc')[0]
>>> d = g.data
>>> d.get_filenames()
{'/data/user/temp_file.nc'}
>>> d[...] = -99
>>> d.get_filenames()
set()
"""
source = self.source(None)
if source is None:
return set()
try:
filename = source.get_filename()
except AttributeError:
return set()
else:
return set((abspath(filename),))
def first_element(self):
"""Return the first element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `last_element`, `second_element`
:Returns:
The first element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.first_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.first_element()
>>> print(x, type(x))
1 <class 'int'>
>>> d[0, 0] = {{package}}.masked
>>> y = d.first_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.first_element()
>>> print(x, type(x))
foo <class 'str'>
"""
return self._item((slice(0, 1),) * self.ndim)
    @_inplace_enabled(default=False)
    def flatten(self, axes=None, inplace=False):
        """Flatten axes of the data.

        Any subset of the axes may be flattened.

        The shape of the data may change, but the size will not.

        The flattening is executed in row-major (C-style) order. For
        example, the array ``[[1, 2], [3, 4]]`` would be flattened
        across both dimensions to ``[1 2 3 4]``.

        .. versionadded:: (cfdm) 1.7.11

        .. seealso:: `insert_dimension`, `squeeze`, `transpose`

        :Parameters:

            axes: (sequence of) `int`, optional
                Select the axes. By default all axes are flattened.
                No axes are flattened if *axes* is an empty sequence.

                {{axes int examples}}

            inplace: `bool`, optional
                If True then do the operation in-place and return
                `None`.

        :Returns:

            `Data` or `None`
                The flattened data, or `None` if the operation was
                in-place.

        **Examples**

        >>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
        >>> e = d.flatten()
        >>> e
        <{{repr}}Data(24): [0, ..., 23]>
        >>> e = d.flatten([1, 3])
        >>> e
        <{{repr}}Data(1, 8, 3): [[[0, ..., 23]]]>

        """
        d = _inplace_enabled_define_and_cleanup(self)
        try:
            axes = d._parse_axes(axes)
        except ValueError as error:
            raise ValueError(f"Can't flatten data: {error}")

        ndim = d.ndim
        if ndim <= 1:
            # 0-d and 1-d data are already flat
            return d

        if axes is None:
            # By default flatten all axes
            axes = tuple(range(ndim))
        else:
            if len(axes) <= 1:
                # Flattening fewer than two axes is a null operation
                return d

            # Note that it is important that the first axis in the
            # list is the left-most flattened axis
            axes = sorted(axes)

        # Save the shape before we transpose
        shape = list(d.shape)

        # Move the axes to be flattened so that they are contiguous,
        # starting at the position of the left-most flattened axis
        order = [i for i in range(ndim) if i not in axes]
        order[axes[0] : axes[0]] = axes
        d.transpose(order, inplace=True)

        # The flattened axes are replaced by a single axis whose size
        # is the product of their sizes
        new_shape = [n for i, n in enumerate(shape) if i not in axes]
        new_shape.insert(axes[0], numpy.prod([shape[i] for i in axes]))

        array = d.array.reshape(new_shape)

        out = type(self)(
            array,
            units=d.get_units(None),
            calendar=d.get_calendar(None),
            fill_value=d.get_fill_value(None),
        )

        if inplace:
            # NOTE(review): the in-place result is produced by
            # replacing the instance dictionary wholesale — confirm
            # that this composes correctly with the @_inplace_enabled
            # decorator.
            d.__dict__ = out.__dict__

        return out
def last_element(self):
"""Return the last element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `second_element`
:Returns:
The last element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.last_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.last_element()
>>> print(x, type(x))
4 <class 'int'>
>>> d[-1, -1] = {{package}}.masked
>>> y = d.last_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.last_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(-1, None),) * self.ndim)
def second_element(self):
"""Return the second element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `last_element`
:Returns:
The second element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.second_element()
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> y = d.second_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.second_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(0, 1),) * (self.ndim - 1) + (slice(1, 2),))
def to_memory(self):
"""Bring data on disk into memory and retain it there.
There is no change to data that is already in memory.
:Returns:
`None`
**Examples:**
>>> f = {{package}}.example_field(4)
>>> f.data
<{{repr}}Data(3, 26, 4): [[[290.0, ..., --]]] K>
>>> f.data.to_memory()
"""
self._set_Array(self.source().to_memory())
@_inplace_enabled(default=False)
def uncompress(self, inplace=False):
"""Uncompress the underlying array.
.. versionadded:: (cfdm) 1.7.3
.. seealso:: `array`, `compressed_array`, `source`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The uncompressed data, or `None` if the operation was
in-place.
**Examples:**
>>> d.get_compression_type()
'ragged contiguous'
>>> d.source()
<RaggedContiguousArray(4, 9): >
>>> d.uncompress(inpalce=True)
>>> d.get_compression_type()
''
>>> d.source()
<NumpyArray(4, 9): >
"""
d = _inplace_enabled_define_and_cleanup(self)
if d.get_compression_type():
d._set_Array(d.array, copy=False)
return d
def unique(self):
"""The unique elements of the data.
The unique elements are sorted into a one dimensional array. with
no missing values.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`{{class}}`
The unique elements.
**Examples:**
>>> d = {{package}}.{{class}}([[4, 2, 1], [1, 2, 3]], 'metre')
>>> d.unique()
<{{repr}}Data(4): [1, ..., 4] metre>
>>> d[1, -1] = {{package}}.masked
>>> d.unique()
<{{repr}}Data(3): [1, 2, 4] metre>
"""
array = self.array
array = numpy.unique(array)
if numpy.ma.is_masked(array):
array = array.compressed()
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
# ----------------------------------------------------------------
# Aliases
# ----------------------------------------------------------------
    def max(self, axes=None):
        """Alias for `maximum`."""
        return self.maximum(axes=axes)

    def min(self, axes=None):
        """Alias for `minimum`."""
        return self.minimum(axes=axes)
| 28.692393 | 116 | 0.477239 | 76,883 | 0.994271 | 1,226 | 0.015855 | 33,647 | 0.435132 | 0 | 0 | 48,821 | 0.631366 |
f6efdf1a1e2ca3bc054152966617543b747082cb | 9,458 | py | Python | alipay/aop/api/domain/ArInvoiceReceiptQueryOpenApiDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/ArInvoiceReceiptQueryOpenApiDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/ArInvoiceReceiptQueryOpenApiDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class ArInvoiceReceiptQueryOpenApiDTO(object):
def __init__(self):
self._arrangement_no = None
self._id = None
self._inst_id = None
self._inv_dt = None
self._inv_mode = None
self._invoice_amt = None
self._invoiced_amt = None
self._ip_id = None
self._ip_role_id = None
self._link_invoice_amt = None
self._out_biz_no = None
self._pd_code = None
self._settle_type = None
self._statement_bill_no = None
self._status = None
self._tax_rate = None
self._tax_type = None
    # Plain read/write accessors for the scalar fields; no validation
    # or conversion is performed.
    @property
    def arrangement_no(self):
        return self._arrangement_no

    @arrangement_no.setter
    def arrangement_no(self, value):
        self._arrangement_no = value

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def inst_id(self):
        return self._inst_id

    @inst_id.setter
    def inst_id(self, value):
        self._inst_id = value

    @property
    def inv_dt(self):
        return self._inv_dt

    @inv_dt.setter
    def inv_dt(self, value):
        self._inv_dt = value

    @property
    def inv_mode(self):
        return self._inv_mode

    @inv_mode.setter
    def inv_mode(self, value):
        self._inv_mode = value
@property
def invoice_amt(self):
return self._invoice_amt
@invoice_amt.setter
def invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoice_amt = value
else:
self._invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def invoiced_amt(self):
return self._invoiced_amt
@invoiced_amt.setter
def invoiced_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoiced_amt = value
else:
self._invoiced_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def ip_id(self):
return self._ip_id
@ip_id.setter
def ip_id(self, value):
self._ip_id = value
@property
def ip_role_id(self):
return self._ip_role_id
@ip_role_id.setter
def ip_role_id(self, value):
self._ip_role_id = value
@property
def link_invoice_amt(self):
return self._link_invoice_amt
@link_invoice_amt.setter
def link_invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._link_invoice_amt = value
else:
self._link_invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def pd_code(self):
return self._pd_code
@pd_code.setter
def pd_code(self, value):
self._pd_code = value
@property
def settle_type(self):
return self._settle_type
@settle_type.setter
def settle_type(self, value):
self._settle_type = value
@property
def statement_bill_no(self):
return self._statement_bill_no
@statement_bill_no.setter
def statement_bill_no(self, value):
self._statement_bill_no = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tax_rate(self):
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value):
self._tax_rate = value
@property
def tax_type(self):
return self._tax_type
@tax_type.setter
def tax_type(self, value):
self._tax_type = value
def to_alipay_dict(self):
params = dict()
if self.arrangement_no:
if hasattr(self.arrangement_no, 'to_alipay_dict'):
params['arrangement_no'] = self.arrangement_no.to_alipay_dict()
else:
params['arrangement_no'] = self.arrangement_no
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.inst_id:
if hasattr(self.inst_id, 'to_alipay_dict'):
params['inst_id'] = self.inst_id.to_alipay_dict()
else:
params['inst_id'] = self.inst_id
if self.inv_dt:
if hasattr(self.inv_dt, 'to_alipay_dict'):
params['inv_dt'] = self.inv_dt.to_alipay_dict()
else:
params['inv_dt'] = self.inv_dt
if self.inv_mode:
if hasattr(self.inv_mode, 'to_alipay_dict'):
params['inv_mode'] = self.inv_mode.to_alipay_dict()
else:
params['inv_mode'] = self.inv_mode
if self.invoice_amt:
if hasattr(self.invoice_amt, 'to_alipay_dict'):
params['invoice_amt'] = self.invoice_amt.to_alipay_dict()
else:
params['invoice_amt'] = self.invoice_amt
if self.invoiced_amt:
if hasattr(self.invoiced_amt, 'to_alipay_dict'):
params['invoiced_amt'] = self.invoiced_amt.to_alipay_dict()
else:
params['invoiced_amt'] = self.invoiced_amt
if self.ip_id:
if hasattr(self.ip_id, 'to_alipay_dict'):
params['ip_id'] = self.ip_id.to_alipay_dict()
else:
params['ip_id'] = self.ip_id
if self.ip_role_id:
if hasattr(self.ip_role_id, 'to_alipay_dict'):
params['ip_role_id'] = self.ip_role_id.to_alipay_dict()
else:
params['ip_role_id'] = self.ip_role_id
if self.link_invoice_amt:
if hasattr(self.link_invoice_amt, 'to_alipay_dict'):
params['link_invoice_amt'] = self.link_invoice_amt.to_alipay_dict()
else:
params['link_invoice_amt'] = self.link_invoice_amt
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.pd_code:
if hasattr(self.pd_code, 'to_alipay_dict'):
params['pd_code'] = self.pd_code.to_alipay_dict()
else:
params['pd_code'] = self.pd_code
if self.settle_type:
if hasattr(self.settle_type, 'to_alipay_dict'):
params['settle_type'] = self.settle_type.to_alipay_dict()
else:
params['settle_type'] = self.settle_type
if self.statement_bill_no:
if hasattr(self.statement_bill_no, 'to_alipay_dict'):
params['statement_bill_no'] = self.statement_bill_no.to_alipay_dict()
else:
params['statement_bill_no'] = self.statement_bill_no
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tax_rate:
if hasattr(self.tax_rate, 'to_alipay_dict'):
params['tax_rate'] = self.tax_rate.to_alipay_dict()
else:
params['tax_rate'] = self.tax_rate
if self.tax_type:
if hasattr(self.tax_type, 'to_alipay_dict'):
params['tax_type'] = self.tax_type.to_alipay_dict()
else:
params['tax_type'] = self.tax_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ArInvoiceReceiptQueryOpenApiDTO()
if 'arrangement_no' in d:
o.arrangement_no = d['arrangement_no']
if 'id' in d:
o.id = d['id']
if 'inst_id' in d:
o.inst_id = d['inst_id']
if 'inv_dt' in d:
o.inv_dt = d['inv_dt']
if 'inv_mode' in d:
o.inv_mode = d['inv_mode']
if 'invoice_amt' in d:
o.invoice_amt = d['invoice_amt']
if 'invoiced_amt' in d:
o.invoiced_amt = d['invoiced_amt']
if 'ip_id' in d:
o.ip_id = d['ip_id']
if 'ip_role_id' in d:
o.ip_role_id = d['ip_role_id']
if 'link_invoice_amt' in d:
o.link_invoice_amt = d['link_invoice_amt']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'pd_code' in d:
o.pd_code = d['pd_code']
if 'settle_type' in d:
o.settle_type = d['settle_type']
if 'statement_bill_no' in d:
o.statement_bill_no = d['statement_bill_no']
if 'status' in d:
o.status = d['status']
if 'tax_rate' in d:
o.tax_rate = d['tax_rate']
if 'tax_type' in d:
o.tax_type = d['tax_type']
return o
| 32.279863 | 86 | 0.591774 | 9,083 | 0.960351 | 0 | 0 | 4,336 | 0.458448 | 0 | 0 | 1,084 | 0.114612 |
f6f037d5301de39668019ccf1a2625274e621032 | 14,051 | py | Python | Model/lookalike-model/tests/pipeline/test_main_clean.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | Model/lookalike-model/tests/pipeline/test_main_clean.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | Model/lookalike-model/tests/pipeline/test_main_clean.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import yaml
from pyspark import SparkContext
from pyspark.sql import SparkSession, HiveContext
from pyspark.sql.functions import col, udf, collect_set
from pyspark.sql.types import IntegerType, BooleanType
from lookalike_model.pipeline import main_clean, util
from data_generator import *
class TestMainClean(unittest.TestCase):
    """Unit tests for lookalike_model.pipeline.main_clean.

    Methods prefixed with ``_test_`` are deliberately hidden from the
    unittest collector: they need a live Spark/Hive environment plus the
    fixture helpers from data_generator, and are enabled manually by
    removing the underscore prefix.
    """

    def setUp(self):
        # Initialize the Spark session.
        self.spark = SparkSession.builder.appName('unit test').getOrCreate()
        self.spark.sparkContext.setLogLevel('ERROR')
        # Hive context used by the table-backed tests (_test_clean_logs,
        # _test_run). Without this, those tests fail with AttributeError.
        self.hive_context = HiveContext(self.spark.sparkContext)

    def compare_list(self, list1, list2):
        """Return True when both lists hold the same rows, ignoring order."""
        if len(list1) != len(list2):
            return False

        # Sort rows by a canonical string key so unhashable rows order stably.
        def _key(x):
            return '-'.join([str(_) for _ in x])
        return sorted(list1, key=_key) == sorted(list2, key=_key)

    # Tests user uniqueness: users with conflicting age/gender are removed.
    def _test_clean_persona(self):
        print('*** Running test_clean_persona ***')
        data = [
            ('0000001', 0, 0),
            ('0000001', 0, 0),  # duplicate entry, duplicates will be removed
            ('0000002', 1, 1),
            ('0000003', 2, 2),
            ('0000003', 2, 3),  # duplicate entry, duplicates will be removed
        ]
        schema = StructType([
            StructField("aid", StringType(), True),
            StructField("gender", StringType(), True),
            StructField("age", StringType(), True)
        ])
        df = self.spark.createDataFrame(self.spark.sparkContext.parallelize(data), schema)
        df = main_clean.clean_persona(df, 1).select('aid', 'gender', 'age')
        expected_output = [
            ('0000001', 0, 0),
            ('0000002', 1, 1)
        ]
        print("Original DataFrame df:")
        print(df.show(100, False))
        print("Expected DataFrame df_expected:")
        print(expected_output)
        self.assertTrue(self.compare_list(df.collect(), expected_output))

    # Tests the method that adds the 'day' column with just the date from the action_time column.
    def _test_add_day(self):
        print('*** Running test_add_day ***')
        data = [
            ('0000001', '2022-02-19 12:34:56.78'),
            ('0000002', '2022-02-20 12:34:56.78')
        ]
        schema = StructType([
            StructField("aid", StringType(), True),
            StructField("action_time", StringType(), True)
        ])
        df = self.spark.createDataFrame(self.spark.sparkContext.parallelize(data), schema)
        # Run the method to be tested.
        df = main_clean.add_day(df)
        print(df.show(100, False))
        expected_output = [
            ('0000001', '2022-02-19 12:34:56.78', '2022-02-19'),
            ('0000002', '2022-02-20 12:34:56.78', '2022-02-20')
        ]
        schema_expected = StructType([
            StructField("aid", StringType(), True),
            StructField("action_time", StringType(), True),
            StructField("day", StringType(), True),
        ])
        df_expected = self.spark.createDataFrame(self.spark.sparkContext.parallelize(expected_output), schema_expected)
        df_expected = main_clean.add_day(df_expected)
        print("Original DataFrame df:")
        print(df.show(100, False))
        print("Expected DataFrame df_expected:")
        print(df_expected.show(100, False))
        # Assert the comparison rather than printing it (the original used
        # the Python 2 statement `print result`, a SyntaxError in Python 3).
        result = sorted(df.collect()) == sorted(df_expected.collect())
        self.assertTrue(result)

    # Testing the addition of the 'aid_bucket' column with a hash mod bucket_num value.
    def _test_add_aid_bucket(self):
        print('*** Running test_add_aid_bucket ***')
        # Input
        data = [
            ('0000001', 0, 0),
            ('0000002', 1, 1),
            ('0000003', 2, 2),
        ]
        schema = StructType([
            StructField("aid", StringType(), True),
            StructField("gender", StringType(), True),
            StructField("age", StringType(), True)
        ])
        df = self.spark.createDataFrame(self.spark.sparkContext.parallelize(data), schema)
        # Run the method to be tested.
        aid_bucket_num = 4
        df = main_clean.add_aid_bucket(df, aid_bucket_num)
        expected_output = [
            ('0000001', 0, 0),
            ('0000002', 1, 1),
            ('0000003', 2, 2)
        ]
        # Build the expectation via the module under test (the original
        # called a bare, undefined `add_aid_bucket`).
        df_expected = main_clean.add_aid_bucket(
            self.spark.createDataFrame(self.spark.sparkContext.parallelize(expected_output), schema),
            aid_bucket_num)
        print("Original DataFrame df:")
        print(df.show(100, False))
        print("Expected DataFrame df_expected:")
        print(df_expected.show(100, False))
        result = sorted(df.collect()) == sorted(df_expected.collect())
        self.assertTrue(result)

    # Testing the method that joins the log rows with the user persona, keyword, and media category.
    def _test_clean_batched_log(self):
        print('*** Running test_clean_batched_log ***')
        # Get the data inputs for the test.
        df_log = create_raw_log(self.spark)
        df_persona = create_cleaned_persona(self.spark)
        df_keywords = create_keywords(self.spark)
        conditions = {
            'new_slot_id_list': [
                'abcdef0', 'abcdef1', 'abcdef2', 'abcdef3', 'abcdef4',
                'abcdef5', 'abcdef6', 'abcdef7', 'abcdef8', 'abcdef9'
            ],
            'new_slot_id_app_name_list': [
                'Huawei Magazine', 'Huawei Browser', 'Huawei Video', 'Huawei Music', 'Huawei Reading',
                'Huawei Magazine', 'Huawei Browser', 'Huawei Video', 'Huawei Music', 'Huawei Reading'
            ]
        }
        # Run the method to be tested.
        aid_bucket_num = 4
        df = main_clean.clean_batched_log(df_log, df_persona, df_keywords, aid_bucket_num)
        print(df.sort('aid').show(100, False))
        # Validate the output. Arguments must follow validate_cleaned_log's
        # signature (df, conditions, df_persona, ...); the original call had
        # conditions and df_persona swapped.
        self.validate_cleaned_log(df, conditions, df_persona, df_keywords, df_log, aid_bucket_num)

    # Testing data look up and cleaning process for clicklog and showlog data.
    def _test_clean_logs(self):
        print('*** Running test_clean_logs ***')
        with open('config_clean.yml', 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
        showlog_table = cfg['showlog_table_name']
        showlog_output_table = cfg['pipeline']['main_clean']['showlog_output_table']
        clicklog_table = cfg['clicklog_table_name']
        clicklog_output_table = cfg['pipeline']['main_clean']['clicklog_output_table']
        log_table_names = (showlog_table, showlog_output_table, clicklog_table, clicklog_output_table)
        print(showlog_table)
        print(clicklog_table)
        print(showlog_output_table)
        print(clicklog_output_table)
        # Create the persona and keyword dataframes.
        df_persona = create_cleaned_persona(self.spark)
        df_keywords = create_keywords(self.spark)
        # Create the clicklog and showlog tables.
        create_clicklog_table(self.spark, clicklog_table)
        create_showlog_table(self.spark, showlog_table)
        # Drop the output tables.
        util.drop_table(self.hive_context, showlog_output_table)
        util.drop_table(self.hive_context, clicklog_output_table)
        # Run the method to be tested.
        main_clean.clean_logs(cfg, df_persona, df_keywords, log_table_names)
        # Validate the output tables.
        conditions = cfg['pipeline']['main_clean']['conditions']
        df_log = create_raw_log(self.spark)
        # Validate the cleaned clicklog table.
        df_clicklog = util.load_df(self.hive_context, clicklog_output_table)
        print(df_clicklog.sort('action_time').show(100, False))
        self.validate_cleaned_log(df_clicklog, conditions, df_persona, df_keywords, df_log, cfg['pipeline']['main_clean']['aid_bucket_num'])
        # Validate the cleaned showlog table (the original mistakenly loaded
        # the clicklog output table here, so showlog output was never checked).
        df_showlog = util.load_df(self.hive_context, showlog_output_table)
        self.validate_cleaned_log(df_showlog, conditions, df_persona, df_keywords, df_log, cfg['pipeline']['main_clean']['aid_bucket_num'])

    # Testing full data cleaning process for persona, clicklog, and showlog data.
    def _test_run(self):
        print('*** Running test_run ***')
        with open('config_clean.yml', 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
        # Create the persona, keywords, clicklog and showlog tables.
        persona_table = cfg['persona_table_name']
        keywords_table = cfg['keywords_table']
        showlog_table = cfg['showlog_table_name']
        clicklog_table = cfg['clicklog_table_name']
        effective_keywords_table = cfg['pipeline']['main_keywords']['keyword_output_table']
        create_persona_table(self.spark, persona_table)
        create_keywords_table(self.spark, keywords_table)
        create_clicklog_table(self.spark, clicklog_table)
        create_showlog_table(self.spark, showlog_table)
        create_effective_keywords_table(self.spark, effective_keywords_table)
        # Drop the output tables.
        showlog_output_table = cfg['pipeline']['main_clean']['showlog_output_table']
        clicklog_output_table = cfg['pipeline']['main_clean']['clicklog_output_table']
        persona_output_table = cfg['pipeline']['main_clean']['persona_output_table']
        util.drop_table(self.hive_context, showlog_output_table)
        util.drop_table(self.hive_context, clicklog_output_table)
        util.drop_table(self.hive_context, persona_output_table)
        # Run the method to be tested.
        main_clean.run(self.hive_context, cfg)
        # Validate the output tables.
        conditions = cfg['pipeline']['main_clean']['conditions']
        bucket_num = cfg['pipeline']['main_clean']['aid_bucket_num']
        df_keywords = util.load_df(self.hive_context, keywords_table)
        # run() does filtering on the effective keywords so we need to filter
        # the raw logs with the spread app ids when validating the output.
        effective_spread_app_ids = ['C000', 'C001', 'C002', 'C003', 'C004', 'C010', 'C011', 'C012', 'C013', 'C014', ]
        df_log = create_raw_log(self.spark)
        df_log = self.filter_spread_app_ids(df_log, effective_spread_app_ids)
        # Validate the cleaned persona table.
        # NOTE(review): validate_clean_persona is not defined anywhere in
        # this class — implement it (or remove this call) before enabling
        # this test, otherwise it fails with AttributeError here.
        df_persona = util.load_df(self.hive_context, persona_output_table)
        self.validate_clean_persona(df_persona, bucket_num)
        # Validate the cleaned clicklog table.
        df_clicklog = util.load_df(self.hive_context, clicklog_output_table)
        self.validate_cleaned_log(df_clicklog, conditions, df_persona, df_keywords, df_log, bucket_num)
        print_df_generator_code(df_clicklog.sort('aid'))
        # Validate the cleaned showlog table (the original mistakenly loaded
        # the clicklog output table here).
        df_showlog = util.load_df(self.hive_context, showlog_output_table)
        self.validate_cleaned_log(df_showlog, conditions, df_persona, df_keywords, df_log, bucket_num)
        print_df_generator_code(df_showlog.sort('aid'))

    def filter_spread_app_ids(self, df, spread_app_ids):
        """Return only the rows whose spread_app_id is in the inclusion set."""
        # User defined function to return if the keyword is in the inclusion set.
        _udf = udf(lambda x: x in spread_app_ids, BooleanType())
        # Return the filtered dataframe.
        return df.filter(_udf(col('spread_app_id')))

    # ========================================
    # Helper methods
    # ========================================
    def validate_cleaned_log(self, df, conditions, df_persona, df_keywords, df_log, bucket_num):
        """Assert schema, row count, and per-row join values of a cleaned log."""
        # Verify the column names.
        columns = ['spread_app_id', 'aid', 'adv_id', 'media', 'slot_id', 'device_name',
                   'net_type', 'price_model', 'action_time', 'gender', 'age',
                   'gender_index', 'keyword', 'day', 'aid_bucket']
        for name in columns:
            self.assertTrue(name in df.columns)

        # Verify the number of rows.
        # The raw log count has one entry that will be filtered out so adjusted accordingly.
        self.assertEqual(df.count(), df_log.count() - 1)

        # Helper method for verifying table joins.
        def assert_row_value(row, df_match, field_name, join_field):
            self.assertEqual(row[field_name], df_match.filter(col(join_field) == row[join_field]).collect()[0][field_name])

        # Check the row values.
        for row in df.collect():
            self.assertTrue(row['slot_id'] in conditions['new_slot_id_list'])
            self.assertEqual(row['day'], row['action_time'].split()[0])
            self.assertTrue(int(row['aid_bucket']) < bucket_num)
            assert_row_value(row, df_persona, 'gender', 'aid')
            assert_row_value(row, df_persona, 'age', 'aid')
            assert_row_value(row, df_keywords, 'keyword', 'spread_app_id')
            assert_row_value(row, df_keywords, 'keyword_index', 'spread_app_id')
            assert_row_value(row, df_log, 'adv_id', 'aid')
            assert_row_value(row, df_log, 'media', 'aid')
            assert_row_value(row, df_log, 'slot_id', 'aid')
            assert_row_value(row, df_log, 'device_name', 'aid')
            assert_row_value(row, df_log, 'net_type', 'aid')
            assert_row_value(row, df_log, 'price_model', 'aid')
            assert_row_value(row, df_log, 'action_time', 'aid')
# Entry point: run the unittest suite when executed as a script.
if __name__ == '__main__':
    # Run the unit tests.
    unittest.main()
| 44.46519 | 142 | 0.647925 | 12,853 | 0.914739 | 0 | 0 | 0 | 0 | 0 | 0 | 4,814 | 0.342609 |
f6f09641b35ebd494e5edb18cd6f3b2853290c77 | 8,663 | py | Python | src/nemo/datasets.py | thomasjo/nemo-redux | c4196c0d99633dca011d60008be0cb7667c348b7 | [
"MIT"
] | null | null | null | src/nemo/datasets.py | thomasjo/nemo-redux | c4196c0d99633dca011d60008be0cb7667c348b7 | [
"MIT"
] | null | null | null | src/nemo/datasets.py | thomasjo/nemo-redux | c4196c0d99633dca011d60008be0cb7667c348b7 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
import numpy as np
import torch
import torchvision as vision
import yaml
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler
from torch.utils.data.dataset import Subset
from torchvision.datasets import ImageFolder
import nemo.transforms
class ObjectDataset(Dataset):
    """Instance-segmentation dataset backed by a directory of images, label
    masks, and VGG Image Annotator (VIA) exports.

    Expected layout under ``root_dir``:
      - ``images/*.png``  input images
      - ``masks/*.png``   label images where each object has a distinct pixel value
      - ``via.json`` / ``via_attributes.json``  VIA annotation exports
    """

    def __init__(self, root_dir, transform=None, max_image_size=2000):
        super().__init__()
        if not isinstance(root_dir, Path):
            root_dir = Path(root_dir)
        # Optional joint (image, target) transform applied in __getitem__.
        self.transform = transform
        # Images whose largest side exceeds this are downscaled in __getitem__.
        self.max_image_size = max_image_size
        self.annotations = self.load_annotations(root_dir)
        self.image_files = sorted(root_dir.glob("images/*.png"))
        self.mask_files = sorted(root_dir.glob("masks/*.png"))
        # Run a naive "sanity check" on the dataset.
        # NOTE(review): map() stops at the shorter sequence, so a count
        # mismatch between images and masks is not caught by the second assert.
        assert len(self.annotations) == len(self.image_files)
        assert all(map(lambda a, b: a.name == b.name, self.image_files, self.mask_files))
        # TODO(thomasjo): Check order of objects in mask images vs. annotation file.
        classes, class_to_idx = self.find_classes(root_dir)
        self.classes = classes
        self.class_to_idx = class_to_idx

    def __getitem__(self, idx):
        """Return ``(image, target)`` where target follows the torchvision
        detection convention (image_id, boxes, labels, masks, area, iscrowd)."""
        image_file = self.image_files[idx]
        image = Image.open(image_file)
        # Downscale so the largest side is at most max_image_size; the same
        # scale is applied to the mask below to keep them aligned.
        image_size = image.size
        largest_dim = max(image_size)
        scale_factor = largest_dim / self.max_image_size
        if scale_factor > 1:
            image_size = tuple(round(d / scale_factor) for d in image_size)
            image = image.resize(image_size, resample=Image.NEAREST)

        mask_image = Image.open(self.mask_files[idx])
        if scale_factor > 1:
            mask_image = mask_image.resize(image_size, resample=Image.NEAREST)

        # Each distinct non-zero pixel value in the mask is one object instance.
        mask = np.array(mask_image)
        obj_ids = np.unique(mask)
        obj_ids = obj_ids[1:]  # Skip background label
        # Per-object boolean masks, shape (num_objects, H, W).
        masks = np.equal(mask, obj_ids[:, None, None])

        # Tight bounding box and area derived from each object mask.
        areas, boxes = [], []
        for i in range(len(obj_ids)):
            point = np.nonzero(masks[i])
            xmin = np.min(point[1])
            xmax = np.max(point[1])
            ymin = np.min(point[0])
            ymax = np.max(point[0])

            areas.append((xmax - xmin) * (ymax - ymin))
            boxes.append([xmin, ymin, xmax, ymax])

        # NOTE(review): labels come from the annotation file in region order
        # and are assumed to line up with the sorted mask pixel values — see
        # the TODO in __init__; confirm before relying on class labels.
        labels = [int(label) for xy_points, label in self.annotations[image_file.name]]

        target = {
            "image_id": torch.tensor([idx]),
            "boxes": torch.as_tensor(boxes, dtype=torch.float32),
            "labels": torch.as_tensor(labels, dtype=torch.int64),
            "masks": torch.as_tensor(masks, dtype=torch.uint8),
            "area": torch.as_tensor(areas, dtype=torch.float32),
            "iscrowd": torch.zeros(len(obj_ids), dtype=torch.int64),
        }

        if self.transform:
            image, target = self.transform(image, target)

        return image, target

    def __len__(self):
        """Number of images (and thus samples) in the dataset."""
        return len(self.image_files)

    def load_annotations(self, root_dir):
        """Parse ``via.json`` into {filename: [(polygon_xy_points, label), ...]}."""
        json_file = root_dir / "via.json"
        with json_file.open() as fp:
            raw_data = json.load(fp)

        annotations = {}
        for entry in raw_data.values():
            filename = entry["filename"]
            masks = []
            for region in entry["regions"]:
                # Each "region" contains "shape_attributes" that contains the mask shape (typically polygon) XY coordinates,
                # and a "region_attributes" that holds the object label.
                region_attr, shape_attr = region["region_attributes"], region["shape_attributes"]
                assert shape_attr["name"] == "polygon"
                # Extract object mask polygon xy coordinates.
                xy_points = list(zip(shape_attr["all_points_x"], shape_attr["all_points_y"]))
                # Extract object label.
                label = int(region_attr["category"])
                masks.append((xy_points, label))
            annotations[filename] = masks

        return annotations

    def find_classes(self, root_dir):
        """Read class names from ``via_attributes.json``.

        Returns (classes, class_to_idx).
        NOTE(review): despite its name, class_to_idx maps integer index ->
        class name (keys are the VIA category ids cast to int).
        """
        json_file = root_dir / "via_attributes.json"
        with json_file.open() as fp:
            raw_data = json.load(fp)

        categories = raw_data["region"]["category"]["options"]
        class_to_idx = {int(k): v for k, v in categories.items()}
        classes = list(class_to_idx.values())

        return classes, class_to_idx
def initialize_detection_datasets(data_dir, no_augmentation=False):
    """Build the train/test ObjectDatasets for detection.

    Returns (train_dataset, test_dataset, num_classes); num_classes includes
    one extra label for the implicit "background" class.
    """
    base_transform = nemo.transforms.Compose([nemo.transforms.ToTensor()])

    if no_augmentation:
        train_transform = base_transform
    else:
        train_transform = nemo.transforms.Compose([
            nemo.transforms.ToTensor(),
            nemo.transforms.RandomHorizontalFlip(),
            nemo.transforms.RandomVerticalFlip(),
            nemo.transforms.GammaJitter(gamma=0.2),
            nemo.transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.01, hue=0.01),
        ])

    train_dataset = ObjectDataset(data_dir / "train", transform=train_transform)
    test_dataset = ObjectDataset(data_dir / "test", transform=base_transform)

    # Add one class for "background".
    num_classes = len(train_dataset.classes) + 1

    return train_dataset, test_dataset, num_classes
def detection_dataloaders(
    data_dir,
    batch_size=1,
    subset_indices=None,
    no_augmentation=False,
    num_workers=0,
):
    """Build train/test DataLoaders for the detection datasets.

    When *subset_indices* is given, both splits are restricted to those
    indices. Returns (train_dataloader, test_dataloader, num_classes).
    """
    train_dataset, test_dataset, num_classes = initialize_detection_datasets(data_dir, no_augmentation)

    if subset_indices is not None:
        train_dataset = Subset(train_dataset, subset_indices)
        test_dataset = Subset(test_dataset, subset_indices)

    # Options shared by both loaders; only shuffling differs.
    shared_options = {
        "batch_size": batch_size,
        "collate_fn": collate_fn,
        "num_workers": num_workers,
    }
    train_dataloader = DataLoader(train_dataset, shuffle=True, **shared_options)
    test_dataloader = DataLoader(test_dataset, shuffle=False, **shared_options)

    return train_dataloader, test_dataloader, num_classes
# TODO(thomasjo): Rename to something more descriptive.
def collate_fn(batch):
    """Transpose a batch of (image, target) pairs into (images, targets) lists."""
    image_group, target_group = zip(*batch)
    return list(image_group), list(target_group)
def classification_dataloaders(
    data_dir,
    batch_size=32,
    no_augmentation=False,
    num_workers=0,
):
    """Create train/validation/test DataLoaders for image classification.

    The validation split is a stratified 20% carve-out of the "train"
    folder (loaded without augmentation); the test loader reads "test".

    Returns:
        (train_dataloader, val_dataloader, test_dataloader)

    Note: the default was previously ``num_workers=None``, which
    torch.utils.data.DataLoader rejects (it requires a non-negative int);
    0 matches detection_dataloaders and DataLoader's own default.
    """
    # TODO(thomasjo): Consider calculating moments on-demand.
    # Fetch dataset moments (normalization mean/std) from metadata.
    moments = load_metadata(data_dir)

    transform = vision.transforms.Compose([
        vision.transforms.Resize(256),
        vision.transforms.CenterCrop(224),
        vision.transforms.ToTensor(),
        vision.transforms.Normalize(**moments),
    ])

    train_transform = vision.transforms.Compose([
        vision.transforms.Resize(256),
        vision.transforms.RandomCrop(224),
        vision.transforms.RandomHorizontalFlip(),
        nemo.transforms.RandomDiscreteRotation(angles=[0, 90, 180, 270]),
        vision.transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.05, hue=0.05),
        vision.transforms.ToTensor(),
        vision.transforms.Normalize(**moments),
    ])

    if no_augmentation:
        train_transform = transform

    train_dataset = ImageFolder(data_dir / "train", transform=train_transform)
    train_dataset.moments = moments  # Embed training dataset moments into dataset object

    # Validation dataset reads the same folder, but without augmentation.
    val_dataset = ImageFolder(data_dir / "train", transform=transform)

    # Split the training dataset into training and validation subsets,
    # stratified on the class labels (80/20).
    indices = list(range(len(train_dataset)))
    labels = [y for x, y in train_dataset.samples]
    train_idx, val_idx = train_test_split(indices, train_size=0.8, stratify=labels)
    train_sampler, val_sampler = SubsetRandomSampler(train_idx), SubsetRandomSampler(val_idx)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers, pin_memory=True)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, num_workers=num_workers, pin_memory=True)

    test_dataset = ImageFolder(data_dir / "test", transform=transform)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True)

    return train_dataloader, val_dataloader, test_dataloader
def load_metadata(data_dir: Path):
    """Read and return the dataset's ``metadata.yaml`` as a Python object."""
    with (data_dir / "metadata.yaml").open("r") as stream:
        return yaml.safe_load(stream)
| 34.931452 | 136 | 0.669399 | 4,131 | 0.476856 | 0 | 0 | 0 | 0 | 0 | 0 | 1,041 | 0.120166 |
f6f1181b9d63e4b1e1908205531b518c477401af | 6,255 | py | Python | ever2text/converter.py | nicholaskuechler/ever2text | dc21ca86b691f2ee793ce273035341e55374448e | [
"MIT"
] | 13 | 2016-12-16T18:57:53.000Z | 2021-04-30T22:14:45.000Z | ever2text/converter.py | nicholaskuechler/ever2text | dc21ca86b691f2ee793ce273035341e55374448e | [
"MIT"
] | 2 | 2017-01-21T19:46:10.000Z | 2020-11-16T11:03:27.000Z | ever2text/converter.py | nicholaskuechler/ever2text | dc21ca86b691f2ee793ce273035341e55374448e | [
"MIT"
] | 3 | 2016-09-16T02:10:04.000Z | 2021-04-30T22:16:08.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
import json
import os
import sys
from dateutil.parser import parse
from html2text import HTML2Text
from lxml import etree
from bs4 import BeautifulSoup
class EverConverter(object):
    """Evernote conversion runner.

    Reads an Evernote ``.enex`` export and emits each note either as JSON
    on stdout (when *output_dir* is None) or as one ``.txt`` file per note
    in *output_dir*, converted to plain text or Markdown depending on *fmt*.
    """

    # Note attributes extracted from the enex XML.
    fieldnames = ['createdate', 'modifydate', 'content', 'tags']
    # strftime format used for the create/modify timestamps.
    date_fmt = '%h %d %Y %H:%M:%S'

    # Single-pass translation table used by sanitize_note_title(): path
    # separators and whitespace map to filename-safe characters; other
    # problematic characters are dropped. This replaces the original chain
    # of single-character .replace() calls with identical results.
    _TITLE_TRANSLATION = {
        ord(' '): '_',
        ord('/'): '-',
        ord('|'): '-',
        ord(':'): '-',
        ord('>'): '-',
        ord('<'): '-',
        ord('('): None,
        ord(')'): None,
        ord('?'): None,
        ord('*'): None,
        ord('!'): None,
        ord('$'): None,
        ord('"'): None,
        ord("'"): None,
        ord('®'): None,
        ord('\u2122'): None,  # trademark sign
    }

    def __init__(self, enex_filename, output_dir=None, fmt="text",
                 preserve_title=False, verbose=False):
        self.enex_filename = os.path.expanduser(enex_filename)
        # When no output directory is given, notes are dumped to stdout as JSON.
        self.stdout = False
        if output_dir is None:
            self.stdout = True
            self.output_dir = output_dir
        else:
            self.output_dir = os.path.expanduser(output_dir)
        self.fmt = fmt
        self.preserve_title = preserve_title
        self.verbose = verbose
        # NOTE(review): this flag is currently unused; _convert_note_to_text
        # always uses BeautifulSoup for the plain-text path. Kept for
        # backward compatibility with any external readers.
        self.use_beautifulsoup = True

    def _load_xml(self, enex_file):
        """Parse the enex XML stream; exit the process on malformed input."""
        try:
            parser = etree.XMLParser(huge_tree=True)
            xml_tree = etree.parse(enex_file, parser)
        except (etree.XMLSyntaxError, ) as e:
            print('Could not parse XML')
            print(e)
            sys.exit(1)
        return xml_tree

    def prepare_notes(self, xml_tree):
        """Extract all notes from the parsed enex tree into plain dicts."""
        notes = []
        raw_notes = xml_tree.xpath('//note')
        for note in raw_notes:
            note_dict = {}
            title = note.xpath('title')[0].text
            note_dict['title'] = title

            # Use dateutil to figure out these dates, e.g. 20110610T182917Z;
            # fall back to (near) the Unix epoch when 'created' is missing,
            # and to the creation time when 'updated' is missing.
            created_string = parse('19700101T000017Z')
            if note.xpath('created'):
                created_string = parse(note.xpath('created')[0].text)
            updated_string = created_string
            if note.xpath('updated'):
                updated_string = parse(note.xpath('updated')[0].text)
            note_dict['createdate'] = created_string.strftime(self.date_fmt)
            note_dict['modifydate'] = updated_string.strftime(self.date_fmt)

            tags = [tag.text for tag in note.xpath('tag')]
            note_dict['tags'] = tags

            note_dict['content'] = ''
            content = note.xpath('content')
            if content:
                raw_text = content[0].text
                # TODO: Option to go to just plain text, no markdown
                converted_text = self._convert_note_to_text(title, raw_text)
                note_dict['content'] = converted_text
            if self.verbose:
                print("note_dict: {}".format(note_dict))
            notes.append(note_dict)
        return notes

    def convert(self):
        """Run the conversion end to end: parse the enex file and write output."""
        if not os.path.exists(self.enex_filename):
            print("File does not exist: {}".format(self.enex_filename))
            sys.exit(1)
        # Context manager guarantees the file is closed (the original opened
        # and closed it manually).
        with open(self.enex_filename) as enex_file:
            xml_tree = self._load_xml(enex_file)
        notes = self.prepare_notes(xml_tree)
        self._convert_dir(notes)

    def _convert_note_to_text(self, title, text):
        """Convert a note's HTML body to Markdown (with title heading) or plain text."""
        if self.fmt == "markdown":
            html2plain = HTML2Text(None, "")
            html2plain.feed("<h1>%s</h1>" % title)
            html2plain.feed(text)
            return html2plain.close()
        else:
            soup = BeautifulSoup(text, 'html.parser')
            output = soup.get_text()
            return output

    def sanitize_note_title(self, note_title):
        """Return *note_title* with characters unsafe for filenames replaced or removed."""
        return note_title.translate(self._TITLE_TRANSLATION)

    def _convert_dir(self, notes):
        """Write notes as JSON to stdout, or one text file per note to output_dir."""
        if self.output_dir is None:
            sys.stdout.write(json.dumps(notes))
        else:
            if (os.path.exists(self.output_dir) and
                    not os.path.isdir(self.output_dir)):
                print('"{}" exists but is not a directory.'.format(
                    self.output_dir))
                sys.exit(1)
            elif not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            for i, note in enumerate(notes):
                if self.preserve_title:
                    # (nicholaskuechler) preserve the sanitized title and
                    # append the note number in case of duplicate titles.
                    note_title = note['title']
                    note_title = self.sanitize_note_title(note_title)
                    note_title = "%s-%s" % (note_title, i)
                else:
                    note_title = str(i)
                try:
                    output_file_path = \
                        os.path.join(self.output_dir, note_title + '.txt')
                    with open(output_file_path, 'w') as output_file:
                        output_file.write(note['content'])
                except Exception as e:
                    # Fall back to an index-based filename when the
                    # title-based path cannot be written.
                    output_file_path = os.path.join(
                        self.output_dir,
                        "title_fail" + '-' + str(i) + '.txt')
                    print("failed to use title for filename: {}".format(e))
                    with open(output_file_path, 'w') as output_file:
                        output_file.write(note['content'])
| 40.096154 | 79 | 0.566107 | 5,877 | 0.939418 | 0 | 0 | 0 | 0 | 0 | 0 | 999 | 0.159687 |
f6f2d88af8b19a330bc99f6628aa11a5d1619261 | 4,055 | pyw | Python | randomopgavergui.pyw | jensjacobt/randomopgaver | 24a2b032bcd1bccd90dface0365feff419819d58 | [
"MIT"
] | null | null | null | randomopgavergui.pyw | jensjacobt/randomopgaver | 24a2b032bcd1bccd90dface0365feff419819d58 | [
"MIT"
] | null | null | null | randomopgavergui.pyw | jensjacobt/randomopgaver | 24a2b032bcd1bccd90dface0365feff419819d58 | [
"MIT"
] | null | null | null | # The GUI of Randomopgaver
# -*- coding: utf-8 -*-
import os
import tkinter
from tkinter import *
from tkinter import scrolledtext
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
from filegenerator import create_randomopgaver
global doc_file, tab_file, out_dir, st, status
# GUI FUNCTIONS
def set_doc():
    """Ask the user for a Word document and show its absolute path in e1."""
    chosen = askopenfilename(
        title='Vælg et Word-dokument',
        initialdir='',
        multiple=False,
        filetypes=(('Alle Word-dokumenter', '*.docx;*.doc'),
                   ('Alle filer', '*.*')),
    )
    path = os.path.abspath(chosen)
    e1.delete(0, END)
    e1.insert(0, path)
def set_tab():
    """Ask the user for an Excel/CSV/TXT file and show its absolute path in e2."""
    chosen = askopenfilename(
        title="Vælg et Excel-ark (eller en CSV-fil)",
        initialdir='',
        multiple=False,
        filetypes=(('Alle Excel-ark', '*.xlsx;*.xls'),
                   ('Alle CSV-filer', '*.csv'),
                   ('Alle TXT-filer', '*.txt'),
                   ("Alle filer", "*.*")),
    )
    path = os.path.abspath(chosen)
    e2.delete(0, END)
    e2.insert(0, path)
def set_out():
    """Ask the user for an output directory and show its absolute path in e3."""
    chosen = askdirectory(
        title="Vælg en mappe til output",
        initialdir="",
    )
    path = os.path.abspath(chosen)
    e3.delete(0, END)
    e3.insert(0, path)
def generate():
    """Validate the selected paths and run the task generation.

    Shows progress in a scrolled text widget; on invalid input paths the
    error dialog (with retry) is shown instead of starting.
    """
    global st, status
    if not os.path.isfile(e1.get()):
        error_handler(
            'Fejl: Den angivne sti for Word-dokumentet er ikke korrekt.')
        return
    if not os.path.isfile(e2.get()):
        error_handler(
            'Fejl: Den angivne sti for Excel-arket/CSV-filen er ikke korrekt.')
        return
    # Default the output folder to the Word document's directory.
    out_folder = (e3.get() if e3.get() else os.path.abspath(os.path.dirname(e1.get())))
    # Log widget for progress output; recreated on every run.
    st = tkinter.scrolledtext.ScrolledText(
        master=root,
        wrap=tkinter.WORD,
        height=15,
        width=5
    )
    st.grid(row=4, column=0, columnspan=3, padx=xpad, pady=ypad, sticky=EW)
    root.grid_columnconfigure(0, weight=1)
    st.delete(1.0, END)
    st.insert(1.0, 'Genererering af randomopgaver påbegyndt.')
    status['text'] = 'Genererer randomopgaver...'
    st.update()
    # write/error_handler are passed as callbacks for progress and failures.
    res = create_randomopgaver(write, error_handler, e1.get(), e2.get(), out_folder)
    if res:
        status['text'] = 'Randomopgaver blev genereret med succes.'
def write(string):
    """Append *string* as a new line to the scrolled log and refresh the UI."""
    global st
    st.insert(END, '\n' + string)
    st.see(END)
    st.update()
def error_handler(error_string):
    """Show a retry/cancel error dialog; restart generation when retry is chosen."""
    global status
    status['text'] = 'Der opstod en fejl under genereringen, som nu er stoppet.'
    if tkinter.messagebox.askretrycancel(
        'Der opstod en fejl',
        error_string,
        icon=messagebox.ERROR,
        default=messagebox.CANCEL
    ):
        generate()
# GUI
# Build the main window: three labelled path entries with browse buttons,
# a status line and a "generate" button; then enter the Tk main loop.
root = tkinter.Tk()
root.wm_title('Randomopgaver')
root.resizable(width=False, height=False)
# https://commons.wikimedia.org/wiki/File:One_die.jpeg
root.iconbitmap(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'icon.ico'))
# Widget padding (pixels).
ypad = 5
xpad = 6
# Column 0: labels.
l1 = Label(root, text='Word-dokument:')
l2 = Label(root, text='Excel-ark:')
l3 = Label(root, text='Evt. output-mappe:')
status = Label(root, text='')
l1.grid(row=0, sticky=W, padx=xpad)
l2.grid(row=1, sticky=W, padx=xpad)
l3.grid(row=2, sticky=W, padx=xpad)
status.grid(row=3, columnspan=2, sticky=W, padx=xpad)
# Column 1: path entry fields (filled in by the set_* callbacks).
e1 = Entry(root, width=40)
e2 = Entry(root, width=40)
e3 = Entry(root, width=40)
e1.grid(row=0, column=1, pady=ypad)
e2.grid(row=1, column=1, pady=ypad)
e3.grid(row=2, column=1, pady=ypad)
# Column 2: browse buttons plus the generate button.
b1 = Button(root, text='Gennemse...', command=set_doc, width=14)
b2 = Button(root, text='Gennemse...', command=set_tab, width=14)
b3 = Button(root, text='Gennemse...', command=set_out, width=14)
b4 = Button(root, text='Generer opgaver', command=generate, width=14)
b1.grid(row=0, column=2, sticky=E, padx=xpad, pady=ypad)
b2.grid(row=1, column=2, sticky=E, padx=xpad, pady=ypad)
b3.grid(row=2, column=2, sticky=E, padx=xpad, pady=ypad)
b4.grid(row=3, column=2, sticky=E, padx=xpad, pady=ypad)
root.columnconfigure(1, weight=5)
root.rowconfigure(1, weight=5)
root.mainloop()
| 28.356643 | 87 | 0.641184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.205962 |
f6f3a7cb2f1f74649f7392db25208bff5b69d6eb | 8,325 | py | Python | reb/plain.py | workingenius/reb | ebc597d95f79d6f2d89a401940f24d7a2c4274f0 | [
"MIT"
] | 1 | 2021-04-27T04:03:21.000Z | 2021-04-27T04:03:21.000Z | reb/plain.py | workingenius/reb | ebc597d95f79d6f2d89a401940f24d7a2c4274f0 | [
"MIT"
] | null | null | null | reb/plain.py | workingenius/reb | ebc597d95f79d6f2d89a401940f24d7a2c4274f0 | [
"MIT"
] | null | null | null | """Reb plain Implementation"""
from typing import Iterator, List, Optional
from functools import singledispatch
from .parse_tree import PTNode, VirtualPTNode
from .pattern import (
Finder,
Pattern,
PText, PAnyChar, PTag, PNotInChars, PInChars,
PAny, PRepeat, PAdjacent,
PExample,
PStarting, PEnding)
# Public API of the plain (pure-Python) finder backend.
__all__ = [
    'compile_pattern',
    'FinderPlain'
]
def compile_pattern(pattern: Pattern) -> Finder:
    """Compile a reb Pattern into a plain (pure-Python) Finder."""
    return _compile_pattern(pattern)
@singledispatch
def _compile_pattern(pattern: Pattern) -> 'FinderPlain':
    """Dispatch hook: each Pattern subclass registers its compiler below.

    Reaching this base implementation means an unsupported pattern type.
    """
    raise TypeError
class FinderPlain(Finder):
    """Base class of the plain (non-VM) finders; one subclass per pattern type."""

    def __init__(self, pattern: Pattern):
        self.pattern: Pattern = pattern

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        """Match pattern from <text>, start at <start>, iterate over all possible matches as PTNode"""
        raise NotImplementedError

    def finditer(self, text: str) -> Iterator[PTNode]:
        """Find pattern in text, yield them one after another"""
        cur = 0
        ll = len(text)
        while cur <= ll:
            m = False
            for pt in self.match(text, cur, reverse=False):
                m = True
                yield pt
                # Take only the first match at this position, then continue
                # after it; the cur += 1 branch guarantees forward progress
                # even for zero-width matches.
                if pt.index1 > cur:
                    cur = pt.index1
                else:
                    cur += 1
                break
            if not m:
                cur += 1
@_compile_pattern.register(PText)
class FText(FinderPlain):
    """Finder that matches one exact literal string."""

    def __init__(self, pattern: PText):
        super().__init__(pattern)
        self.text = pattern.text

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        end = start + len(self.text)
        if text[start:end] == self.text:
            yield PTNode(text, start=start, end=end)
@_compile_pattern.register(PAnyChar)
class FAnyChar(FinderPlain):
    """Finder that matches any single character (like '.')."""

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        if start >= len(text):
            return
        yield PTNode(text, start=start, end=start + 1)
@_compile_pattern.register(PTag)
class FTag(FinderPlain):
    """Finder that delegates to a sub-finder and tags every node it yields."""

    def __init__(self, pattern: PTag):
        super().__init__(pattern)
        self.finder = _compile_pattern(pattern.pattern)
        self.tag = pattern.tag

    def match(self, text, start=0, reverse=True) -> Iterator[PTNode]:
        for node in self.finder.match(text, start, reverse=reverse):
            node.tag = self.tag
            yield node
@_compile_pattern.register(PNotInChars)
class FNotInChars(FinderPlain):
    """Finder matching one character that is NOT in the given character set."""

    def __init__(self, pattern: PNotInChars):
        super().__init__(pattern)
        self.chars = pattern.chars

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        if start < len(text) and text[start] not in self.chars:
            yield PTNode(text, start=start, end=start + 1)
@_compile_pattern.register(PInChars)
class FInChars(FinderPlain):
    """Finder matching one character that IS in the given character set."""

    def __init__(self, pattern: PInChars):
        super().__init__(pattern)
        self.chars = pattern.chars

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        if start < len(text) and text[start] in self.chars:
            yield PTNode(text, start=start, end=start + 1)
@_compile_pattern.register(PAny)
class FAny(FinderPlain):
    """Finder for alternation: try each sub-pattern in declaration order."""

    def __init__(self, pattern: PAny):
        super().__init__(pattern)
        self.finders = [_compile_pattern(p) for p in pattern.patterns]

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        for sub in self.finders:
            yield from sub.match(text, start, reverse=reverse)
@_compile_pattern.register(PRepeat)
class FRepeat(FinderPlain):
    """Finder for repetition x{from,to}.

    Compiled as <from> mandatory copies of the sub-finder followed by an
    optional 0..(to-from) tail, all chained through FAdjacent.
    """

    def __init__(self, pattern: PRepeat):
        super().__init__(pattern)
        self.finder = _compile_pattern(pattern.pattern)
        self._from = pattern._from
        self._to = pattern._to
        self.greedy = pattern.greedy
        self.sub = self._prepare(pattern, self.finder, self._from, self._to)

    def _prepare(self, pattern: Pattern, finder: FinderPlain, _from: int, _to: int = None) -> FinderPlain:
        parts: List[FinderPlain] = [finder] * _from
        if _to is None:
            # Unbounded repetition: open-ended tail.
            parts.append(FRepeat0n(finder))
        elif isinstance(_to, int) and _from < _to:
            # Bounded repetition: tail limited to the optional count.
            parts.append(FRepeat0n(finder, _to - _from))
        return FAdjacent(pattern, parts)

    @staticmethod
    def reversed(it):
        # Materialize the iterator so it can be walked back to front.
        return reversed(list(it))

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        # The chained sub-finder enumerates candidates in one fixed order;
        # when the requested order disagrees with greediness, iterate the
        # opposite order and flip it.
        if self.greedy == reverse:
            return self.sub.match(text, start, reverse=reverse)
        return self.reversed(self.sub.match(text, start, reverse=not reverse))
@_compile_pattern.register(PAdjacent)
def compile_padjacent(pattern: PAdjacent):
    """Compile a concatenation into an FAdjacent over its compiled parts."""
    return FAdjacent(pattern, [_compile_pattern(p) for p in pattern.patterns])
class FAdjacent(FinderPlain):
    """Finder for a sequence of sub-patterns matched back to back.

    match() is an explicit-stack backtracking search: mtc_stk holds one
    match iterator per sub-finder entered so far, res_stk the node each of
    them currently contributes.
    """

    def __init__(self, pattern: Pattern, finders: List[FinderPlain]):
        super().__init__(pattern)
        self.finders: List[FinderPlain] = finders

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        idx_ptn = 0
        idx_pos = start
        mtc_stk: List[Iterator[PTNode]] = [self.finders[idx_ptn].match(text, idx_pos, reverse=reverse)]
        res_stk: List[Optional[PTNode]] = [None]
        while True:
            try:
                # Advance the deepest sub-finder to its next candidate.
                res_nxt = next(mtc_stk[-1])
            except StopIteration:
                # Current sub-finder exhausted: backtrack one level.
                idx_ptn -= 1
                if idx_ptn < 0:
                    return
                mtc_stk.pop()
                res_stk.pop()
                assert res_stk[-1] is not None
                idx_pos = res_stk[-1].index1
            else:
                # assert res_stk[-1] != res_nxt
                res_stk[-1] = res_nxt
                idx_ptn += 1
                if idx_ptn < len(self.finders):
                    # Descend into the next sub-finder, starting where this
                    # candidate ends.
                    idx_pos = res_nxt.index1
                    mtc_stk.append(self.finders[idx_ptn].match(text, idx_pos, reverse=reverse))
                    res_stk.append(None)
                else:
                    # All sub-patterns matched: emit the combined node and
                    # keep enumerating alternatives for the last one.
                    yield PTNode.lead(res_stk)  # type: ignore
                    idx_ptn -= 1
                    assert res_stk[-1] is not None
                    idx_pos = res_stk[-1].index0
@_compile_pattern.register(PExample)
def compile_pexample(pattern: PExample):
    """An example wrapper adds no matching behavior; compile its inner pattern."""
    return _compile_pattern(pattern.pattern)
@_compile_pattern.register(PStarting)
class FStarting(FinderPlain):
    """Zero-width finder that only matches at the very start of the text."""

    def match(self, text, start=0, reverse=True):
        if start != 0:
            return
        yield PTNode(text, start=start, end=start)
@_compile_pattern.register(PEnding)
class FEnding(FinderPlain):
    """Zero-width finder that only matches at the very end of the text."""

    def match(self, text, start=0, reverse=True):
        if start != len(text):
            return
        yield PTNode(text, start=start, end=start)
class FRepeat0n(FinderPlain):
    """Finder for "zero or up to _to repetitions" of a sub-finder.

    Enumerates candidate node lists breadth-first, shortest first; always
    yields the empty (zero-width) match first.
    NOTE(review): does not call super().__init__, so self.pattern is never
    set on this class — confirm nothing downstream relies on it.
    """

    def __init__(self, finder: FinderPlain, _to: int = None):
        self.finder: FinderPlain = finder
        self._to: Optional[int] = _to

    def match(self, text: str, start: int = 0, reverse: bool = True) -> Iterator[PTNode]:
        if self._to is not None and (self._to <= 0):
            return
        # Node List NeXT
        nl_nxt = [PTNode(text, start, start)]
        yield VirtualPTNode.lead(nl_nxt)  # x* pattern can always match empty string
        nl_que = [nl_nxt]
        while nl_que:
            # Node List PREvious, which has already failed
            nl_pre = nl_que.pop(0)
            # The sentinel empty node counts for one entry; stop extending
            # once the repetition limit is reached.
            if self._to is not None and (len(nl_pre) - 1 >= self._to):
                continue
            for n2 in self.finder.match(text, nl_pre[-1].index1, reverse=reverse):
                if not n2:
                    # repeat expect it's sub pattern to proceed
                    continue
                nl_nxt = nl_pre + [n2]
                yield VirtualPTNode.lead(nl_nxt)
                nl_que.append(nl_nxt)
| 33.3 | 106 | 0.599399 | 7,112 | 0.854294 | 4,116 | 0.494414 | 4,498 | 0.5403 | 0 | 0 | 415 | 0.04985 |
f6f524b71daa4dbc98c31cccc8cb7fce4eac5538 | 881 | py | Python | AdversarialAttack/SST/gen_pos.py | thunlp/DictSKB | ac7c328db4d25cf6bdc3c64aefa9b773854ba525 | [
"MIT"
] | 2 | 2021-11-21T13:42:06.000Z | 2022-01-11T05:33:43.000Z | AdversarialAttack/SST/gen_pos.py | thunlp/DictSKB | ac7c328db4d25cf6bdc3c64aefa9b773854ba525 | [
"MIT"
] | null | null | null | AdversarialAttack/SST/gen_pos.py | thunlp/DictSKB | ac7c328db4d25cf6bdc3c64aefa9b773854ba525 | [
"MIT"
] | 3 | 2021-09-14T06:40:51.000Z | 2021-12-23T09:41:22.000Z | import pickle
with open('./aux_files/dataset_13837.pkl','rb') as fp:
dataset=pickle.load(fp)
from nltk.tag import StanfordPOSTagger
jar = 'stanford-postagger-full-2018-10-16/stanford-postagger.jar'
model = 'stanford-postagger-full-2018-10-16/models/english-left3words-distsim.tagger'
pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')
from tqdm import tqdm
train_text=[[dataset.inv_full_dict[t] for t in tt] for tt in dataset.train_seqs]
test_text=[[dataset.inv_full_dict[t] for t in tt] for tt in dataset.test_seqs]
all_pos_tags=[]
test_pos_tags=[]
for text in tqdm(train_text):
pos_tags = pos_tagger.tag(text)
all_pos_tags.append(pos_tags)
for text in test_text:
pos_tags = pos_tagger.tag(text)
test_pos_tags.append(pos_tags)
f = open('pos_tags.pkl','wb')
pickle.dump(all_pos_tags,f)
f=open('pos_tags_test.pkl','wb')
pickle.dump(test_pos_tags,f)
| 32.62963 | 85 | 0.76277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.247446 |
f6f688d8a5f12a882c462a7d43fb10da64d6fa89 | 5,009 | py | Python | PiCam/picam.py | alexwtz/pciam | 7e90dd22a8e97e41fb480cb85cdf95cefd2d974e | [
"MIT"
] | null | null | null | PiCam/picam.py | alexwtz/pciam | 7e90dd22a8e97e41fb480cb85cdf95cefd2d974e | [
"MIT"
] | null | null | null | PiCam/picam.py | alexwtz/pciam | 7e90dd22a8e97e41fb480cb85cdf95cefd2d974e | [
"MIT"
] | null | null | null | import serial
import time
import atexit
from functools import wraps
from flask import Flask, render_template, request, Response
app = Flask(__name__)
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.
    """
    # NOTE(review): credentials are hard-coded; move to config for real use.
    return (username, password) == ('admin', 'secret')
def authenticate():
    """Sends a 401 response that enables basic auth"""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, challenge)
def requires_auth(f):
    """Flask view decorator enforcing HTTP Basic authentication via check_auth."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            # Missing or wrong credentials: send the 401 challenge instead.
            return authenticate()
        return f(*args, **kwargs)
    return decorated
# This function maps the angle we want to move the servo to, to the needed PWM value
def angleMap(angle):
    """Map a 0-180 degree angle to the controller's -1000..1000 PWM range."""
    raw = int(round(2000.0 / 180.0, 0) * angle - 1000)
    # Clamp out-of-range requests to the controller's limits.
    return max(-1000, min(1000, raw))
# Create a dictionary called pins to store the pin number, name, and angle
# ('angle' is the last commanded angle; both servos start centred at 90).
pins = {
    23 : {'name' : 'pan', 'angle' : 90},
    22 : {'name' : 'tilt', 'angle' : 90}
}
# defines the speed of the movement (degrees added/removed per web request)
speed = 5
# define inverse (direction multipliers; tilt axis is mounted inverted)
paninverse = 1
tiltinverse = -1
def cleanup():
    """atexit hook: currently just logs that the app is shutting down."""
    print("Exit app")
def sendPosition(motor, position):
    """Send one servo-move command over the serial link.

    *motor* selects the controller channel ('pan' -> s0, 'tilt' -> s1);
    *position* is the PWM value produced by angleMap().
    """
    # (Re)open the serial interface to the servo controller.
    link = serial.Serial("/dev/ttyAMA0", 9600)
    if link.isOpen():
        link.close()
    link.open()
    channel = None
    if motor == "pan":
        channel = "s0"
    elif motor == "tilt":
        channel = "s1"
    if channel is not None:
        link.write(channel + " " + str(position) + " 5\n")
    link.close()
    return "Moved"
def sendBothPosition(positionPan, positionTilt):
    """Send pan (s0) and tilt (s1) move commands in one serial session."""
    link = serial.Serial("/dev/ttyAMA0", 9600)
    if link.isOpen():
        link.close()
    link.open()
    for channel, position in (("s0", positionPan), ("s1", positionTilt)):
        link.write(channel + " " + str(position) + " 5\n")
    link.close()
    return "Moved"
# Load the main form template on webrequest for the root page
@app.route("/")
@requires_auth
def main():
    """Render the camera control page (title is the only template datum)."""
    return render_template('picam.html', title='PiCam')
# The function below is executed when someone requests a URL with a move direction

# Speed presets selectable from the UI (degrees moved per request).
_SPEED_PRESETS = {'speed1': 1, 'speed5': 5, 'speed10': 10, 'speed20': 20}


def _step(pin, delta, check_upper):
    """Nudge the servo on *pin* by *delta* degrees and report the result.

    When *check_upper* is true the new angle must stay <= 180 (clamped to
    180 on overflow); otherwise it must stay >= 0 (clamped to 0).  The
    servo is only driven when the new angle is within range; the return
    value "<angle> <pwm>" matches the original inline code.
    """
    na = pins[pin]['angle'] + delta
    in_range = (na <= 180) if check_upper else (na >= 0)
    if in_range:
        sendPosition(pins[pin]['name'], angleMap(na))
        print("Servo %s at %s" % (pin, angleMap(na)))
        pins[pin]['angle'] = na
    else:
        # Out of range: pin the stored angle at the limit, do not move.
        pins[pin]['angle'] = 180 if check_upper else 0
    return str(na) + ' ' + str(angleMap(na))


@app.route("/<direction>")
def move(direction):
    """Handle a movement or speed command from the web UI.

    Bug fix: the original declared ``global titlinverse`` (a typo for
    ``tiltinverse``); only ``speed`` is assigned here, so it is the only
    name that needs a global declaration.
    """
    global speed
    if direction == 'left':
        return _step(23, paninverse * speed, True)
    elif direction == 'reset':
        # Re-centre both servos and reset the cached angles.
        sendBothPosition(0, 0)
        pins[23]['angle'] = 90
        pins[22]['angle'] = 90
        return '0 0'
    elif direction == 'right':
        return _step(23, -paninverse * speed, False)
    elif direction == 'up':
        return _step(22, tiltinverse * speed, True)
    elif direction == 'down':
        return _step(22, -tiltinverse * speed, False)
    elif direction in _SPEED_PRESETS:
        speed = _SPEED_PRESETS[direction]
        return str(speed)
# Function to manually set a motor to a specific pulse width
@app.route("/<motor>/<pulsewidth>")
def manual(motor, pulsewidth):
    """Drive one servo directly to the given pulse width.

    Bug fix: the original body referenced ``servoPan``/``servoTilt``
    objects that are never defined in this module (leftovers from a
    GPIO-based version), so any request here raised NameError.  Route the
    command through the module's serial sender instead.
    """
    if motor in ("pan", "tilt"):
        sendPosition(motor, int(pulsewidth))
    return "Moved"
# Clean everything up when the app exits
atexit.register(cleanup)
if __name__ == "__main__":
    # Listen on all interfaces; debug=True enables the reloader and tracebacks.
    app.run(host='0.0.0.0', port=80, debug=True)
| 28.787356 | 84 | 0.609503 | 0 | 0 | 0 | 0 | 2,796 | 0.558195 | 0 | 0 | 1,663 | 0.332002 |
f6f935040202abf72d1e7c6636a1df716225a0b0 | 7,834 | py | Python | simplemfa/helpers.py | mwhawkins/django-simple-mfa | 58b5188473a776f5fcebda9843e2147123dce59f | [
"MIT"
] | 3 | 2020-07-17T09:14:09.000Z | 2020-12-15T09:26:56.000Z | simplemfa/helpers.py | mwhawkins/django-simple-mfa | 58b5188473a776f5fcebda9843e2147123dce59f | [
"MIT"
] | 1 | 2020-11-25T10:41:02.000Z | 2020-11-25T10:41:02.000Z | simplemfa/helpers.py | mwhawkins/django-simple-mfa | 58b5188473a776f5fcebda9843e2147123dce59f | [
"MIT"
] | 2 | 2020-11-25T10:47:20.000Z | 2021-11-04T15:41:21.000Z | from django import template
from django.core.mail import send_mail
from django.template.loader import get_template
from django.conf import settings
from django.shortcuts import reverse
from twilio.twiml.voice_response import VoiceResponse, Say
from twilio.rest import Client
from django.utils import timezone
def get_client_ip(request):
    """Return the originating client IP for *request*.

    Prefers the first entry of the X-Forwarded-For header (set by proxies)
    and falls back to REMOTE_ADDR.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def template_exists(value):
    """Return True when a template named *value* can be loaded."""
    try:
        template.loader.get_template(value)
    except template.TemplateDoesNotExist:
        return False
    return True
def template_fallback(values):
    """Return the first template name in *values* that exists; raise otherwise."""
    found = next((name for name in values if template_exists(name)), None)
    if found is not None:
        return found
    raise template.TemplateDoesNotExist
def get_message_context(request, code):
    """Build the template context shared by the email/SMS/voice MFA messages."""
    context = {
        'username': request.user.username,
        'request': request,
        # Fall back to the HTTP host when the project does not define APP_NAME.
        'app_name': settings.APP_NAME if hasattr(settings, "APP_NAME") else f"{request.META.get('HTTP_HOST')}",
        'code': code,
        'url': request.build_absolute_uri(location=reverse('mfa:mfa-login'))
    }
    return context
def get_twilio_client():
    """Return a configured Twilio Client, or None when settings are incomplete."""
    required = ("TWILIO_ACCOUNT_SID", "TWILIO_AUTH_TOKEN", "TWILIO_NUMBER")
    if not all(hasattr(settings, name) for name in required):
        return None
    return Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
def send_mfa_code_email(request, code):
    """Email the MFA *code* to the logged-in user.

    Returns the number of messages sent (from django send_mail).
    """
    html_template = get_template('simplemfa/auth_email.html')
    context = get_message_context(request, code)
    msg = html_template.render(context)
    subject = f"{context['app_name']} Verification Code"
    # Fall back to a no-reply address on the current host when the project
    # does not configure DEFAULT_FROM_EMAIL.
    default_from_email = settings.DEFAULT_FROM_EMAIL if hasattr(settings, 'DEFAULT_FROM_EMAIL') else \
        f"no-reply@{request.META.get('HTTP_HOST')}"
    return send_mail(subject, msg, default_from_email, [request.user.email], fail_silently=False)
def send_mfa_code_text(request, code):
    """Send the MFA *code* to the user's phone via SMS.

    Returns True on success, False when texting is unconfigured or fails
    (callers then fall back to email).
    Fixes: replaces the original eval() of a settings-derived f-string with
    operator.attrgetter (same dotted attribute lookup, no code execution)
    and narrows the bare except clause to Exception.
    """
    from operator import attrgetter

    template = get_template('simplemfa/auth_text.html')
    context = get_message_context(request, code)
    msg = str(template.render(context))
    client = get_twilio_client()
    if hasattr(settings, "MFA_USER_PHONE_ATTRIBUTE") and client is not None:
        try:
            # Resolve the configured attribute path on the user object.
            recipient = attrgetter(settings.MFA_USER_PHONE_ATTRIBUTE)(request.user)
            if recipient is not None:
                recipient = parse_phone(recipient)
                client.messages.create(to=recipient,
                                       from_=settings.TWILIO_NUMBER,
                                       body=msg)
                return True
        except Exception:
            return False
    return False
def send_mfa_code_phone(request, code):
    """Deliver the MFA *code* via an automated voice call.

    The code is spelled out digit-by-digit twice, with long pauses so it is
    intelligible.  Returns True on success, False when calling is
    unconfigured or fails (callers then fall back to email).
    Fixes: replaces the original eval() of a settings-derived f-string with
    operator.attrgetter, narrows the bare except clause to Exception, and
    drops the leftover debug print of the generated TwiML.
    """
    from operator import attrgetter

    template = get_template('simplemfa/auth_voice.html')
    context = get_message_context(request, code)
    msg = str(template.render(context)) + ","
    client = get_twilio_client()
    if hasattr(settings, "MFA_USER_PHONE_ATTRIBUTE") and client is not None:
        try:
            # Resolve the configured attribute path on the user object.
            recipient = attrgetter(settings.MFA_USER_PHONE_ATTRIBUTE)(request.user)
            code_list = [str(i) for i in str(code)]
            if recipient is not None:
                recipient = parse_phone(recipient)
                response = VoiceResponse()
                say = Say()
                # The comma runs force pauses in Twilio's text-to-speech.
                say.p(f",,,,,,,,,{msg},,,,,,,,,...")
                for char in code_list:
                    say.say_as(f",,,,,,,,...{str(char)},,,,,,,,...", interpret_as="spell-out")
                say.p(f",,,,,,,,,...Again, {msg},,,,,,,,,")
                for char in code_list:
                    say.say_as(f",,,,,,,,...{str(char)},,,,,,,,...", interpret_as="spell-out")
                say.p(",,,,,,Goodbye!")
                response.append(say)
                client.calls.create(to=recipient,
                                    from_=settings.TWILIO_NUMBER,
                                    twiml=str(response.to_xml()))
                return True
        except Exception:
            return False
    return False
def parse_phone(phone):
    """Normalize a phone number: ensure a leading '+' and strip
    dashes, parentheses and spaces."""
    normalized = phone if "+" in phone else "+" + phone
    return normalized.translate(str.maketrans("", "", "-() "))
def get_user_mfa_mode(request):
    """Return the user's preferred MFA delivery mode, defaulting to "EMAIL".

    Reads the attribute path named by settings.MFA_USER_MODE_ATTRIBUTE off
    request.user.  Fixes: replaces the original eval() of an f-string with
    operator.attrgetter (same dotted lookup, no code execution) and
    evaluates the attribute once instead of twice.
    """
    from operator import attrgetter

    if not hasattr(settings, "MFA_USER_MODE_ATTRIBUTE"):
        return "EMAIL"
    mode = attrgetter(settings.MFA_USER_MODE_ATTRIBUTE)(request.user)
    return mode if mode is not None else "EMAIL"
def send_mfa_code(request, code, mode=None):
    """Dispatch the MFA code via the user's preferred channel.

    Text and voice delivery fall back to email when they fail.
    """
    if mode is None:
        mode = get_user_mfa_mode(request)
    channel_senders = {"TEXT": send_mfa_code_text, "PHONE": send_mfa_code_phone}
    sender = channel_senders.get(mode)
    if sender is None:
        return send_mfa_code_email(request, code)
    if sender(request, code):
        return True
    return send_mfa_code_email(request, code)
def get_user_phone(request):
    """Return the user's phone attribute, or None when unconfigured.

    Fixes: replaces the original eval() of a settings-derived f-string with
    operator.attrgetter and evaluates the attribute once instead of twice
    (returning the value directly is equivalent to the old
    "value if value is not None else None").
    """
    from operator import attrgetter

    if hasattr(settings, "MFA_USER_PHONE_ATTRIBUTE"):
        return attrgetter(settings.MFA_USER_PHONE_ATTRIBUTE)(request.user)
    return None
def get_cookie_expiration():
    """Cookie lifetime in days: the settings override, or a 7-day default."""
    return getattr(settings, "MFA_COOKIE_EXPIRATION_DAYS", 7)
def set_cookie(response, key, value, days_expire=None):
    """Attach cookie *key*=*value* to *response* with an expiry.

    *days_expire* defaults to the configured MFA cookie lifetime.
    Fix: the original signature used ``days_expire=get_cookie_expiration()``,
    which is evaluated once at import time and freezes the settings value;
    it is now resolved per call (None explicitly passed previously meant a
    fixed 7 days — it now also means "use the configured default").
    """
    if days_expire is None:
        days_expire = get_cookie_expiration()
    max_age = days_expire * 24 * 60 * 60
    expires = timezone.datetime.strftime(timezone.now() + timezone.timedelta(seconds=max_age),
                                         "%a, %d-%b-%Y %H:%M:%S UTC")
    response.set_cookie(key, value, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        secure=settings.SESSION_COOKIE_SECURE or None)
def _mask_label(label):
    """Keep the first and last character of *label*; mask the middle with '*'."""
    if len(label) <= 2:
        return label
    return label[0] + "*" * (len(label) - 2) + label[-1]


def sanitize_email(email):
    """Return *email* with the local part and domain name partially masked.

    e.g. "john@example.com" -> "j**n@e*****e.com".
    Fixes relative to the original: multi-label suffixes ("mail.co.uk")
    are preserved instead of being truncated to the first suffix label,
    and addresses whose domain has no dot no longer raise IndexError.
    """
    if email is None:
        return None
    local, _, domain = email.partition("@")
    domain_name, _, domain_rest = domain.partition(".")
    return f"{_mask_label(local)}@{_mask_label(domain_name)}.{domain_rest}"
def sanitize_phone(phone):
    """Mask all but the last four digits of *phone*; None passes through."""
    if phone is None:
        return None
    digits = parse_phone(phone).replace("+", "")
    # Numbers with four or fewer digits are returned unmasked, matching the
    # original per-character loop.
    return "*" * (len(digits) - 4) + digits[-4:]
def build_mfa_request_url(request, next_url=None):
    """Build the URL for requesting a fresh MFA code, carrying ?next= along."""
    if next_url is None:
        next_url = request.GET.get("next", None)
    url = reverse("simplemfa:mfa-request") + "?reset=true"
    if next_url is not None:
        url += f"&next={next_url}"
    return url
def build_mfa_post_url(request, next_url=None):
    """Build the MFA login POST URL, carrying ?next= along when present."""
    if next_url is None:
        next_url = request.GET.get("next", None)
    url = reverse("simplemfa:mfa-login")
    if next_url is not None:
        url += f"?next={next_url}"
    return url
| 34.359649 | 111 | 0.616033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,168 | 0.149094 |
f6fad38b5c6177592103f90dc3306807b04807de | 443 | py | Python | wsgi/djkatta/cabshare/urls.py | ashishnitinpatil/mukatta | dfec9cd65d08f6b7d76b686fae3605986b31b80d | [
"BSD-2-Clause-FreeBSD",
"PSF-2.0",
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | null | null | null | wsgi/djkatta/cabshare/urls.py | ashishnitinpatil/mukatta | dfec9cd65d08f6b7d76b686fae3605986b31b80d | [
"BSD-2-Clause-FreeBSD",
"PSF-2.0",
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | null | null | null | wsgi/djkatta/cabshare/urls.py | ashishnitinpatil/mukatta | dfec9cd65d08f6b7d76b686fae3605986b31b80d | [
"BSD-2-Clause-FreeBSD",
"PSF-2.0",
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, url
# App specific URL patterns
# NOTE(review): uses the pre-Django-1.8 patterns() prefix syntax with string
# view names; patterns() was removed in Django 1.10, so this file is tied to
# an old Django release.
urlpatterns = patterns("djkatta.cabshare.views",
    # post new req
    url(r'new_post/$', 'new_post', name='new_post'),
    # view posts by the user
    url(r'my_posts/$', 'my_posts', name='my_posts'),
    # modify old req
    url(r'(?P<post_id>[\d]+)/edit/$', 'edit', name='edit'),
    # view individual req
    url(r'(?P<post_id>[\d]+)/$', 'indi', name='indi'),
)
| 24.611111 | 59 | 0.611738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.602709 |
f6fadb640578e054ade2f1f7f1755e165fd9b8bc | 3,035 | py | Python | GANs/WGAN/train.py | sushant097/Deep-Learning-Paper-Scratch-Implementation | e1e5cbd0f1f10dd9ed0673e8a0506f33c94f6725 | [
"MIT"
] | 4 | 2022-01-07T14:48:13.000Z | 2022-02-24T10:10:24.000Z | GANs/WGAN/train.py | sushant097/Deep-Learning-Paper-Scratch-Implementation | e1e5cbd0f1f10dd9ed0673e8a0506f33c94f6725 | [
"MIT"
] | null | null | null | GANs/WGAN/train.py | sushant097/Deep-Learning-Paper-Scratch-Implementation | e1e5cbd0f1f10dd9ed0673e8a0506f33c94f6725 | [
"MIT"
] | null | null | null | import torch
from torchvision.utils import save_image
from torchvision import datasets
from torchvision.transforms import transforms
import os
from model import Generator, Discriminator, init_weights, denorm_image
from config import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create the data and sample output directories if they do not exist.
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
if not os.path.exists(sample_dir):
    os.makedirs(sample_dir)

# Resize, tensorize and scale images to [-1, 1].
# Fix: renamed from `transforms` so the torchvision.transforms module is
# not shadowed by the composed pipeline.
transform = transforms.Compose([
    transforms.Resize(Image_size),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

dataloader = torch.utils.data.DataLoader(
    datasets.CIFAR10(
        data_dir,
        download=True,
        train=True,
        transform=transform
    ),
    batch_size=batch_size,
    shuffle=True
)

# Step2: Define Generator and discriminator (the WGAN "critic")
gen = Generator(hidden_dim, img_channels).to(device)
critic = Discriminator(img_channels).to(device)

# initialize weights
init_weights(gen)
init_weights(critic)

# WGAN with gradient clipping uses RMSprop instead of ADAM
optimCritic = torch.optim.RMSprop(critic.parameters(), lr=learning_rate)
optimGen = torch.optim.RMSprop(gen.parameters(), lr=learning_rate)

# ------------
#   Training
# ------------
# set the training mode
gen.train()
critic.train()

for epoch in range(epochs):
    for i, (imgs, _) in enumerate(dataloader):
        real_imgs = imgs.to(device)

        # -------------------
        # Train critic: maximize E[critic(real)] - E[critic(fake)],
        # implemented by minimizing the negation below.
        # -------------------
        for _ in range(critic_iter):
            optimCritic.zero_grad()

            # Sample generator input noise.
            z = torch.randn((batch_size, hidden_dim, 1, 1), dtype=torch.float, device=device)

            # Generate fakes; detach so only the critic is updated here.
            fake_imgs = gen(z).detach()

            critic_loss = -torch.mean(critic(real_imgs)) + torch.mean(critic(fake_imgs))
            critic_loss.backward()
            optimCritic.step()

            # Weight clipping enforces the Lipschitz constraint (original WGAN).
            for p in critic.parameters():
                p.data.clamp_(-weight_clipping_limit, weight_clipping_limit)

        # --------------
        # Train generator: minimize -E[critic(fake)] (reuses the last noise z).
        # --------------
        optimGen.zero_grad()
        fake_imgs = gen(z)
        loss_G = -torch.mean(critic(fake_imgs))
        loss_G.backward()
        optimGen.step()

    print('Epoch [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}'
          .format(epoch, epochs, critic_loss.item(), loss_G.item()))
    save_image(denorm_image(fake_imgs), os.path.join(sample_dir, 'fake_images-{}.png'.format(epoch + 1)))

# Save the final model checkpoints.
torch.save(gen.state_dict(), 'G.ckpt')
torch.save(critic.state_dict(), 'D.ckpt')
| 28.364486 | 105 | 0.632619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 780 | 0.257002 |
f6fb3b9906e2a0e989b1efec9cae5a714bfdf274 | 5,969 | py | Python | examples/mpi_based_distributed_execution/MODIS_Aggregation_MPI.py | big-data-lab-umbc/MODIS_Aggregation | 43b79c44b0f62affdc3befb9c089a296e840893a | [
"Apache-2.0"
] | 3 | 2021-05-02T10:11:19.000Z | 2021-12-24T22:00:10.000Z | examples/mpi_based_distributed_execution/MODIS_Aggregation_MPI.py | big-data-lab-umbc/MODIS_Aggregation | 43b79c44b0f62affdc3befb9c089a296e840893a | [
"Apache-2.0"
] | 5 | 2019-11-26T21:09:35.000Z | 2022-03-03T19:23:25.000Z | examples/mpi_based_distributed_execution/MODIS_Aggregation_MPI.py | big-data-lab-umbc/MODIS_Aggregation | 43b79c44b0f62affdc3befb9c089a296e840893a | [
"Apache-2.0"
] | 4 | 2020-08-18T14:26:01.000Z | 2021-10-20T12:42:14.000Z | #!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN MPI WITH FLEXIBLE STATISTICS
Created on 2020
@author: Jianyu Zheng (Email: jzheng3@umbc.edu)
"""
import os
import sys
import h5py
import timeit
import random
import calendar
import numpy as np
import pandas as pd
from mpi4py import MPI
from netCDF4 import Dataset
from collections import OrderedDict
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY, MONTHLY
from MODIS_Aggregation import *
if __name__ =='__main__':
    # This is the main program for using concurrent to speed up the whole process
    # Each MPI rank aggregates a share of the MODIS files; rank 0 reduces the
    # partial grids and writes the final L3 HDF5 product.

    #--------------STEP 1: Read User Inputs and Initial Paramters for Aggregation--------------------
    # Unpack the full aggregation configuration (grid definition, statistic
    # switches, variable names, histogram intervals, output metadata, ...).
    fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,map_lon,map_lat,grid_lon,grid_lat,gap_x,gap_y,filenum, \
    grid_data,sts_switch,varnames,intervals_1d,intervals_2d,bin_num1,bin_num2,var_idx,spl_num,sts_name,histnames, \
    output_dir,l3name,unit_list,scale_list,offst_list,longname_list,fillvalue_list = read_user_inputs()

    total_file_num = len(filenum)

    #--------------STEP 2: Start Aggregation------------------------------------------------
    # Start counting operation time
    start_time = timeit.default_timer()

    print("-------- START AGGREGATION --------")

    # Initiate MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    random.seed(rank)

    # Initiate the number of files for MPI.
    # Pad the index range so it splits evenly into `size` chunks; the first
    # (size - remain) ranks take the larger chunks (part1), the remaining
    # `remain` ranks share the leftover indices (part2).
    remain = size-total_file_num%size
    files_part1 = np.arange(total_file_num + remain)
    tasks_part1 = np.array(np.split(files_part1,size))
    files_part2 = np.arange(total_file_num - tasks_part1[rank].size * (size-remain)) + tasks_part1[rank].size * (size-remain)
    tasks_part2 = np.array(np.split(files_part2,remain))

    if rank < (size-remain):
        fileloop = tasks_part1[rank]
    else:
        fileloop = tasks_part2[rank-(size-remain)]

    print("Process {} calculating files from {} to {}... (Total: {} / {})".format(rank, fileloop[0],fileloop[-1],fileloop.shape[0],total_file_num))

    if rank == 0:
        # Rank 0: aggregate its own file share, then fold in the partial grids
        # received from every other rank.
        grid_data = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
                                    grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)

        for i in range(1,size):
            results = comm.recv(source=i, tag=0)
            grid_data = addCounter(grid_data, results)

        # Compute the mean cloud fraction & Statistics (Include Min & Max & Standard deviation)
        # Reference for statstic parameters
        # sts_name[0]: min
        # sts_name[1]: max
        # sts_name[2]: mean / total_value
        # sts_name[3]: count
        # sts_name[4]: square
        # sts_name[5]: histogram
        # sts_name[6]: joint histogram

        sts_idx = np.array(np.where(sts_switch == True))[0]
        print("Index of User-defined Statistics:",sts_idx)
        print(grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))

        # Finalise each enabled statistic and reshape the flat accumulator
        # arrays back to the 2-D (or histogram) grid shape.
        key_idx = 0
        for key in varnames:
            for i in sts_idx:
                if i == 0:
                    grid_data[key+'_'+sts_name[0]] = grid_data[key+'_'+sts_name[0]].reshape([grid_lat,grid_lon])
                elif i == 1:
                    grid_data[key+'_'+sts_name[1]] = grid_data[key+'_'+sts_name[1]].reshape([grid_lat,grid_lon])
                elif i == 2:
                    # Mean = accumulated total / per-cell count.
                    grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]] / grid_data[key+'_'+sts_name[3]]
                    grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]].reshape([grid_lat,grid_lon])
                elif i == 3:
                    grid_data[key+'_'+sts_name[3]] = grid_data[key+'_'+sts_name[3]].reshape([grid_lat,grid_lon])
                elif i == 4:
                    # Standard deviation = sqrt(E[x^2] - E[x]^2).
                    grid_data[key+'_'+sts_name[4]] = ((grid_data[key+'_'+sts_name[4]] / grid_data[key+'_'+sts_name[3]].ravel()) - grid_data[key+'_'+sts_name[2]].ravel()**2)**0.5
                    grid_data[key+'_'+sts_name[4]] = grid_data[key+'_'+sts_name[4]].reshape([grid_lat,grid_lon])
                elif i == 5:
                    grid_data[key+'_'+sts_name[5]] = grid_data[key+'_'+sts_name[5]].reshape([grid_lat,grid_lon,bin_num1[key_idx]])
                elif i == 6:
                    grid_data[key+'_'+sts_name[6]+histnames[key_idx]] = grid_data[key+'_'+sts_name[6]+histnames[key_idx]].reshape([grid_lat,grid_lon,bin_num1[key_idx],bin_num2[key_idx]])
            key_idx += 1

        end_time = timeit.default_timer()
        print ("Operation Time in {:7.2f} seconds".format(end_time - start_time))

        #--------------STEP 3: Create HDF5 file to store the result------------------------------
        ff=h5py.File(output_dir+l3name+'MPI','w')

        PC=ff.create_dataset('lat_bnd',data=map_lat)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Latitude_boundaries'

        PC=ff.create_dataset('lon_bnd',data=map_lon)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Longitude_boundaries'

        PCentry=ff.create_dataset('GRID_Counts',data=grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))
        PCentry.dims[0].label='lat_bnd'
        PCentry.dims[1].label='lon_bnd'
        PC.attrs['units']='none'
        PC.attrs['long_name']='grid_point_counts'

        # Write every enabled statistic for every variable.
        for i in range(sts_idx.shape[0]):
            cnt = 0
            for key in grid_data:
                if key.find("1km") != -1:
                    new_name = key.replace("_1km", "")
                else:
                    new_name = key
                if (sts_name[sts_idx[i]] in key) == True:
                    if sts_idx[i] >= 5:
                        # Histograms / joint histograms carry their own bin intervals.
                        addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[cnt],intervals_2d[cnt])
                    else:
                        addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[0],intervals_2d[0])
                cnt += 1

        ff.close()

        print(l3name+' Saved!')
        print("-------- AGGREGATION COMPLETED --------")
    else:
        # Worker ranks: aggregate their own share and send the partial grid to rank 0.
        results = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
                                  grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)
        massage = "Process {} finished".format(rank)
        print(massage)
        comm.send(results, dest=0, tag=0)

#---------------------------COMPLETED------------------------------------------------------
| 37.074534 | 171 | 0.68638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,561 | 0.261518 |
f6fb899f61687e61712367affa88a5603ababa3e | 5,631 | py | Python | repack_img.py | Illidanz/VampireTranslation | d57e88d0dcd7d5d1c9dcaaff2d6e5ea098d842b6 | [
"MIT"
] | 3 | 2021-12-02T07:12:21.000Z | 2022-02-16T04:26:21.000Z | repack_img.py | Illidanz/VampireTranslation | d57e88d0dcd7d5d1c9dcaaff2d6e5ea098d842b6 | [
"MIT"
] | 1 | 2021-12-07T17:54:58.000Z | 2021-12-07T17:54:59.000Z | repack_img.py | Illidanz/VampireTranslation | d57e88d0dcd7d5d1c9dcaaff2d6e5ea098d842b6 | [
"MIT"
] | null | null | null | import os
from hacktools import common, nitro
import images
def run(data):
    """Repack edited IMG and ANCG images from the work folder into new files.

    `data` is the project root; reads originals from `extract_BMP/`, edited
    PNGs from `work_IMG/`, and writes rebuilt files into `repack_BMP/`.
    """
    infolder = data + "extract_BMP/"
    outfolder = data + "repack_BMP/"
    workfolder = data + "work_IMG/"
    common.logMessage("Repacking IMG from", workfolder, "...")
    files = common.getFiles(infolder, ".IMG")
    # Some IMG files have no palette section and reuse the previous file's
    # palettes, so remember the last palettes seen.
    lastpals = []
    totfiles = 0
    for file in common.showProgress(files):
        dirname = os.path.dirname(file)
        if not os.path.isdir(workfolder + dirname):
            continue
        if file == "DATESL/002.IMG":
            continue
        with common.Stream(infolder + file, "rb") as fin:
            with common.Stream(outfolder + file, "wb") as f:
                common.logDebug("Repacking", file)
                # Read section offsets (stored as uint32 counts of 4-byte words)
                paloff = fin.readUInt() * 4
                tileoff = fin.readUInt() * 4
                mapoff = fin.readUInt() * 4
                common.logDebug("Sections:", common.toHex(paloff), common.toHex(tileoff), common.toHex(mapoff))
                # Read image
                paloffs, tileoffs, mapoffs = images.readIMGData(fin, paloff, tileoff, mapoff)
                if len(mapoffs) == 0 or len(mapoffs) > 2:
                    # Unsupported layout: copy the file through unchanged.
                    common.logError("Can't repack file", file)
                    fin.seek(0)
                    f.write(fin.read())
                    continue
                palettes, tiles, maps = images.readIMG(fin, paloffs, tileoffs, mapoffs)
                if len(paloffs) == 0:
                    common.logDebug("No palettes")
                    palettes = lastpals
                else:
                    lastpals = palettes
                # Copy file up until the first tile
                fin.seek(0)
                f.write(fin.read(tileoffs[0]))
                # Repack them
                newtileoffs = []
                mapfiles = []
                for i in range(len(mapoffs) - 1):
                    pngfile = workfolder + file.replace(".IMG", "_" + str(i).zfill(2) + ".png")
                    # In-memory stream holding this sub-image's rebuilt map data.
                    mapdata = common.Stream().__enter__()
                    if os.path.isfile(pngfile):
                        # An edited PNG exists: re-encode tiles + map from it
                        # via temporary files, then splice them in.
                        common.logDebug("Repacking", pngfile)
                        tiles[i].tileoffset = 0
                        maps[i].mapoffset = 0
                        open("temptile.bin", "w").close()
                        open("tempmap.bin", "w").close()
                        nitro.writeMappedNSCR("temptile.bin", "tempmap.bin", tiles[i], maps[i], pngfile, palettes, maps[i].width, maps[i].height, writelen=False)
                        with common.Stream("temptile.bin", "rb") as temptile:
                            f.write(temptile.read())
                        # Keep tile data 4-byte aligned before recording its end offset.
                        if f.tell() % 4 > 0:
                            f.writeZero(f.tell() % 4)
                        newtileoffs.append(f.tell())
                        with common.Stream("tempmap.bin", "rb") as tempmap:
                            # Map data is prefixed with width/height in 8px tiles.
                            mapdata.writeUShort(maps[i].width // 8)
                            mapdata.writeUShort(maps[i].height // 8)
                            mapdata.write(tempmap.read())
                        if mapdata.tell() % 4 > 0:
                            mapdata.writeZero(mapdata.tell() % 4)
                        os.remove("temptile.bin")
                        os.remove("tempmap.bin")
                        totfiles += 1
                    else:
                        common.logDebug("Copying", pngfile)
                        # Just copy the tile and map data
                        fin.seek(tileoffs[i])
                        f.write(fin.read(tileoffs[i+1] - fin.tell()))
                        newtileoffs.append(f.tell())
                        fin.seek(mapoffs[i])
                        mapdata.write(fin.read(mapoffs[i+1] - fin.tell()))
                    mapdata.seek(0)
                    mapfiles.append(mapdata)
                # Write the new map data
                newmapoffs = []
                mapoff = f.tell()
                f.writeUInt(len(mapfiles))
                # Placeholder offset slots, one per map (rewritten below).
                for i in range(len(mapfiles)):
                    f.writeUInt(0)
                for mapdata in mapfiles:
                    f.write(mapdata.read())
                    newmapoffs.append(f.tell())
                # Patch the map-section pointer in the header (word units).
                f.seek(8)
                f.writeUInt(mapoff // 4)
                # Write the new offsets
                f.seek(tileoff + 4)
                for newtileoff in newtileoffs:
                    f.writeUInt((newtileoff - tileoff) // 4)
                f.seek(mapoff + 4)
                for newmapoff in newmapoffs:
                    f.writeUInt((newmapoff - mapoff) // 4)
    # Repack ANCG files
    files = common.getFiles(infolder, ".ANCG")
    open("tempcell.bin", "w").close()
    for file in common.showProgress(files):
        if file == "MANGA_LINE/000.ANCG":
            continue
        dirname = os.path.dirname(file)
        if not os.path.isdir(workfolder + dirname):
            continue
        pngfile = workfolder + file.replace(".ANCG", ".png")
        if not os.path.isfile(pngfile):
            continue
        # Start from a copy of the original, then overwrite its graphics.
        common.copyFile(infolder + file, outfolder + file)
        with common.Stream(infolder + file, "rb") as fin:
            common.logDebug("Repacking", file)
            tiles, cells, palettes, bpp = images.readANCGGraphics(fin, file, infolder)
            if cells is None:
                continue
            nitro.writeNCER(outfolder + file, "tempcell.bin", tiles, cells, pngfile, palettes, checkRepeat=False, writelen=False, checkalpha=True)
            totfiles += 1
    os.remove("tempcell.bin")
    common.logMessage("Done! Repacked", totfiles, "files")
| 45.780488 | 161 | 0.485171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.108151 |
f6fbcb6ce3a18a3d1314248bf99c7af9b4df0da9 | 1,741 | py | Python | src/waffle/downloader.py | JasonMWhite/congenial-waffle | 281513b4f566c43a3c326947c88ea53d60cc5fc3 | [
"MIT"
] | null | null | null | src/waffle/downloader.py | JasonMWhite/congenial-waffle | 281513b4f566c43a3c326947c88ea53d60cc5fc3 | [
"MIT"
] | 1 | 2021-12-13T19:53:44.000Z | 2021-12-13T19:53:44.000Z | src/waffle/downloader.py | JasonMWhite/congenial-waffle | 281513b4f566c43a3c326947c88ea53d60cc5fc3 | [
"MIT"
] | null | null | null | import typing
from lxml import html
import requests
from dataclasses import dataclass
from waffle.logger import LOG
from waffle.law_url import LawUrl
@dataclass
class _FollowResults:
    # Result of processing one page: the page's own URL plus the child links
    # discovered on it that still need to be crawled.
    path: LawUrl
    links: typing.List[LawUrl]
@dataclass
class DownloadResults:
    # A fetched law page: its URL and its body decoded as UTF-8 text.
    path: LawUrl
    content: str
class Downloader:
    """Crawls the laws index of a site and yields each downloaded law page."""

    LAWS = {
        'english': "eng/acts/",
        'french': "fra/lois/",
    }

    def legislation(self, language: str) -> typing.Iterable[DownloadResults]:
        """Yield every law page reachable from the index for *language*."""
        yield from self._fetch_pages([LawUrl(self.LAWS[language])])

    def _fetch_pages(self, paths: typing.List[LawUrl]) -> typing.Iterable[DownloadResults]:
        """Depth-first crawl: yield downloads, then recurse into child links."""
        for location in paths:
            LOG.debug("Fetching page at %s", location)
            follow, downloaded = self._process_page(location)
            if downloaded:
                yield downloaded
            yield from self._fetch_pages(follow.links)

    @staticmethod
    def _process_page(location: LawUrl) -> typing.Tuple[_FollowResults, typing.Optional[DownloadResults]]:
        """Fetch one URL; return (links to follow, optional captured page)."""
        LOG.debug("Processing page at %s", location)
        resp = requests.get(str(location))
        body = resp.content.decode("UTF-8")
        if str(location).endswith('.html'):
            # Leaf page: capture the content; nothing further to follow.
            download: typing.Optional[DownloadResults] = DownloadResults(path=location, content=body)
            paths = []
        else:
            # Index page: collect the alphabetical listing links to follow.
            download = None
            root = html.fromstring(body)
            letters = root.findall('.//div[@id="alphaList"]//a')
            # NOTE(review): `resp.request` is a requests PreparedRequest, which
            # exposes `path_url` rather than `path` -- confirm this attribute
            # exists with the requests version in use.
            paths = [resp.request.path + letter.attrib['href'] for letter in letters if letter.attrib['href'] != '#']
        return _FollowResults(path=location, links=[LawUrl(p) for p in paths]), download
| 30.54386 | 117 | 0.643308 | 1,560 | 0.896037 | 508 | 0.291786 | 957 | 0.549684 | 0 | 0 | 140 | 0.080414 |
f6fc765a80d06b184d48204318ac290f02c0daa1 | 756 | py | Python | ozone-framework-python-server/people/urls.py | aamduka/ozone | 3fdbf232f5ea70661204a632e45310ca9d374973 | [
"Apache-2.0"
] | 6 | 2020-02-21T22:06:31.000Z | 2020-12-08T10:48:07.000Z | ozone-framework-python-server/people/urls.py | aamduka/ozone | 3fdbf232f5ea70661204a632e45310ca9d374973 | [
"Apache-2.0"
] | 12 | 2019-12-26T17:38:40.000Z | 2022-02-10T14:15:55.000Z | ozone-framework-python-server/people/urls.py | aamduka/ozone | 3fdbf232f5ea70661204a632e45310ca9d374973 | [
"Apache-2.0"
] | 4 | 2019-08-05T13:22:29.000Z | 2021-07-21T16:04:03.000Z | from django.urls import path
from rest_framework import routers
from .administration.views import AdministrationOfUserAPIView
from .views import PersonDetailView, PersonDashboardsWidgetsView, PersonWidgetDefinitionViewSet, PersonStackViewset
# DRF router exposing the user-administration viewsets.
router = routers.SimpleRouter()
router.register(r'admin/users', AdministrationOfUserAPIView)
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ -- confirm
# the installed DRF version still accepts this keyword.
router.register(r'admin/users-widgets', PersonWidgetDefinitionViewSet, base_name='admin_users-widgets')

# Explicit per-user endpoints that do not fit the router conventions.
urlpatterns = [
    path('me/', PersonDetailView.as_view(), name='user-detail'),
    path('me/dashboards-widgets/', PersonDashboardsWidgetsView.as_view(), name='user-widgets-dashboards-detail'),
    path('admin/users-stacks/', PersonStackViewset.as_view(), name='admin_users-stacks')
]

# Append the router-generated routes to the explicit ones.
urlpatterns += router.urls
| 42 | 115 | 0.806878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.227513 |
f6fe49f2caf10f8c0e6901c9acd0a24f3952045a | 3,899 | py | Python | 00_CoinMarketCap_PullCryptoStats.py | prajwal-skdlove/CryptoCurrencyAnalysis | fd3dcc54c4e36722bb44405f16a4064452d83920 | [
"MIT"
] | 1 | 2021-12-05T19:37:58.000Z | 2021-12-05T19:37:58.000Z | 00_CoinMarketCap_PullCryptoStats.py | prajwal-skdlove/CryptoCurrencyAnalysis | fd3dcc54c4e36722bb44405f16a4064452d83920 | [
"MIT"
] | null | null | null | 00_CoinMarketCap_PullCryptoStats.py | prajwal-skdlove/CryptoCurrencyAnalysis | fd3dcc54c4e36722bb44405f16a4064452d83920 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''This code pulls all coins from Conmarketcap.com
It stores it in a pandas dataframe'''
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
import collections
def coinmarketcap_coins(n):
    '''Pull all cryptocurrencies and their statistics from coinmarketcap.com.

    Scrapes the listing table shown on the first pages of coinmarketcap.com
    (this does not pull the price history of a coin).

    Parameters
    ----------
    n : int
        Number of coinmarketcap pages worth of cryptocurrencies to pull.

    Returns
    -------
    pandas.DataFrame
        One row per coin; the most useful columns are ordered first.
    '''
    df_coindata = pd.DataFrame()  # accumulator for every page's coins

    # Loop over pages 1..n (Python range is half-open, hence n + 1).
    for pages in range(1, n + 1):
        # Request the coin information and parse it with BeautifulSoup.
        cmc = requests.get('https://coinmarketcap.com/?page=' + str(pages))
        soup = BeautifulSoup(cmc.content, 'html.parser')

        # The cryptocurrency data lives in a JSON blob inside the
        # <script id="__NEXT_DATA__"> tag.
        data = soup.find('script', id="__NEXT_DATA__", type="application/json")
        coin_data = json.loads(data.contents[0])

        # listings[0]['keysArr'] holds the column names; each following entry
        # holds one coin's values in that same order.
        listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest']['data']

        # Rebuild each coin as a dict keyed by the column names.
        # (The original code pre-assigned plain dict/list literals here that
        # were immediately overwritten by the defaultdicts; those dead
        # assignments have been removed.)
        coindata_cmb = collections.defaultdict(list)
        for k in range(1, len(listings)):
            coins = collections.defaultdict(list)
            coindata_cmb["data"].append(coins)
            for i in range(len(listings[0]['keysArr'])):
                coins[listings[0]['keysArr'][i]] = listings[k][i]

        # Pivot to column-oriented form: key -> list of per-coin values.
        temp_comb_coindata = collections.defaultdict(list)
        for d in coindata_cmb['data']:
            for k, v in d.items():
                temp_comb_coindata[k].append(v)

        # Convert this page's coins into a pandas dataframe and append it.
        df_coins = pd.DataFrame.from_dict(temp_comb_coindata)
        df_coindata = pd.concat([df_coindata, df_coins], axis=0, sort=False,
                                ignore_index=True)

    # =========================================================================
    # There are far more variables than needed, but keep them all and just
    # reorder the most useful ones first.
    # Risk - errors if coinmarketcap renamed any of these variables,
    # but that should be relatively easy to fix.
    # =========================================================================
    var_first = ["rank", "cmcRank", "id", "name", "symbol", "slug", "isActive",
                 "isAudited", "dateAdded", "lastUpdated", "quote.USD.price",
                 "ath", "atl", "high24h", "low24h", "circulatingSupply",
                 "maxSupply", "totalSupply", "quote.USD.marketCap",
                 "quote.USD.marketCapByTotalSupply",
                 "quote.USD.fullyDilluttedMarketCap"]
    var_order = var_first + list(set(list(df_coindata)) - set(var_first))

    return df_coindata[var_order]
# Pull 79 pages of listings into one DataFrame.
df_allcoins = coinmarketcap_coins(79)
| 43.808989 | 97 | 0.611952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,223 | 0.570146 |
f6fee774c750964c054daa25511fd9a0fd69478c | 903 | py | Python | api/views/logs/resources.py | bdeprez/machinaris | 233d6e60ae6526b999fb660d9b87a895341de96f | [
"Apache-2.0"
] | null | null | null | api/views/logs/resources.py | bdeprez/machinaris | 233d6e60ae6526b999fb660d9b87a895341de96f | [
"Apache-2.0"
] | null | null | null | api/views/logs/resources.py | bdeprez/machinaris | 233d6e60ae6526b999fb660d9b87a895341de96f | [
"Apache-2.0"
] | null | null | null | import json
import re
import traceback
from flask import request, make_response, abort
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint
from api.commands import log_parser
# Blueprint grouping all log endpoints under the /logs URL prefix.
blp = Blueprint(
    'Log',
    __name__,
    url_prefix='/logs',
    description="Operations on all logs"
)
@blp.route('/')
class Logs(MethodView):

    def get(self):
        """Return the known log types as a JSON array."""
        log_types = ['alerts', 'farming', 'plotting', 'archiving', 'webui', 'apisrv', 'pooling']
        response = make_response(json.dumps(log_types), 200)
        response.mimetype = "application/json"
        return response
@blp.route('/<type>')
class LogByType(MethodView):

    def get(self, type):
        """Return the raw text of one log, filtered by optional query args."""
        log_id = request.args.get('log_id')
        blockchain = request.args.get('blockchain')
        log = log_parser.get_log_lines(type, log_id=log_id, blockchain=blockchain)
        response = make_response(log, 200)
        response.mimetype = "plain/text"
        return response
| 23.763158 | 127 | 0.687708 | 527 | 0.58361 | 0 | 0 | 565 | 0.625692 | 0 | 0 | 160 | 0.177187 |
f6fefb9a652b9bf59e233f2f5acfc1fedff02992 | 11,550 | py | Python | train_hg_seqnet.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 316 | 2018-06-01T16:21:21.000Z | 2022-03-22T03:25:20.000Z | train_hg_seqnet.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 8 | 2018-06-02T07:07:49.000Z | 2019-07-11T06:55:43.000Z | train_hg_seqnet.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 91 | 2018-06-01T17:12:21.000Z | 2022-03-19T06:54:34.000Z | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
import config
# scaffold related configuration
# Command-line flags shared by this launcher and forwarded to train_subnet.py.

# scaffold related configuration
tf.app.flags.DEFINE_string(
    'data_dir', '../Datasets/tfrecords',
    'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
    'dataset_name', '{}_????', 'The pattern of the dataset name to load.')
tf.app.flags.DEFINE_string(
    #'blouse', 'dress', 'outwear', 'skirt', 'trousers', '*'
    'dataset_split_name', 'blouse', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs/',
    'The parent directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 3600,
    'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
    'train_image_size', 256,
    'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
    'heatmap_size', 64,
    'The size of the output heatmap of the model.')
tf.app.flags.DEFINE_float(
    'heatmap_sigma', 1.,
    'The sigma of Gaussian which generate the target heatmap.')
tf.app.flags.DEFINE_integer('feats_channals', 256, 'Number of features in the hourglass.')
tf.app.flags.DEFINE_integer('num_stacks', 8, 'Number of hourglasses to stack.')#8
tf.app.flags.DEFINE_integer('num_modules', 1, 'Number of residual modules at each location in the hourglass.')
tf.app.flags.DEFINE_float(
    'bbox_border', 25.,
    'The nearest distance of the crop border to al keypoints.')
tf.app.flags.DEFINE_integer(
    'train_epochs', 5,
    'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
    'epochs_per_eval', 1,
    'The number of training epochs to run between evaluations.')
tf.app.flags.DEFINE_integer(
    'batch_size', 6,
    'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
    'data_format', 'channels_first', # 'channels_first' or 'channels_last'
    'A flag to override the data format used in the model. channels_first '
    'provides a performance boost on GPU but is not always compatible '
    'with CPU. If left unspecified, the data format will be chosen '
    'automatically based on whether TensorFlow was built for CPU or GPU.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
    'tf_random_seed', 20180406, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
    'weight_decay', 0.00000, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
    'mse_weight', 1.0, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
    'momentum', 0.0,#0.9
    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 2.5e-4, 'Initial learning rate.')#2.5e-4
tf.app.flags.DEFINE_float(
    'end_learning_rate', 0.000001,
    'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
    'warmup_learning_rate', 0.00001,
    'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
    'warmup_steps', 100,
    'The total steps to warm-up.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
    'decay_boundaries', '2, 3',
    'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
    'lr_decay_factors', '1, 0.5, 0.1',
    'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
    'checkpoint_path', None,
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
    'checkpoint_model_scope', None,
    'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
    #'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
    'model_scope', 'all',
    'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
    'checkpoint_exclude_scopes', None,#'all/hg_heatmap',#
    'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
    'run_on_cloud', True,
    'Wether we will train on cloud.')
tf.app.flags.DEFINE_boolean(
    'seq_train', True,
    'Wether we will train a sequence model.')
tf.app.flags.DEFINE_string(
    'model_to_train', 'all, blouse, dress, outwear, skirt, trousers', #'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
    'The sub-model to train (comma-separated list).')

FLAGS = tf.app.flags.FLAGS
# Base parameter set forwarded to train_subnet.py; the per-category dicts in
# `detail_params` below override individual entries (keys there omit the
# leading '--').
total_params = {
    '--data_dir': FLAGS.data_dir,
    '--dataset_name': FLAGS.dataset_name,
    #'blouse', 'dress', 'outwear', 'skirt', 'trousers', '*'
    '--model_dir': FLAGS.model_dir,
    '--save_checkpoints_secs': FLAGS.save_checkpoints_secs,
    '--train_image_size': FLAGS.train_image_size,
    '--heatmap_size': FLAGS.heatmap_size,
    '--heatmap_sigma': FLAGS.heatmap_sigma,
    '--feats_channals': FLAGS.feats_channals,
    '--num_stacks': FLAGS.num_stacks,
    '--num_modules': FLAGS.num_modules,
    '--bbox_border': FLAGS.bbox_border,
    '--train_epochs': FLAGS.train_epochs,
    '--epochs_per_eval': FLAGS.epochs_per_eval,
    '--batch_size': FLAGS.batch_size,
    '--data_format': FLAGS.data_format,
    '--tf_random_seed': FLAGS.tf_random_seed,
    '--weight_decay': FLAGS.weight_decay,
    '--mse_weight': FLAGS.mse_weight,
    '--momentum': FLAGS.momentum,
    '--learning_rate': FLAGS.learning_rate,
    '--end_learning_rate': FLAGS.end_learning_rate,
    '--warmup_learning_rate': FLAGS.warmup_learning_rate,
    '--warmup_steps': FLAGS.warmup_steps,
    '--decay_boundaries': FLAGS.decay_boundaries,
    '--lr_decay_factors': FLAGS.lr_decay_factors,
    '--checkpoint_path': FLAGS.checkpoint_path,
    '--checkpoint_model_scope': FLAGS.checkpoint_model_scope,
    '--model_scope': FLAGS.model_scope,
    '--checkpoint_exclude_scopes': FLAGS.checkpoint_exclude_scopes,
    '--run_on_cloud': FLAGS.run_on_cloud
}

if FLAGS.seq_train:
    # Sequential training: 'all' is trained first; every category then
    # warm-starts from the 'all' checkpoint, excluding its own heatmap head.
    detail_params = {
        'all': {
            'model_dir': os.path.join(FLAGS.model_dir, 'all'),
            'train_epochs': 6,
            'epochs_per_eval': 3,
            'decay_boundaries': '3, 4',
            'model_scope': 'all',
        },
        'blouse': {
            'model_dir': os.path.join(FLAGS.model_dir, 'blouse'),
            'train_epochs': 50,
            'epochs_per_eval': 20,
            'decay_boundaries': '15, 30',
            'model_scope': 'blouse',
            'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
            'checkpoint_model_scope': 'all',
            'checkpoint_exclude_scopes': 'blouse/hg_heatmap',
        },
        'dress': {
            'model_dir': os.path.join(FLAGS.model_dir, 'dress'),
            'train_epochs': 50,
            'epochs_per_eval': 20,
            'decay_boundaries': '15, 30',
            'model_scope': 'dress',
            'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
            'checkpoint_model_scope': 'all',
            'checkpoint_exclude_scopes': 'dress/hg_heatmap',
        },
        'outwear': {
            'model_dir': os.path.join(FLAGS.model_dir, 'outwear'),
            'train_epochs': 50,
            'epochs_per_eval': 20,
            'decay_boundaries': '15, 30',
            'model_scope': 'outwear',
            'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
            'checkpoint_model_scope': 'all',
            'checkpoint_exclude_scopes': 'outwear/hg_heatmap',
        },
        'skirt': {
            'model_dir': os.path.join(FLAGS.model_dir, 'skirt'),
            'train_epochs': 50,
            'epochs_per_eval': 20,
            'decay_boundaries': '15, 30',
            'model_scope': 'skirt',
            'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
            'checkpoint_model_scope': 'all',
            'checkpoint_exclude_scopes': 'skirt/hg_heatmap',
        },
        'trousers': {
            'model_dir': os.path.join(FLAGS.model_dir, 'trousers'),
            'train_epochs': 50,
            'epochs_per_eval': 20,
            'decay_boundaries': '15, 30',
            'model_scope': 'trousers',
            'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
            'checkpoint_model_scope': 'all',
            'checkpoint_exclude_scopes': 'trousers/hg_heatmap',
        },
    }
else:
    # Independent training: each category trains from scratch on a longer schedule.
    detail_params = {
        'blouse': {
            'model_dir': os.path.join(FLAGS.model_dir, 'blouse'),
            'train_epochs': 60,
            'epochs_per_eval': 20,
            'decay_boundaries': '20, 40',
            'model_scope': 'blouse',
        },
        'dress': {
            'model_dir': os.path.join(FLAGS.model_dir, 'dress'),
            'train_epochs': 60,
            'epochs_per_eval': 20,
            'decay_boundaries': '20, 40',
            'model_scope': 'dress',
        },
        'outwear': {
            'model_dir': os.path.join(FLAGS.model_dir, 'outwear'),
            'train_epochs': 60,
            'epochs_per_eval': 20,
            'decay_boundaries': '20, 40',
            'model_scope': 'outwear',
        },
        'skirt': {
            'model_dir': os.path.join(FLAGS.model_dir, 'skirt'),
            'train_epochs': 60,
            'epochs_per_eval': 20,
            'decay_boundaries': '20, 40',
            'model_scope': 'skirt',
        },
        'trousers': {
            'model_dir': os.path.join(FLAGS.model_dir, 'trousers'),
            'train_epochs': 60,
            'epochs_per_eval': 20,
            'decay_boundaries': '20, 40',
            'model_scope': 'trousers',
        },
    }
def parse_comma_list(args):
    """Split a comma-separated string into a list of floats."""
    return list(map(float, args.split(',')))
def parse_str_comma_list(args):
    """Split a comma-separated string into a list of stripped strings."""
    return list(map(str.strip, args.split(',')))
def main(_):
    """Train each requested sub-model by launching train_subnet.py with the
    category-specific hyper-parameters merged over the shared defaults."""
    import subprocess
    import copy

    #['skirt', 'dress', 'outwear', 'trousers']#
    categories = parse_str_comma_list(FLAGS.model_to_train)

    # Make sure every category has its own model directory first.
    for cat in categories:
        tf.gfile.MakeDirs(os.path.join(FLAGS.model_dir, cat))

    for cat in categories:
        # Start from the shared defaults, then overlay per-category values
        # (detail_params keys omit the leading '--').
        merged = copy.deepcopy(total_params)
        for flag, value in total_params.items():
            if flag[2:] in detail_params[cat]:
                merged[flag] = detail_params[cat][flag[2:]]

        # Flatten into a ['--flag', 'value', ...] argv fragment; flags whose
        # value is None are simply omitted.
        argv = []
        for flag, value in merged.items():
            if value is not None:
                argv.append(flag)
                argv.append(str(value))
        print('params send: ', argv)

        # Run the sub-training as a child process and echo its stdout.
        proc = subprocess.Popen(['python', './train_subnet.py'] + argv,
                                stdout=subprocess.PIPE, cwd=os.getcwd())
        output, _ = proc.communicate()
        print(output)
if __name__ == '__main__':
    # Log INFO-level messages, then hand control to tf.app.run -> main(_).
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
| 39.690722 | 153 | 0.638874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,976 | 0.517403 |
f6ff14536113c487785785cf8713fabd87f3391a | 34,386 | py | Python | W_conv.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | [
"MIT"
] | null | null | null | W_conv.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | [
"MIT"
] | null | null | null | W_conv.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | [
"MIT"
] | null | null | null |
import math
import warnings
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch.autograd import Variable
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple
#from torch.nn.modules.conv import _ConvNd
# Module-level call counters. They are not used in the code visible here;
# presumably they track how many times each network head/layer is visited
# elsewhere in this module (the `fl` prefix's meaning is undocumented --
# TODO confirm against the rest of the file).
flcnt1=0
flcnt2=0
flcnt3=0
avgcnt1=0
avgcnt2=0
avgcnt3=0
#fpnlatlayercnt=0
flfpnlatlayercnt=0
bboxcnt=0
flbboxcnt=0
confcnt=0
flconfcnt=0
maskcnt=0
flmaskcnt=0
makenetcnt=0
flmakenetcnt=0
segcnt=0
flsegcnt=0
# Modified variant of torch.nn.Conv2d
class W_ConvNd(Module):
    """Base class for the modulated convolution layers in this module.

    Adapted from ``torch.nn.modules.conv._ConvNd``: validates and stores the
    convolution hyper-parameters, allocates the ``weight`` (and optional
    ``bias``) parameters, and initialises them with the standard Kaiming
    scheme.
    """

    __constants__ = ['stride', 'padding', 'dilation', 'groups',
                     'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    _in_channels: int
    out_channels: int
    kernel_size: Tuple[int, ...]
    stride: Tuple[int, ...]
    padding: Tuple[int, ...]
    dilation: Tuple[int, ...]
    transposed: bool
    output_padding: Tuple[int, ...]
    groups: int
    padding_mode: str
    weight: Tensor
    bias: Optional[Tensor]

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: _size_1_t,
                 stride: _size_1_t,
                 padding: _size_1_t,
                 dilation: _size_1_t,
                 transposed: bool,
                 output_padding: _size_1_t,
                 groups: int,
                 bias: Optional[Tensor],
                 padding_mode: str) -> None:
        """Store hyper-parameters and allocate weight/bias.

        ``kernel_size``/``stride``/``padding``/``dilation``/``output_padding``
        are expected as already-expanded tuples (subclasses call ``_pair`` etc.
        before delegating here); ``bias`` is used as a truthy flag deciding
        whether a bias parameter is created.

        Raises ``ValueError`` when channel counts are not divisible by
        ``groups`` or ``padding_mode`` is unknown.
        """
        super(W_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
                valid_padding_modes, padding_mode))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        # `_reversed_padding_repeated_twice` is the padding to be passed to
        # `F.pad` if needed (e.g., for non-zero padding types that are
        # implemented as two ops: padding + conv). `F.pad` accepts paddings in
        # reverse order than the dimension.
        self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
        if transposed:
            # Transposed convs store the weight as (in, out // groups, *kernel).
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform init for the weight; uniform init for the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def extra_repr(self):
        """Build the hyper-parameter summary used by ``repr()``; non-default
        settings are appended only when they differ from the defaults."""
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        # Bug fix: the original called super(_ConvNd, self).__setstate__(state),
        # but `_ConvNd` is never defined in this module (its import is commented
        # out), so unpickling raised NameError. Zero-argument super() resolves
        # to this class correctly.
        super().__setstate__(state)
        if not hasattr(self, 'padding_mode'):
            # Checkpoints saved before `padding_mode` existed get the default.
            self.padding_mode = 'zeros'
class W_Conv2d1(W_ConvNd):
    """Conv2d whose weights are snapshotted after a warm-up phase and then
    modulated by a learnable (out x out) matrix ``W1`` via ``mod_compute``.

    Phases are driven by the module-level counters ``flcnt1``/``avgcnt1``:
    calls with ``flcnt1 < 34`` convolve with the live weights; calls with
    ``flcnt1`` in [33, 65] snapshot the weights into the class-level dict
    ``W_Conv2d1.fl`` (keys '0'..'32'); afterwards the conv uses
    ``mod_compute(snapshot, W1)``, cycling snapshots via ``avgcnt1``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(W_Conv2d1, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # jj add: learnable modulation matrix + class-level snapshot caches
        self.W1 = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        W_Conv2d1.fl = {}
        W_Conv2d1.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        # jj add: warm-up / snapshot / modulate phases driven by globals.
        global flcnt1
        global avgcnt1
        if avgcnt1 == 34:           # wrap the snapshot cursor back to 1
            avgcnt1 = 1
        if flcnt1 == 33:            # first snapshot call starts the cursor
            avgcnt1 += 1
        if 33 <= flcnt1 <= 65:      # snapshot phase: cache current weights
            W_Conv2d1.fl[str(flcnt1 - 33)] = self.weight.clone().detach()
        if flcnt1 > 32 and 1 <= avgcnt1 <= 33:
            W_Conv2d1.Wweight[str(avgcnt1)] = mod_compute(
                W_Conv2d1.fl[str(avgcnt1 - 1)], self.W1)
        if flcnt1 < 66:
            flcnt1 += 1
        if 0 < avgcnt1 < 34:
            avgcnt1 += 1
        if flcnt1 < 34:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(W_Conv2d1.fl[str(avgcnt1 - 2)], self.W1))
def mod_compute(fl, w):
    """Modulate a conv filter bank: left-multiply every spatial tap by ``w``.

    Args:
        fl: filter snapshot of shape (out, in, kH, kW).
        w:  (out, out) modulation matrix (see ``make_mw``).

    Returns:
        Tensor of ``fl``'s shape whose output-channel dimension has been
        mixed by ``w`` (equivalent to ``w @ fl[:, :, i, j]`` per tap).

    Generalized from the original, which handled only kernel sizes 1 and 3
    (raising UnboundLocalError otherwise) and, in the 3x3 branch, allocated
    the result as a CPU/float32 zeros tensor — breaking CUDA inputs and
    silently down-casting other dtypes.  A single einsum covers any kernel
    shape and keeps the inputs' device/dtype.
    """
    return torch.einsum('po,oihw->pihw', w, fl)
def make_mw(o_size, i_size, k_size):
    """Build the initial modulation matrix: identity over output channels.

    ``i_size`` and ``k_size`` are accepted for interface parity but unused —
    the modulation only mixes output channels.
    """
    return torch.eye(o_size)
################################################# jj end
class W_Conv2d2(W_ConvNd):
    """Second weight-modulated Conv2d variant; identical scheme to
    ``W_Conv2d1`` but driven by its own globals ``flcnt2``/``avgcnt2`` and
    its own modulation matrix ``W2`` (see that class for the phase logic).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(W_Conv2d2, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # jj add: learnable modulation matrix + class-level snapshot caches
        self.W2 = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        W_Conv2d2.fl = {}
        W_Conv2d2.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        # jj add: warm-up / snapshot / modulate phases driven by globals.
        global flcnt2
        global avgcnt2
        if avgcnt2 == 34:           # wrap the snapshot cursor back to 1
            avgcnt2 = 1
        if flcnt2 == 33:            # first snapshot call starts the cursor
            avgcnt2 += 1
        if 33 <= flcnt2 <= 65:      # snapshot phase: cache current weights
            W_Conv2d2.fl[str(flcnt2 - 33)] = self.weight.clone().detach()
        if flcnt2 > 32 and 1 <= avgcnt2 <= 33:
            W_Conv2d2.Wweight[str(avgcnt2)] = mod_compute(
                W_Conv2d2.fl[str(avgcnt2 - 1)], self.W2)
        if flcnt2 < 66:
            flcnt2 += 1
        if 0 < avgcnt2 < 34:
            avgcnt2 += 1
        if flcnt2 < 34:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(W_Conv2d2.fl[str(avgcnt2 - 2)], self.W2))
################################################# jj end
class W_Conv2d3(W_ConvNd):
    """Third weight-modulated Conv2d variant; identical scheme to
    ``W_Conv2d1`` but driven by its own globals ``flcnt3``/``avgcnt3`` and
    its own modulation matrix ``W3`` (see that class for the phase logic).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(W_Conv2d3, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # jj add: learnable modulation matrix + class-level snapshot caches
        self.W3 = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        W_Conv2d3.fl = {}
        W_Conv2d3.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        # jj add: warm-up / snapshot / modulate phases driven by globals.
        global flcnt3
        global avgcnt3
        if avgcnt3 == 34:           # wrap the snapshot cursor back to 1
            avgcnt3 = 1
        if flcnt3 == 33:            # first snapshot call starts the cursor
            avgcnt3 += 1
        if 33 <= flcnt3 <= 65:      # snapshot phase: cache current weights
            W_Conv2d3.fl[str(flcnt3 - 33)] = self.weight.clone().detach()
        if flcnt3 > 32 and 1 <= avgcnt3 <= 33:
            W_Conv2d3.Wweight[str(avgcnt3)] = mod_compute(
                W_Conv2d3.fl[str(avgcnt3 - 1)], self.W3)
        if flcnt3 < 66:
            flcnt3 += 1
        if 0 < avgcnt3 < 34:
            avgcnt3 += 1
        if flcnt3 < 34:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(W_Conv2d3.fl[str(avgcnt3 - 2)], self.W3))
################################################# jj end
class bbox_Conv2d(W_ConvNd):
    """Weight-modulated Conv2d for the bbox head: 5-call warm-up, 5-call
    snapshot window, then modulated convolution.

    Driven by the module-level counters ``flbboxcnt``/``bboxcnt``: calls
    with ``flbboxcnt < 6`` use the live weights; calls with ``flbboxcnt``
    in [5, 9] snapshot weights into the class-level dict ``bbox_Conv2d.fl``
    (keys '0'..'4'); afterwards the conv uses ``mod_compute(snapshot, mw)``,
    cycling snapshots via ``bboxcnt``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(bbox_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # learnable modulation matrix + class-level snapshot caches
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        bbox_Conv2d.fl = {}
        bbox_Conv2d.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        global flbboxcnt
        global bboxcnt
        if bboxcnt == 6:            # wrap the snapshot cursor back to 1
            bboxcnt = 1
        if flbboxcnt == 5:          # first snapshot call starts the cursor
            bboxcnt += 1
        if 5 <= flbboxcnt <= 9:     # snapshot phase: cache current weights
            bbox_Conv2d.fl[str(flbboxcnt - 5)] = self.weight.clone().detach()
        if flbboxcnt > 4 and 1 <= bboxcnt <= 5:
            bbox_Conv2d.Wweight[str(bboxcnt)] = mod_compute(
                bbox_Conv2d.fl[str(bboxcnt - 1)], self.mw)
        if flbboxcnt < 10:
            flbboxcnt += 1
        if 0 < bboxcnt < 6:
            bboxcnt += 1
        if flbboxcnt < 6:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(bbox_Conv2d.fl[str(bboxcnt - 2)], self.mw))
class conf_Conv2d(W_ConvNd):
    """Weight-modulated Conv2d for the confidence head; same 5/5 phase
    scheme as ``bbox_Conv2d`` but driven by ``flconfcnt``/``confcnt``."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(conf_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # learnable modulation matrix + class-level snapshot caches
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        conf_Conv2d.fl = {}
        conf_Conv2d.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        global flconfcnt
        global confcnt
        if confcnt == 6:            # wrap the snapshot cursor back to 1
            confcnt = 1
        if flconfcnt == 5:          # first snapshot call starts the cursor
            confcnt += 1
        if 5 <= flconfcnt <= 9:     # snapshot phase: cache current weights
            conf_Conv2d.fl[str(flconfcnt - 5)] = self.weight.clone().detach()
        if flconfcnt > 4 and 1 <= confcnt <= 5:
            conf_Conv2d.Wweight[str(confcnt)] = mod_compute(
                conf_Conv2d.fl[str(confcnt - 1)], self.mw)
        if flconfcnt < 10:
            flconfcnt += 1
        if 0 < confcnt < 6:
            confcnt += 1
        if flconfcnt < 6:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(conf_Conv2d.fl[str(confcnt - 2)], self.mw))
class mask_Conv2d(W_ConvNd):
    """Weight-modulated Conv2d for the mask head; same 5/5 phase scheme as
    ``bbox_Conv2d`` but driven by ``flmaskcnt``/``maskcnt``."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(mask_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # learnable modulation matrix + class-level snapshot caches
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        mask_Conv2d.fl = {}
        mask_Conv2d.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        global flmaskcnt
        global maskcnt
        if maskcnt == 6:            # wrap the snapshot cursor back to 1
            maskcnt = 1
        if flmaskcnt == 5:          # first snapshot call starts the cursor
            maskcnt += 1
        if 5 <= flmaskcnt <= 9:     # snapshot phase: cache current weights
            mask_Conv2d.fl[str(flmaskcnt - 5)] = self.weight.clone().detach()
        if flmaskcnt > 4 and 1 <= maskcnt <= 5:
            mask_Conv2d.Wweight[str(maskcnt)] = mod_compute(
                mask_Conv2d.fl[str(maskcnt - 1)], self.mw)
        if flmaskcnt < 10:
            flmaskcnt += 1
        if 0 < maskcnt < 6:
            maskcnt += 1
        if flmaskcnt < 6:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(mask_Conv2d.fl[str(maskcnt - 2)], self.mw))
class makenet_Conv2d(W_ConvNd):
    """Weight-modulated Conv2d for the makenet branch; same scheme as
    ``bbox_Conv2d`` but with a 10-call warm-up and 10 snapshots, driven by
    ``flmakenetcnt``/``makenetcnt``."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(makenet_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # learnable modulation matrix + class-level snapshot caches
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        makenet_Conv2d.fl = {}
        makenet_Conv2d.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        global flmakenetcnt
        global makenetcnt
        if makenetcnt == 11:            # wrap the snapshot cursor back to 1
            makenetcnt = 1
        if flmakenetcnt == 10:          # first snapshot call starts the cursor
            makenetcnt += 1
        if 10 <= flmakenetcnt <= 19:    # snapshot phase: cache current weights
            makenet_Conv2d.fl[str(flmakenetcnt - 10)] = self.weight.clone().detach()
        if flmakenetcnt > 9 and 1 <= makenetcnt <= 10:
            makenet_Conv2d.Wweight[str(makenetcnt)] = mod_compute(
                makenet_Conv2d.fl[str(makenetcnt - 1)], self.mw)
        if flmakenetcnt < 20:
            flmakenetcnt += 1
        if 0 < makenetcnt < 11:
            makenetcnt += 1
        if flmakenetcnt < 11:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(makenet_Conv2d.fl[str(makenetcnt - 2)], self.mw))
class seg_Conv2d(W_ConvNd):
    """Weight-modulated Conv2d for the segmentation head; degenerate 1-call
    variant of the ``bbox_Conv2d`` scheme (a single snapshot), driven by
    ``flsegcnt``/``segcnt``."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(seg_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        # learnable modulation matrix + class-level snapshot caches
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        seg_Conv2d.fl = {}
        seg_Conv2d.Wweight = {}

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        global flsegcnt
        global segcnt
        if segcnt == 2:             # wrap the snapshot cursor back to 1
            segcnt = 1
        if flsegcnt == 1:           # first snapshot call starts the cursor
            segcnt += 1
        if flsegcnt == 1:           # snapshot phase: single cached weight
            seg_Conv2d.fl['0'] = self.weight.clone().detach()
        if flsegcnt > 0 and segcnt == 1:
            seg_Conv2d.Wweight['1'] = mod_compute(seg_Conv2d.fl['0'], self.mw)
        if flsegcnt < 2:
            flsegcnt += 1
        if 0 < segcnt < 2:
            segcnt += 1
        if flsegcnt < 2:
            # warm-up: plain convolution with the live weights
            return self._conv_forward(input, self.weight)
        # modulated phase: snapshot selected by the already-advanced cursor
        return self._conv_forward(
            input, mod_compute(seg_Conv2d.fl[str(segcnt - 2)], self.mw))
class fpn_lat_layers_Conv2d(W_ConvNd):
    """FPN lateral 1x1 conv: after one warm-up call the live weights are
    snapshotted (one snapshot per backbone stage width, keyed by
    ``in_channels`` in {512, 1024, 2048}) and subsequent calls convolve with
    ``mw @ snapshot`` instead (recomputed every call so that ``mw`` trains).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(fpn_lat_layers_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        # Per-stage snapshot (fl_*) and modulated (fla_*) placeholders;
        # shapes here are just initial dummies, overwritten in forward().
        self.fl_2048 = torch.ones(256, 2048, 1, 1)
        self.fl_1024 = torch.ones(256, 1024, 1, 1)
        self.fl_512 = torch.ones(256, 512, 1, 1)
        self.fla_2048 = torch.ones(256, 2048, 1, 1)
        self.fla_1024 = torch.ones(256, 1024, 1, 1)
        self.fla_512 = torch.ones(256, 512, 1, 1)
        self.in_channels = in_channels
        self.cnt = 0  # call counter driving the warm-up / snapshot phases

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        if self.cnt < 2:
            self.cnt += 1
        if self.cnt == 2:
            # snapshot the trained weights for this stage width, once
            snap = self.weight.clone().detach()
            if self.in_channels == 2048:
                self.fl_2048 = snap
            elif self.in_channels == 1024:
                self.fl_1024 = snap
            elif self.in_channels == 512:
                self.fl_512 = snap
            self.cnt += 1
        if self.cnt > 2:
            # 1x1 kernels: modulation reduces to a matmul over channels
            if self.in_channels == 2048:
                self.fla_2048 = (self.mw @ self.fl_2048.squeeze(-1).squeeze(-1)
                                 ).unsqueeze(-1).unsqueeze(-1)
            elif self.in_channels == 1024:
                self.fla_1024 = (self.mw @ self.fl_1024.squeeze(-1).squeeze(-1)
                                 ).unsqueeze(-1).unsqueeze(-1)
            elif self.in_channels == 512:
                self.fla_512 = (self.mw @ self.fl_512.squeeze(-1).squeeze(-1)
                                ).unsqueeze(-1).unsqueeze(-1)
        if self.cnt < 2:
            return self._conv_forward(input, self.weight)
        if self.in_channels == 2048:
            return self._conv_forward(input, self.fla_2048)
        if self.in_channels == 1024:
            return self._conv_forward(input, self.fla_1024)
        if self.in_channels == 512:
            return self._conv_forward(input, self.fla_512)
        return self._conv_forward(input, self.weight)
class fpn_pred_layers_Conv2d(W_ConvNd):
    """FPN prediction 3x3 conv: after one warm-up call the live weights are
    snapshotted (one snapshot per ``x_cnt`` in {512, 1024, 2048}) and
    subsequent calls convolve with the ``mw``-modulated snapshot.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        x_cnt: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(fpn_pred_layers_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        # Per-stage snapshot (fl_*) and modulated (fla_*) placeholders;
        # dummy values, overwritten in forward().
        self.fl_512 = torch.ones(256, 256, 3, 3)
        self.fl_1024 = torch.ones(256, 256, 3, 3)
        self.fl_2048 = torch.ones(256, 256, 3, 3)
        self.fla_512 = torch.ones(256, 256, 3, 3)
        self.fla_1024 = torch.ones(256, 256, 3, 3)
        self.fla_2048 = torch.ones(256, 256, 3, 3)
        self.cnt = 0        # call counter driving warm-up / snapshot phases
        self.x_cnt = x_cnt  # stage key selecting which snapshot pair to use

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def _modulate(self, fl):
        # Mix the output channels of a 3x3 filter bank by self.mw, per tap.
        fla = torch.zeros(fl.size(0), fl.size(1), 3, 3).cuda()
        for i in range(3):
            for j in range(3):
                tap = fl[:, :, i, j].squeeze(-1).squeeze(-1)
                fla[:, :, i, j] = self.mw @ tap
        return fla

    def forward(self, input: Tensor) -> Tensor:
        if self.cnt < 2:
            self.cnt += 1
        if self.cnt == 2:
            # snapshot the trained weights for this stage, once
            snap = self.weight.clone().detach()
            if self.x_cnt == 512:
                self.fl_512 = snap
            elif self.x_cnt == 1024:
                self.fl_1024 = snap
            elif self.x_cnt == 2048:
                self.fl_2048 = snap
            self.cnt += 1
        if self.cnt > 2:
            # recompute the modulated filters every call so mw keeps training
            if self.x_cnt == 512:
                self.fla_512 = self._modulate(self.fl_512)
            if self.x_cnt == 1024:
                self.fla_1024 = self._modulate(self.fl_1024)
            if self.x_cnt == 2048:
                self.fla_2048 = self._modulate(self.fl_2048)
        if self.cnt < 2:
            return self._conv_forward(input, self.weight)
        if self.x_cnt == 512:
            return self._conv_forward(input, self.fla_512)
        if self.x_cnt == 1024:
            return self._conv_forward(input, self.fla_1024)
        if self.x_cnt == 2048:
            return self._conv_forward(input, self.fla_2048)
        return self._conv_forward(input, self.weight)
class fpn_down_layers_Conv2d(W_ConvNd):
    """FPN downsample conv: after one warm-up call the live weights are
    snapshotted (one snapshot per ``x_cnt`` in {0, 1}) and subsequent calls
    convolve with the ``mw``-modulated snapshot.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        x_cnt: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        ks = _pair(kernel_size)
        super(fpn_down_layers_Conv2d, self).__init__(
            in_channels, out_channels, ks, _pair(stride), _pair(padding),
            _pair(dilation), False, _pair(0), groups, bias, padding_mode)
        self.mw = Parameter(make_mw(out_channels, in_channels, ks[0]),
                            requires_grad=True)
        # Per-layer snapshot (fl_*) and modulated (fla_*) placeholders;
        # dummy values, overwritten in forward().
        self.fl_0 = torch.ones(2, 2, 2, 2)
        self.fl_1 = torch.ones(2, 2, 2, 2)
        self.fla_0 = torch.ones(2, 2, 2, 2)
        self.fla_1 = torch.ones(2, 2, 2, 2)
        self.cnt = 0        # call counter driving warm-up / snapshot phases
        self.x_cnt = x_cnt  # layer index selecting which snapshot pair to use

    def _conv_forward(self, input, weight):
        # Non-'zeros' padding modes are emulated with an explicit F.pad.
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def _modulate(self, fl):
        # Mix the output channels of a 3x3 filter bank by self.mw, per tap.
        fla = torch.zeros(fl.size(0), fl.size(1), 3, 3).cuda()
        for i in range(3):
            for j in range(3):
                tap = fl[:, :, i, j].squeeze(-1).squeeze(-1)
                fla[:, :, i, j] = self.mw @ tap
        return fla

    def forward(self, input: Tensor) -> Tensor:
        if self.cnt < 2:
            self.cnt += 1
        if self.cnt == 2:
            # snapshot the trained weights for this layer, once
            snap = self.weight.clone().detach()
            if self.x_cnt == 0:
                self.fl_0 = snap
            elif self.x_cnt == 1:
                self.fl_1 = snap
            self.cnt += 1
        if self.cnt > 2:
            # recompute the modulated filters every call so mw keeps training
            if self.x_cnt == 0:
                self.fla_0 = self._modulate(self.fl_0)
            if self.x_cnt == 1:
                self.fla_1 = self._modulate(self.fl_1)
        if self.cnt < 2:
            return self._conv_forward(input, self.weight)
        if self.x_cnt == 0:
            return self._conv_forward(input, self.fla_0)
        if self.x_cnt == 1:
            return self._conv_forward(input, self.fla_1)
        return self._conv_forward(input, self.weight)
1001525132d1016af0a89108b37f0bd4e52435e6 | 2,246 | py | Python | instapyper.py | rriehle/instapyper | 0a2e01793a971ccaa47dac1cc32b6d6209ad7d77 | [
"MIT"
] | 1 | 2017-09-27T19:17:18.000Z | 2017-09-27T19:17:18.000Z | instapyper.py | rriehle/instapyper | 0a2e01793a971ccaa47dac1cc32b6d6209ad7d77 | [
"MIT"
] | null | null | null | instapyper.py | rriehle/instapyper | 0a2e01793a971ccaa47dac1cc32b6d6209ad7d77 | [
"MIT"
] | null | null | null | # encoding: utf-8
import requests
from requests_oauthlib import OAuth1
class Instapyper:
# Not sure this dict is necessary or even useful
status_codes = {
200: "Ok",
201: "URL successfully added",
400: "Bad Request",
401: "Unauthorized",
403: "Invalid username or password",
405: "Method not allowed",
500: "Service error, try again later",
504: "Gateway timeout",
}
# Likewise unsure of the utility of this dict
urls = {
'add': "https://www.instapaper.com/api/add",
'auth': "https://www.instapaper.com/api/authenticate",
'bookmarks_list': "https://www.instapaper.com/api/1/bookmarks/list",
'folders_list': "https://www.instapaper.com/api/1/folders/list",
'oauth_access_token': "https://www.instapaper.com/api/1/oauth/access_token",
}
def __init__(self, user, password):
self.user = user
self.password = password
def add(self, url, title=None, selection=None, redirect=None, jsonp=None):
parameters = {
'username': self.user,
'password': self.password,
'url': url,
}
if title:
parameters['title'] = title
if selection:
parameters['selection'] = selection
if redirect:
parameters['redirect'] = redirect
if jsonp:
parameters['jsonp'] = jsonp
try:
self.response = requests.post(
Instapyper.urls['add'],
data=parameters,
)
except Exception as e:
print(e)
return self.response
def bookmarks_list(self):
pass
def oauth(self, jsonp=None):
'''
http://docs.python-requests.org/en/master/user/authentication/#oauth-1-authentication
'''
auth = OAuth1(
'APP_KEY',
'APP_SECRET',
'USER_OAUTH_TOKEN',
'USER_OAUTH_TOKEN_SECRET',
)
try:
self.response = requests.get(
Instapyper.urls['oauth_access_token'],
auth=auth,
)
except Exception as e:
print(e)
return self.response
| 25.522727 | 93 | 0.547195 | 2,171 | 0.966607 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.361977 |
10016d95225710db6712e5ab81370184805de3d2 | 4,732 | py | Python | tests/parser/test_mamanger.py | Sungup/sungup-utils | 6ef3f9d65aaf5d9f11c683356823ead174c5793b | [
"MIT"
] | null | null | null | tests/parser/test_mamanger.py | Sungup/sungup-utils | 6ef3f9d65aaf5d9f11c683356823ead174c5793b | [
"MIT"
] | null | null | null | tests/parser/test_mamanger.py | Sungup/sungup-utils | 6ef3f9d65aaf5d9f11c683356823ead174c5793b | [
"MIT"
] | null | null | null | import os
import re
import string
from collections import namedtuple, defaultdict
from tests import utils
from tests.parser import ParserTestCase
from sglove.parser.exception import *
from sglove.parser import _OptionManager
class TestOptionManager(ParserTestCase):
__TEST_COUNT = 50
def __test_invalid_naming(self, str_func, str_list):
manager = _OptionManager(self._APP_NAME)
name_t = namedtuple('name_t', ('name', 'sub'))
for _ in range(self.__TEST_COUNT):
invalid_strings = [str_func(c) for c in str_list]
# 1. Try constructor's name format check
for case in invalid_strings:
with self.assertRaises(SGLException) as err:
_OptionManager(case)
self.assertEqual(err.exception.code,
SGL_PARSER_INVALID_NAME_FORMAT)
# 2. Try each member function's name format check
valid = self._gen_random_string()
test_case = [
(name_t(invalid, valid), name_t(valid, invalid))
for invalid in invalid_strings
]
for case in [v for sub in test_case for v in sub]:
for func in (manager.dest_name,
manager.env_name,
manager.long_arg):
with self.assertRaises(SGLException) as err:
func(case.name, case.sub)
self.assertEqual(err.exception.code,
SGL_PARSER_INVALID_NAME_FORMAT)
def test_invalid_naming(self):
# 1. Contain invalid characters
punctuation = re.sub(r'[_\-]', '', string.punctuation)
self.__test_invalid_naming(lambda c: self._gen_random_string(middle=c),
punctuation)
# 2. Consisted with valid character but not started with alphabet
invalid_first = string.digits + '_-'
self.__test_invalid_naming(lambda c: self._gen_random_string(prefix=c),
invalid_first)
# 3. Not ended with alphabet and numbers
invalid_last = '_-'
self.__test_invalid_naming(lambda c: self._gen_random_string(suffix=c),
invalid_last)
def test_valid_naming(self):
manager = _OptionManager(self._APP_NAME)
test_case = {self._gen_random_string(): self._gen_random_string()
for _ in range(self.__TEST_COUNT)}
for k, v in test_case.items():
self.assertEqual(manager.env_name(k, v), self._to_env_name(k, v))
self.assertEqual(manager.long_arg(k, v), self._to_arg_name(k, v))
self.assertEqual(manager.dest_name(k, v), self._to_dest_name(k, v))
def test_invalid_initialization(self):
# 1. Enter invalid type.
with self.assertRaises(SGLException) as err:
_OptionManager(self._APP_NAME, [self._APP_NAME])
self.assertEqual(err.exception.code, SGL_PARSER_UNEXPECTED_ENV_TYPE)
# 2. Enter dict converted os.environ as environ target
_OptionManager(self._APP_NAME, dict(os.environ))
def test_invalid_loading(self):
manager = _OptionManager(self._APP_NAME)
with self.assertRaises(SGLException) as err:
manager.load(utils.get_temp_file('invalid_path'))
self.assertEqual(err.exception.code, SGL_PARSER_CONFIG_NOT_EXIST)
def test_normal(self):
test_options = self._gen_random_inputs(self.__TEST_COUNT)
# This is the temporal manager to call the env_name().
manager = _OptionManager(self._APP_NAME)
env_dict = {}
conf_dict = defaultdict(dict)
for category, values in test_options.items():
for name, value in values.items():
if value.is_env_choosable:
env_dict.update({
manager.env_name(category, name): str(value.e_val)
})
if value.is_file_choosable:
conf_dict[category].update({name: value.f_val})
del manager
# Run normal valid test
with utils.config_file(conf_dict) as temp_file:
manager = _OptionManager(self._APP_NAME, environ=env_dict)
manager.load(temp_file)
for category, values in test_options.items():
for name, value in values.items():
default = manager.default_value(category, name,
default=value.default,
type=value.type)
self.assertEqual(default, value.expected)
| 37.259843 | 79 | 0.595097 | 4,502 | 0.951395 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.091082 |
1003316b1ac47d4a3dfc26a90b05ca7e035820e5 | 6,333 | py | Python | gitea_api/models/internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | null | null | null | gitea_api/models/internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | null | null | null | gitea_api/models/internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | 1 | 2022-01-27T14:12:40.000Z | 2022-01-27T14:12:40.000Z | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.15.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from gitea_api.configuration import Configuration
class InternalTracker(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_only_contributors_to_track_time': 'bool',
'enable_issue_dependencies': 'bool',
'enable_time_tracker': 'bool'
}
attribute_map = {
'allow_only_contributors_to_track_time': 'allow_only_contributors_to_track_time',
'enable_issue_dependencies': 'enable_issue_dependencies',
'enable_time_tracker': 'enable_time_tracker'
}
def __init__(self, allow_only_contributors_to_track_time=None, enable_issue_dependencies=None, enable_time_tracker=None, _configuration=None): # noqa: E501
"""InternalTracker - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._allow_only_contributors_to_track_time = None
self._enable_issue_dependencies = None
self._enable_time_tracker = None
self.discriminator = None
if allow_only_contributors_to_track_time is not None:
self.allow_only_contributors_to_track_time = allow_only_contributors_to_track_time
if enable_issue_dependencies is not None:
self.enable_issue_dependencies = enable_issue_dependencies
if enable_time_tracker is not None:
self.enable_time_tracker = enable_time_tracker
@property
def allow_only_contributors_to_track_time(self):
"""Gets the allow_only_contributors_to_track_time of this InternalTracker. # noqa: E501
Let only contributors track time (Built-in issue tracker) # noqa: E501
:return: The allow_only_contributors_to_track_time of this InternalTracker. # noqa: E501
:rtype: bool
"""
return self._allow_only_contributors_to_track_time
@allow_only_contributors_to_track_time.setter
def allow_only_contributors_to_track_time(self, allow_only_contributors_to_track_time):
"""Sets the allow_only_contributors_to_track_time of this InternalTracker.
Let only contributors track time (Built-in issue tracker) # noqa: E501
:param allow_only_contributors_to_track_time: The allow_only_contributors_to_track_time of this InternalTracker. # noqa: E501
:type: bool
"""
self._allow_only_contributors_to_track_time = allow_only_contributors_to_track_time
    @property
    def enable_issue_dependencies(self) -> bool:
        """Gets the enable_issue_dependencies of this InternalTracker.  # noqa: E501
        Enable dependencies for issues and pull requests (Built-in issue tracker)  # noqa: E501
        :return: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
        :rtype: bool
        """
        return self._enable_issue_dependencies
    @enable_issue_dependencies.setter
    def enable_issue_dependencies(self, enable_issue_dependencies: bool) -> None:
        """Sets the enable_issue_dependencies of this InternalTracker.
        Enable dependencies for issues and pull requests (Built-in issue tracker)  # noqa: E501
        :param enable_issue_dependencies: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
        :type: bool
        """
        self._enable_issue_dependencies = enable_issue_dependencies
    @property
    def enable_time_tracker(self) -> bool:
        """Gets the enable_time_tracker of this InternalTracker.  # noqa: E501
        Enable time tracking (Built-in issue tracker)  # noqa: E501
        :return: The enable_time_tracker of this InternalTracker.  # noqa: E501
        :rtype: bool
        """
        return self._enable_time_tracker
    @enable_time_tracker.setter
    def enable_time_tracker(self, enable_time_tracker: bool) -> None:
        """Sets the enable_time_tracker of this InternalTracker.
        Enable time tracking (Built-in issue tracker)  # noqa: E501
        :param enable_time_tracker: The enable_time_tracker of this InternalTracker.  # noqa: E501
        :type: bool
        """
        self._enable_time_tracker = enable_time_tracker
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InternalTracker, dict):
for key, value in self.items():
result[key] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # pprint gives a stable, readable rendering of the nested dict.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr matches the printable form.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InternalTracker):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InternalTracker):
return True
return self.to_dict() != other.to_dict()
| 34.796703 | 160 | 0.657666 | 6,016 | 0.949945 | 0 | 0 | 2,627 | 0.414811 | 0 | 0 | 2,934 | 0.463288 |
1003df02d14e656596e0f0e0b34b98df84ded585 | 2,501 | py | Python | tests/unit/flow/test_flow_except.py | gvvynplaine/jina | 1441f40f849d22c21b8397527960b4307ff0d5a3 | [
"Apache-2.0"
] | 1 | 2021-09-07T05:10:24.000Z | 2021-09-07T05:10:24.000Z | tests/unit/flow/test_flow_except.py | gvvynplaine/jina | 1441f40f849d22c21b8397527960b4307ff0d5a3 | [
"Apache-2.0"
] | null | null | null | tests/unit/flow/test_flow_except.py | gvvynplaine/jina | 1441f40f849d22c21b8397527960b4307ff0d5a3 | [
"Apache-2.0"
] | null | null | null | import unittest
from jina.executors.crafters import BaseCrafter
from jina.flow import Flow
from jina.proto import jina_pb2
from tests import JinaTestCase
class DummyCrafter(BaseCrafter):
    """Crafter that always fails: ``craft`` divides by zero so tests can
    check how errors propagate through a Flow."""
    def craft(self, *args, **kwargs):
        # Deliberately raise ZeroDivisionError on every document.
        return 1 / 0
class FlowExceptTestCase(JinaTestCase):
    """Tests that executor failures inside a Flow surface as ERROR statuses
    on the response instead of crashing the Flow."""

    def test_bad_flow(self):
        """An abstract BaseCrafter in the first pod must be reported as the
        failing pod ('r1')."""
        def validate(req):
            assert req.status.code == jina_pb2.Status.ERROR
            assert req.status.details[0].pod == 'r1'
        f = (Flow().add(name='r1', uses='!BaseCrafter')
             .add(name='r2', uses='!BaseEncoder')
             .add(name='r3', uses='!BaseEncoder'))
        # always test two times, make sure the flow still works after it fails on the first
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)

    def test_bad_flow_customized(self):
        """A custom crafter raising ZeroDivisionError in pod 'r2' must carry
        the exception name in the status details."""
        def validate(req):
            assert req.status.code == jina_pb2.Status.ERROR
            assert req.status.details[0].pod == 'r2'
            self.assertTrue(req.status.details[0].exception.startswith('ZeroDivisionError'))
        f = (Flow().add(name='r1', uses='_pass')
             .add(name='r2', uses='!DummyCrafter')
             .add(name='r3', uses='!BaseEncoder'))
        with f:
            f.dry_run()
        # always test two times, make sure the flow still works after it fails on the first
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)

    def test_except_with_parallel(self):
        """With parallel pods, failures from both the crafter and the
        downstream encoder must be collected in the status details."""
        def validate(req):
            assert req.status.code == jina_pb2.Status.ERROR
            assert len(req.status.details) == 2
            assert req.status.details[0].executor == 'DummyCrafter'
            assert req.status.details[1].executor == 'BaseEncoder'
            self.assertTrue(req.status.details[0].exception.startswith('ZeroDivisionError'))
            self.assertTrue(req.status.details[1].exception.startswith('NotImplementedError'))
        f = (Flow().add(name='r1', uses='_pass')
             .add(name='r2', uses='!DummyCrafter', parallel=3)
             .add(name='r3', uses='!BaseEncoder'))
        with f:
            f.dry_run()
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
10058581bb6dbcacbc9e94927325b0900f7918f8 | 5,734 | py | Python | Operator/server.py | ale9412/Operator | 353f873878018503015113cd26592ac7bf08bfc8 | [
"MIT"
] | null | null | null | Operator/server.py | ale9412/Operator | 353f873878018503015113cd26592ac7bf08bfc8 | [
"MIT"
] | null | null | null | Operator/server.py | ale9412/Operator | 353f873878018503015113cd26592ac7bf08bfc8 | [
"MIT"
] | null | null | null | import socketserver
import multiprocessing as mp
from shunting_yard_algorithm import evaluate
class MyTCPHandler(socketserver.BaseRequestHandler):
    """
    The request handler class for our server.

    It is instantiated once per connection. It receives a comma-separated
    list of infix expressions terminated by a NUL byte, evaluates them in
    worker processes via ProcessHandler, and sends the comma-separated
    results back to the client.
    """

    def handle(self):
        """Receive operations, evaluate them in parallel, reply with results."""
        data = self.receive()
        operations = data.split(',')
        # ProcessHandler defaults to 2 worker processes; pass process_number
        # to ProcessHandler to change the degree of parallelism.
        process_handler = ProcessHandler(operations)
        # processes_results is a list of (process_order, results_list) tuples.
        processes_results = process_handler.create_processes()
        # Sort by process order so results line up with the request order,
        # then flatten the per-process result lists into a single list.
        processes_results.sort()
        result_list = []
        for _, results in processes_results:
            result_list.extend(results)
        stream = self.to_stream(result_list)
        # Send back to the client and close the connection.
        self.request.sendall(stream)
        self.request.close()

    def to_stream(self, results):
        """Encode a list of results as comma-separated UTF-8 bytes."""
        return bytes(','.join(str(result) for result in results), "utf-8")

    def receive(self):
        """Read from the socket until the NUL delimiter; return decoded text.

        Also stops if the peer closes the connection early (the original
        looped forever on an empty recv).
        """
        data = bytearray()
        buffer = 1048576  # 1 MiB recv buffer (was 1048476 -- typo, not 1 MB)
        while True:
            # self.request is the TCP socket connected to the client.
            received = self.request.recv(buffer)
            if not received:
                # Peer closed before sending the delimiter; return what we got.
                break
            data += received
            # NUL is a single byte, so it cannot straddle a chunk boundary.
            if data.endswith(b'\0'):
                break
        return data.decode().strip('\0')
class ProcessHandler:
    """Distributes a list of expressions across worker processes.

    Results are exchanged through a multiprocessing.Queue, so worker
    processes can report back without explicit synchronisation; each worker
    tags its result list with its chunk position so the caller can restore
    the original order.
    """

    def __init__(self, operations, process_number=2):
        self.queue = mp.Queue()
        self.operations = operations
        # Pre-split the operations so each worker gets a contiguous chunk.
        # (The original computed an unused `limit` here; removed.)
        self.operations_per_proc = self.split_list(process_number)
        self.processes = []

    def create_processes(self):
        """Run one worker per chunk; return a list of (position, results).

        The position identifier lets the caller sort the per-process result
        lists back into the order the operations were submitted in.
        """
        for pos, op_list in enumerate(self.operations_per_proc):
            self.processes.append(mp.Process(target=self.resolver, args=(pos, op_list)))
        for p in self.processes:
            p.start()
        # Exactly one queue entry is produced per worker.
        results = [self.queue.get() for p in self.processes]
        for process in self.processes:
            process.terminate()
        return results

    def resolver(self, pos, operations_list):
        """Worker body: evaluate every expression, report (pos, results)."""
        result_list = list(map(evaluate, operations_list))
        self.queue.put((pos, result_list))

    def split_list(self, splits):
        """Split ``self.operations`` into ``splits`` chunks.

        When the length is not evenly divisible, the leftover elements are
        appended to the last chunk.
        """
        full_list = self.operations
        length = int(len(full_list) / splits)
        parts = []
        for i in range(splits):
            parts.append(list(full_list[:length]))
            full_list = full_list[length:]
        # If there were some elements left add them to the last list.
        parts[-1].extend(full_list)
        return parts
if __name__ == "__main__":
    mp.freeze_support()
    # To bind a specific IP address instead, replace "localhost" by the IP.
    # To listen on all available interfaces, use the empty string ('').
    HOST, PORT = "localhost", 8000
    # Create the server, binding to localhost on port 8000
    with socketserver.TCPServer((HOST, PORT), MyTCPHandler) as server:
        """
        This is a high-level asynchronous TCP base server that will handle incomming
        connections, and send answers. The protocol used was TCP instead of UDP because of the
        beneficts of this protocol, like:
        1- Reliability
        2- Synchronization
        3- Error recovery in case of losed and damaged data
        This high level server is the equivalent to the low level implementation
        with the socket module:
        srvsock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
        srvsock.bind( ("localhost", 8000) )
        srvsock.listen( 5 )
        and handling incoming conections with the select module to made it asynchronous
        (sread, swrite, sexc) = select.select( [read_socket_client], [write_socket_client], [] )
        The previous line will block the server until a conexion is made. Extra coding must be
        made in order to handle the conexion.
        """
        # Activate the server; this will keep running until you
        # interrupt with Ctrl-C.
        print(f"Serving on port {PORT}, press Ctr-C to exit...")
        server.serve_forever()
| 40.380282 | 120 | 0.647367 | 3,988 | 0.695501 | 0 | 0 | 0 | 0 | 0 | 0 | 2,982 | 0.520056 |
10075c04799cef519143b0f99e086bee68012233 | 16,539 | py | Python | cbh.py | jensengroup/fragreact | 2a9113505fd469e934eec6c6ac8200de67f65dd6 | [
"MIT"
] | 2 | 2018-10-01T00:40:12.000Z | 2020-01-16T15:02:51.000Z | cbh.py | jensengroup/fragreact | 2a9113505fd469e934eec6c6ac8200de67f65dd6 | [
"MIT"
] | null | null | null | cbh.py | jensengroup/fragreact | 2a9113505fd469e934eec6c6ac8200de67f65dd6 | [
"MIT"
] | 1 | 2021-04-24T10:53:30.000Z | 2021-04-24T10:53:30.000Z | #!/usr/bin/env python
import numpy as np
import re
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from itertools import combinations
import copy
def print_smiles(smiles_list, human=False):
    """Format a list of SMILES as "count smiles" pairs, sorted by SMILES.

    Args:
        smiles_list: SMILES strings, possibly containing repeats.
        human: unused; kept for interface compatibility.

    Returns:
        str: space-separated "<count> <smiles>" entries in sorted key order.
    """
    smiles_dict = count_smiles(smiles_list)
    # BUG FIX: dict.keys() has no .sort() in Python 3 -- sort into a list.
    out = [str(smiles_dict[key]) + " " + key for key in sorted(smiles_dict)]
    return " ".join(out)
def print_reaction(reactants, products, human=False):
    """Join reactants and products into a single ">>"-separated reaction.

    Machine form joins each side's SMILES with "."; human form prints each
    side as a counted summary via print_smiles.
    """
    if human:
        return print_smiles(reactants) + ">>" + print_smiles(products)
    return ".".join(reactants) + ">>" + ".".join(products)
def canonical(smiles):
    """
    SMILES provided is canonical, so the output should be the same no matter
    how a particular molecule is input
    """
    # Round-trip through an RDKit Mol to obtain RDKit's canonical SMILES.
    m = Chem.MolFromSmiles(smiles)
    smiles = Chem.MolToSmiles(m)
    return smiles
def kekulize(smiles):
    """Return the Kekulé SMILES of *smiles* (aromatic bonds made explicit)."""
    m = Chem.MolFromSmiles(smiles)
    Chem.Kekulize(m)
    smiles = Chem.MolToSmiles(m, kekuleSmiles=True)
    return smiles
def count_hydrogens(smiles):
    """Total number of hydrogens (implicit + explicit) on all atoms."""
    mol = Chem.MolFromSmiles(smiles)
    return sum(atom.GetTotalNumHs() for atom in mol.GetAtoms())
def count_smiles(smiles_list):
    """
    Count SMILES by creating a dictionary with SMILES as keys, pointing to
    the number of occurrences of that particular SMILES.
    """
    unique, counts = np.unique(smiles_list, return_counts=True)
    return dict(zip(unique, counts))
def substract_smiles(A, B):
    """
    A - B = Cp + Cn
    where Cp holds the surplus of A over B (positive differences)
    and Cn holds the surplus of B over A (negative differences).
    """
    if isinstance(A, str):
        A = A.split(".")
    if isinstance(B, str):
        B = B.split(".")
    counts_a = count_smiles(A)
    counts_b = count_smiles(B)
    positive = []
    negative = []
    # Iterate keys in sorted order (matches np.unique's ordering).
    for key in sorted(set(counts_a) | set(counts_b)):
        diff = counts_a.get(key, 0) - counts_b.get(key, 0)
        if diff > 0:
            positive += [key] * diff
        elif diff < 0:
            negative += [key] * (-diff)
    return positive, negative
def tuning(left_side, right_side):
    """Balance two sides of an equation over the same key set.

    For every key, the side with the deficit receives abs(diff) extra
    copies so both sides end up with equal counts.

    Returns:
        (corrected_left, corrected_right): the extra entries each side needs.
    """
    corrected_left = []
    corrected_right = []
    left_side = count_smiles(left_side)
    right_side = count_smiles(right_side)
    for key in np.unique(list(left_side.keys()) + list(right_side.keys())):
        # Both sides are expected to contain every key; bail out otherwise.
        if key not in left_side:
            print("tuning error: key missing from left side:", key)
            quit()
        if key not in right_side:
            print("tuning error: key missing from right side:", key)
            quit()
        diff = right_side[key] - left_side[key]
        if diff == 0:
            continue
        elif diff > 0:
            corrected_left += [key] * diff
        elif diff < 0:
            # BUG FIX: multiplying by a negative diff produced an empty list,
            # so the right side was never topped up; use the magnitude.
            corrected_right += [key] * abs(diff)
    return corrected_left, corrected_right
def get_bond_type(m, a, b):
    """SMILES symbol for the bond between atom indices *a* and *b*.

    Returns "" for SINGLE, "=" for DOUBLE, "#" for TRIPLE, and False when
    there is no bond or the order is anything else (e.g. AROMATIC on a
    molecule that was not kekulized first).
    """
    bond = m.GetBondBetweenAtoms(a, b)
    if bond is None:
        return False
    symbols = {"SINGLE": "", "DOUBLE": "=", "TRIPLE": "#"}
    return symbols.get(str(bond.GetBondType()), False)
def get_atoms(smiles, ignore_hydrogen=True):
    """List the element symbols appearing in the Kekulé form of *smiles*.

    NOTE(review): the regex assumes every atom symbol starts with an
    uppercase letter (true for kekulized organic SMILES); bracket atoms,
    charges and isotopes are not specially handled -- confirm for such inputs.
    """
    smiles = kekulize(smiles)
    p = re.compile(r"[A-Z][a-z]?")
    atoms = p.findall(smiles)
    if ignore_hydrogen:
        atoms = [atom for atom in atoms if atom != "H"]
    return atoms
def add_neighbours(mol, substructures):
    """Extend each atom-index tuple in *substructures* with the indices of
    all neighbours of its original atoms (duplicates are kept)."""
    substructures = list(substructures)
    for j, idx in enumerate(substructures):
        # `idx` is bound to the original tuple, so growing substructures[j]
        # below does not affect this inner iteration.
        for i in idx:
            A = mol.GetAtomWithIdx(i)
            for B in A.GetNeighbors():
                k = B.GetIdx()
                substructures[j] += (k,)
    return substructures
def get_components_neighbors(mol, atoms):
    # NOTE(review): this function looks unfinished -- it computes atom,
    # charge and bond values but never stores or returns them (always None),
    # and no caller is visible in this file.
    # NOTE(review): `Chem.GetBondBetweenAtoms(mol, idx, idx_b)` is suspicious;
    # in RDKit this is normally an instance method, `mol.GetBondBetweenAtoms`.
    # Confirm against RDKit docs before relying on this code.
    atoms = list(atoms)
    for idx in atoms:
        # Each entry is expected to be a 1-tuple of an atom index.
        idx, = idx
        A = mol.GetAtomWithIdx(idx)
        for B in A.GetNeighbors():
            idx_b = B.GetIdx()
            atom = B.GetAtomicNum()
            charge = B.GetFormalCharge()
            bond = Chem.GetBondBetweenAtoms(mol, idx, idx_b)
    return
def get_components(smiles, smart, kekulize=True, add=False):
m = Chem.MolFromSmiles(smiles)
smart = Chem.MolFromSmarts(smart)
if kekulize:
Chem.Kekulize(m)
substructures = m.GetSubstructMatches(smart)
components = []
if add:
substructures = add_neighbours(m, substructures)
for sub in substructures:
if add:
m_new = copy.copy(m)
m_new = Chem.RWMol(m_new)
for B, C in combinations(sub[1:], 2):
m_new.RemoveBond(B, C)
else:
m_new = m
component = Chem.MolFragmentToSmiles(m_new,
atomsToUse=sub,
isomericSmiles=True,
kekuleSmiles=True,
canonical=True)
A = m.GetAtomWithIdx(sub[0])
mc = Chem.MolFromSmiles(component)
n_atoms = mc.GetNumAtoms()
n_bonds = len(mc.GetBonds())
component = Chem.MolToSmiles(mc)
if "+" in component or "-" in component or "H" in component:
# Very awful hack to fix the charged molecules and their explicit
# hydrogens
charges = np.zeros(n_atoms, dtype=int)
for idx in range(n_atoms):
atom = mc.GetAtomWithIdx(idx)
atom.SetNumExplicitHs(0)
charge = atom.GetFormalCharge()
charges[idx] = charge
atom.SetFormalCharge(0)
component = Chem.MolToSmiles(mc, canonical=False)
component = component.replace("[", "").replace("]","")
mc = Chem.MolFromSmiles(component)
for idx, charge in zip(range(n_atoms), charges):
atom = mc.GetAtomWithIdx(idx)
charge = int(charge)
atom.SetFormalCharge(charge)
component = Chem.MolToSmiles(mc)
if n_atoms <= n_bonds:
mw = Chem.RWMol(m)
if len(sub) == 3:
mw.RemoveBond(sub[0], sub[-1])
elif len(sub) == 4 or len(sub) == 5:
for i in range(0, n_atoms):
for j in range(i+1, n_atoms):
if i == 1 or j == 1: continue
mw.RemoveBond(sub[i], sub[j])
component = Chem.MolFragmentToSmiles(mw,
atomsToUse=sub,
isomericSmiles=True,
kekuleSmiles=True,
canonical=True)
if "1" in component:
quit("Error connectivity")
else:
component = Chem.MolToSmiles(mc)
# charge = Chem.GetFormalCharge(mc)
#
# if not charge == 0:
# # NOTE
# # Lots of lots of if case down this road
#
# n_atoms = mc.GetNumAtoms()
#
# for i in range(n_atoms):
#
# atom = mc.GetAtomWithIdx(i)
# charge = atom.GetFormalCharge()
#
# if not charge == 0:
# atom.SetFormalCharge(0)
component = canonical(component)
components += [component]
return components
def get_components_scheme1(smiles, kekulize=True):
    """Decompose *smiles* into CBH-1 components (all bonded atom pairs).

    In the original body both branches of the charge check returned
    ``get_components(smiles, "[*]~[*]")``, so everything after the second
    return (a manual, charge-unaware re-implementation) was unreachable and
    has been removed.

    Args:
        smiles: input SMILES string.
        kekulize: unused; kept for interface compatibility (kekulization is
            handled inside get_components).
    """
    return get_components(smiles, "[*]~[*]")
def get_components_scheme2(smiles, kekulize=True):
    """Decompose *smiles* into CBH-2 components (atoms with their bonded
    neighbourhoods).

    Matches degree-2 centres expanded with their neighbours ([D2] with
    add=True) plus degree-3 and degree-4 branching patterns.  The
    "[*]~[D2]~[*]" pattern was already commented out in the original and is
    kept disabled.  Everything after the first ``return components`` in the
    original body (a manual string-assembly re-implementation) was
    unreachable and has been removed.

    Args:
        smiles: input SMILES string.
        kekulize: unused; kept for interface compatibility.
    """
    components = []
    components += get_components(smiles, "[D2]", add=True)
    components += get_components(smiles, "[*]~[D3](~[*])~[*]")
    components += get_components(smiles, "[*]~[*](~[*])(~[*])~[*]")
    return components
def decompontent(smiles, scheme=1):
    """Dispatch to the scheme-1 or scheme-2 decomposition of *smiles*.

    Returns the (left, right) SMILES lists of the tuned equation.
    """
    if scheme == 1:
        decompose = decompontent_scheme1
    elif scheme == 2:
        decompose = decompontent_scheme2
    # An unsupported scheme leaves `decompose` unbound, matching the
    # original failure mode (UnboundLocalError).
    return decompose(smiles)
def decompontent_scheme1(smiles):
    """
    Tune the equation
    A (bb) => aa
    where
    A (target) is big smiles
    aa (scheme1 components) is scheme2 components
    bb (atoms) is additional bonds required, to have equald bonds on each side
    this is done for each A which consists of len(aa) > 0
    """
    components = get_components_scheme1(smiles)
    # A molecule with no bonded atom pairs cannot be decomposed.
    if len(components) == 0:
        return [], []
    # Balance atoms: the target's atoms vs. the atoms of all components.
    bonds_leftside = get_atoms(smiles)
    bonds_rightside = []
    for component in components:
        bonds_rightside += get_atoms(component)
    left, right = tuning(bonds_leftside, bonds_rightside)
    right += components
    return left, right
def decompontent_scheme2(smiles):
    """
    Tune the equation
    A (bb) => aa
    where
    A (target) is big smiles
    aa (scheme2 components) is scheme2 components
    bb (single bonds) is additional bonds required, to have equald bonds on each side
    this is done for each A which consists of len(aa) > 0
    """
    components = get_components_scheme2(smiles)
    if len(components) == 0:
        return [], []
    # Balance bonds: the target's atom pairs vs. pairs in all components.
    bonds_leftside = get_components_scheme1(smiles)
    bonds_rightside = []
    for component in components:
        bonds_rightside += get_components_scheme1(component)
    left, right = tuning(bonds_leftside, bonds_rightside)
    right += components
    # Sanity check: atoms must balance across the tuned reaction; abort hard
    # if they do not (indicates a decomposition bug).
    if not check_atoms([smiles] + left, right):
        print("Error in fragreact tuneing:", smiles)
        print([smiles], left, right)
        quit()
    return left, right
def resultant(reactants, products, scheme=1):
    """
    assummed that smiles lists are both split(".") and canonical at this point
    """
    reactants_leftside = []
    reactants_rightside = []
    products_leftside = []
    products_rightside = []
    # Molecules that decompose into nothing are carried over unchanged.
    reactants_missing = []
    products_missing = []
    if scheme == 1:
        decompontent_scheme = decompontent_scheme1
    elif scheme == 2:
        decompontent_scheme = decompontent_scheme2
    # Decompose every reactant and product into its tuned (left, right) parts.
    for reactant in reactants:
        left, right = decompontent_scheme(reactant)
        if len(left) == 0 and len(right) == 0:
            reactants_missing += [reactant]
        reactants_leftside += left
        reactants_rightside += right
    for product in products:
        left, right = decompontent_scheme(product)
        if len(left) == 0 and len(right) == 0:
            products_missing += [product]
        products_leftside += left
        products_rightside += right
    # Cancel common species between the product and reactant decompositions.
    left_positive, left_negative = substract_smiles(products_leftside, reactants_leftside)
    right_positive, right_negative = substract_smiles(products_rightside, reactants_rightside)
    left = left_positive + right_negative + reactants_missing
    right = right_positive + left_negative + products_missing
    left, right = substract_smiles(left, right)
    # Balance hydrogens by adding protons to the lighter side.
    hydrogens_left = 0
    hydrogens_right = 0
    for each in left:
        hydrogens_left += count_hydrogens(each)
    for each in right:
        hydrogens_right += count_hydrogens(each)
    tune_hydrogens = hydrogens_left - hydrogens_right
    if tune_hydrogens < 0:
        left += ['[H+]']*abs(tune_hydrogens)
    if tune_hydrogens > 0:
        right += ['[H+]']*tune_hydrogens
    return left, right
def split_smiles(smiles, num_sep=None):
    """Normalise a reaction-component list into individual SMILES strings.

    Accepts either a "."-joined string or a list of strings; entries that
    still contain "." are flattened.  With *num_sep*, entries like "3xCC"
    (num_sep="x") are expanded into three copies of "CC".

    Args:
        smiles: str or list of str.
        num_sep: multiplicity separator character (e.g. "x" in "3xCC").

    Returns:
        list[str]: the expanded, flattened list of SMILES.
    """
    if isinstance(smiles, str):  # was `type(smiles) == type("")`
        smiles_list = smiles.split(".")
    else:
        smiles_list = smiles
    # Appending while enumerating is safe: appended parts contain no ".".
    for i, entry in enumerate(smiles_list):
        parts = entry.split(".")
        if len(parts) > 1:
            smiles_list[i] = parts[0]
            smiles_list += parts[1:]
    if num_sep:
        for i, entry in enumerate(smiles_list):
            if num_sep in entry:
                num, entry = entry.split(num_sep)
                smiles_list[i] = entry
                smiles_list += [entry] * (int(num) - 1)
    return smiles_list
def cbh_n(reactants, products, scheme, do_canonical=True):
    """Build the connectivity-based-hierarchy (CBH-n) reaction.

    in:
        reactants -- list of SMILES
        products -- list of SMILES
        scheme -- int level of connectivity
        do_canonical -- canonicalize the inputs first (default True)
    out:
        left -- SMILES for the reactant side of the CBHn reaction
        right -- SMILES for the product side of the CBHn reaction
    """
    if do_canonical:
        reactants = [canonical(s) for s in reactants]
        products = [canonical(s) for s in products]
    return resultant(reactants, products, scheme=scheme)
def check_atoms(reactants, products):
    """
    Check the validity of the reaction: reactants and products must contain
    exactly the same multiset of atoms.
    """
    reactant_atoms = sorted(
        atom for smiles in reactants for atom in get_atoms(smiles))
    product_atoms = sorted(
        atom for smiles in products for atom in get_atoms(smiles))
    return reactant_atoms == product_atoms
def check_reaction(reactants, products):
    """Return True when both sides have the same molecular formula.

    Accepts either dot-joined strings or lists of SMILES for each side.
    """
    if isinstance(reactants, list): reactants = ".".join(reactants)
    if isinstance(products, list): products = ".".join(products)
    reactants = Chem.MolFromSmiles(reactants)
    products = Chem.MolFromSmiles(products)
    return rdMolDescriptors.CalcMolFormula(reactants) == rdMolDescriptors.CalcMolFormula(products)
| 23.459574 | 98 | 0.587279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,675 | 0.161739 |
1007feef6144ad7d9a7ba9c6b18904b4be96b045 | 2,075 | py | Python | dlchord2/parser/accidentals_parser.py | anime-song/DLChord-2 | 10487b4c96ae1606974f52e08c0cb2e11940f5d0 | [
"MIT"
] | null | null | null | dlchord2/parser/accidentals_parser.py | anime-song/DLChord-2 | 10487b4c96ae1606974f52e08c0cb2e11940f5d0 | [
"MIT"
] | null | null | null | dlchord2/parser/accidentals_parser.py | anime-song/DLChord-2 | 10487b4c96ae1606974f52e08c0cb2e11940f5d0 | [
"MIT"
] | null | null | null | from enum import Enum
from dlchord2.const import ACCIDENTALS_SHARP, ACCIDENTALS_FLAT
from dlchord2.exceptions.accidentals_exceptions import AccidentalsParseError
class AccidentalsType(Enum):
    """
    Enumeration of accidental (key-signature symbol) kinds.
    """
    NONE = 0
    SHARP = 1
    FLAT = 2
class AccidentalsParseData(object):
    """
    Container for the result of parsing an accidentals string.
    """
    def __init__(self, accidentals, accidentals_type, transpose_num):
        self._accidentals = accidentals
        self._accidentals_type = accidentals_type
        self._transpose_num = transpose_num
    @property
    def accidentals(self):
        """
        The raw accidentals text that was parsed.
        :return: raw accidentals text
        :rtype: str
        """
        return self._accidentals
    @property
    def accidentals_type(self):
        """
        The kind of accidental found (NONE, SHARP or FLAT).
        :return: accidental kind
        :rtype: AccidentalsType
        """
        return self._accidentals_type
    @property
    def transpose_num(self):
        """
        The signed accidental count: number of sharps (positive)
        or flats (negative).
        :return: signed accidental count
        :rtype: int
        """
        return self._transpose_num
class AccidentalsParser(object):
    """
    Parses accidental symbols (sharps/flats) from a note string.
    """
    def parse(self, accidentals_text):
        """
        Parse the accidentals in *accidentals_text*.
        :param accidentals_text: accidentals text without the root note
        :type accidentals_text: str
        :return: parse result
        :rtype: AccidentalsParseData
        """
        sharp_num = accidentals_text.count(ACCIDENTALS_SHARP)
        flat_num = accidentals_text.count(ACCIDENTALS_FLAT)
        # A single token may not mix sharps and flats.
        if sharp_num > 0 and flat_num > 0:
            raise AccidentalsParseError("異なる調号は重複して存在することはできません。")
        accidentals_type = AccidentalsType.NONE
        trans_num = 0
        if sharp_num > 0:
            accidentals_type = AccidentalsType.SHARP
            trans_num = sharp_num
        elif flat_num > 0:
            accidentals_type = AccidentalsType.FLAT
            trans_num = -flat_num
        accidentals_parse_data = AccidentalsParseData(
            accidentals_text,
            accidentals_type,
            trans_num)
        return accidentals_parse_data
| 23.314607 | 76 | 0.619277 | 2,207 | 0.927701 | 0 | 0 | 623 | 0.261875 | 0 | 0 | 853 | 0.358554 |
1009bd5fe50141546afdc9a3c7299fc08481285f | 6,144 | py | Python | tests/unit/utils/test_instantiate.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 153 | 2021-06-20T20:12:01.000Z | 2022-03-31T13:57:45.000Z | tests/unit/utils/test_instantiate.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 25 | 2021-06-17T16:00:16.000Z | 2022-03-29T07:04:00.000Z | tests/unit/utils/test_instantiate.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 25 | 2021-06-21T22:25:22.000Z | 2022-03-30T04:39:46.000Z | import pytest
import yaml
from nequip.utils import instantiate
simple_default = {"b": 1, "d": 31}
class SimpleExample:
    """Minimal builder: one required argument and two defaulted ones."""
    def __init__(self, a, b=simple_default["b"], d=simple_default["d"]):
        self.a = a
        self.b = b
        self.d = d
nested_default = {"d": 37}
class NestedExample:
    """Builder that itself instantiates an inner builder (``cls_c``)."""
    def __init__(self, cls_c, a, cls_c_kwargs={}, d=nested_default["d"]):
        # NOTE(review): mutable default ``cls_c_kwargs={}`` is shared across
        # calls; harmless here only because it is never mutated.
        self.c_obj = cls_c(**cls_c_kwargs)
        self.a = a
        self.d = d
def assert_dict(d):
    """Recursively check that every string value in *d* equals its own key.

    Non-string, non-dict values are ignored.
    """
    for key, value in d.items():
        if isinstance(value, dict):
            assert_dict(value)
        elif isinstance(value, str):
            assert key == value
@pytest.mark.parametrize("positional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_simple_init(positional_args, optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
union.update(positional_args)
if "a" not in union:
return
# decorate test with prefix
_all_args = (
{"simple_example_" + k: v for k, v in all_args.items()} if prefix else all_args
)
# check key mapping is correct
km, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
return_args_only=True,
)
for t in km:
for k, v in km[t].items():
assert k in locals()[t + "_args"]
if prefix and t == "all":
assert v == "simple_example_" + k
else:
assert v == k
km, _ = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
all_args=params,
return_args_only=True,
)
assert_dict(km)
# check whether it gets the priority right
a1, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
)
assert a1.a == union["a"]
if "b" in union:
assert a1.b == union["b"]
else:
assert a1.b == simple_default["b"]
for k in params:
if k in simple_default:
assert params[k] == union.get(k, simple_default[k])
# check whether the return value is right
a2 = SimpleExample(**positional_args, **params)
assert a1.a == a2.a
assert a1.b == a2.b
def test_prefix_priority():
    """A prefixed key ("prefix_a") must win over the bare key ("a")."""
    args = {"prefix_a": 3, "a": 4}
    a, params = instantiate(
        builder=SimpleExample,
        prefix="prefix",
        all_args=args,
    )
    assert a.a == 3
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_nested_kwargs(optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
if "a" not in union:
return
c, params = instantiate(
builder=NestedExample,
prefix="prefix",
positional_args={"cls_c": SimpleExample},
optional_args=optional_args,
all_args=all_args,
)
def test_default():
    """
    check the default value will not contaminate the other class
    """
    c, params = instantiate(
        builder=NestedExample,
        prefix="prefix",
        positional_args={"cls_c": SimpleExample},
        optional_args={"a": 11},
    )
    # BUG FIX: the original used `=` (assignment) instead of `==`, so the
    # test silently checked nothing; these were clearly meant as assertions
    # that the untouched defaults survive instantiation.
    assert c.d == nested_default["d"]
    assert c.c_obj.d == simple_default["d"]
class A:
    """Builder that instantiates the builder passed as ``cls_a``."""
    def __init__(self, cls_a, cls_a_kwargs):
        self.a_obj = cls_a(**cls_a_kwargs)
class B:
    """Builder that instantiates the builder passed as ``cls_b``."""
    def __init__(self, cls_b, cls_b_kwargs):
        self.b_obj = cls_b(**cls_b_kwargs)
class C:
    """Builder used to close the A -> B -> C cycle in the nesting tests.

    NOTE(review): the body references ``c_cls``/``c_cls_kwargs`` (undefined)
    instead of the ``cls_c`` parameters -- presumably never executed because
    ``instantiate`` detects the cycle first; confirm before reusing.
    """
    def __init__(self, cls_c, cls_c_kwargs):  # noqa
        self.c_obj = c_cls(**c_cls_kwargs)  # noqa
def test_deep_nests():
all_args = {"a": 101, "b": 103, "c": 107}
obj, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
)
print(yaml.dump(params))
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
obj = NestedExample(**params)
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
km, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
return_args_only=True,
)
print(yaml.dump(km))
# check the key mapping is unique for
km, _ = instantiate(
builder=NestedExample, optional_args=params, return_args_only=True
)
assert_dict(km)
def test_recursion_nests():
    """Direct two-class recursion (A needs B, B needs A) must raise."""
    with pytest.raises(RuntimeError) as excinfo:
        b, params = instantiate(
            builder=A,
            positional_args={"cls_a": B},
            optional_args={"cls_b": A},
        )
    assert "cyclic" in str(excinfo.value)
    print(excinfo)
def test_cyclic_nests():
    """A three-class instantiation cycle (A -> B -> C -> A) must be detected."""
    with pytest.raises(RuntimeError) as excinfo:
        c, params = instantiate(
            builder=A,
            positional_args={"cls_a": B},
            optional_args={"cls_b": C},
            all_args={"cls_c": A},
        )
    assert "cyclic" in str(excinfo.value)
    # Dropped leftover debug argument `"hello"`; now matches the sibling
    # test_recursion_nests.
    print(excinfo)
class BadKwargs1:
    """Invalid builder: takes ``thing_kwargs`` with no companion ``thing``
    parameter."""
    def __init__(self, thing_kwargs={}):
        pass
class BadKwargs2:
    """Invalid builder: ``thing`` defaults to a plain string rather than
    something instantiable with ``thing_kwargs``."""
    def __init__(self, thing="a string", thing_kwargs={}):
        pass
def test_bad_kwargs():
    """instantiate must reject builders whose *_kwargs parameters are malformed."""
    for bad_builder, expected_error in ((BadKwargs1, KeyError),
                                        (BadKwargs2, ValueError)):
        with pytest.raises(expected_error):
            instantiate(bad_builder)
| 26.144681 | 87 | 0.60791 | 789 | 0.128418 | 0 | 0 | 2,603 | 0.423665 | 0 | 0 | 681 | 0.11084 |
100e849d7eae3f15a11b1eea05ca93095bd322e6 | 9,160 | py | Python | models/wct2.py | momenator/spine_uda | 3d6c9cd2431bcdb084d7603d0cc3101163b0902c | [
"MIT"
] | 1 | 2020-12-15T08:46:39.000Z | 2020-12-15T08:46:39.000Z | models/wct2.py | momenator/spine_uda | 3d6c9cd2431bcdb084d7603d0cc3101163b0902c | [
"MIT"
] | null | null | null | models/wct2.py | momenator/spine_uda | 3d6c9cd2431bcdb084d7603d0cc3101163b0902c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import os
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from .modules import WavePool, WaveUnpool, ImagePool, NLayerDiscriminator
from utils.metrics import compute_dice_metric
from utils.losses import DiceLoss
import numpy as np
class WaveEncoder(nn.Module):
    """Wavelet encoder in WCT2, only partial layers used"""

    def __init__(self):
        super(WaveEncoder, self).__init__()
        self.pad = nn.ReflectionPad2d(1)
        self.relu = nn.ReLU(inplace=True)
        # VGG-style convolution stack. Only conv0..conv1_2 and pool1 are
        # exercised by forward() below; the deeper layers are still defined,
        # presumably so a full pretrained state dict loads unchanged — confirm
        # against the checkpoint loaded in WCT2Features.
        self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
        self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
        self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
        self.pool1 = WavePool(64)
        self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
        self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
        self.pool2 = WavePool(128)
        self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
        self.pool3 = WavePool(256)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)

    def forward(self, x, skips):
        """Wavelet encoding - only up to level 2

        Args:
            x (torch.Tensor): input to be encoded
            skips (dict): dictionary to contain LH, HL, HH filter responses
                (mutated in place)

        Returns:
            LL (torch.Tensor): output of LL filters
            skips (dict): dictionary containing said filters
        """
        # level 1
        out = self.conv0(x)
        out = self.relu(self.conv1_1(self.pad(out)))
        # level 2
        out = self.relu(self.conv1_2(self.pad(out)))
        # Saved pre-pooling activation, consumed by WaveDecoder's unpooling.
        skips['conv1_2'] = out
        LL, LH, HL, HH = self.pool1(out)
        skips['pool1'] = [LH, HL, HH]
        return LL, skips
class WaveDecoder(nn.Module):
    """Wavelet decoder in WCT2, only partial layers used"""

    def __init__(self):
        super(WaveDecoder, self).__init__()
        # The *_2 convolutions take widened (multiply_in x) channel counts —
        # presumably to accept LL concatenated with the LH/HL/HH bands and the
        # skip activation, though forward() below only uses recon_block1.
        multiply_in = 5
        self.pad = nn.ReflectionPad2d(1)
        self.relu = nn.ReLU(inplace=True)
        self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
        self.recon_block3 = WaveUnpool(256)
        self.conv3_4_2 = nn.Conv2d(256*multiply_in, 256, 3, 1, 0)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
        self.recon_block2 = WaveUnpool(128)
        self.conv2_2_2 = nn.Conv2d(128*multiply_in, 128, 3, 1, 0)
        self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
        self.recon_block1 = WaveUnpool(64)
        self.conv1_2_2 = nn.Conv2d(64*multiply_in, 64, 3, 1, 0)
        self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x, skips):
        """Decoder - upsample from level 2

        Args:
            x (torch.Tensor): input to be encoded (the encoder's LL output)
            skips (dict): dictionary containing LH, HL, HH filter responses
                as produced by WaveEncoder.forward

        Returns:
            out (torch.Tensor): output of wavelet unpooling layer
        """
        LH, HL, HH = skips['pool1']
        # The saved pre-pooling activation is optional.
        original = skips['conv1_2'] if 'conv1_2' in skips.keys() else None
        out = self.recon_block1(x, LH, HL, HH, original)
        return out
class WCT2Features(nn.Module):
    """WCT2 transform with fixed input and output channels and handpicked LL filters
    """

    def __init__(self, filters=None, model_path_encoder=None, model_path_decoder=None):
        """filters: optional list of channel indices (out of the 64 decoder
        output channels) to select in forward(); model_path_*: paths to the
        pretrained encoder/decoder state dicts (required, despite the None
        defaults — os.path.join fails on None)."""
        super(WCT2Features, self).__init__()
        # CUDA is assumed throughout this class (.cuda() on construction and
        # in forward below).
        self.encoder = WaveEncoder().cuda()
        self.decoder = WaveDecoder().cuda()
        self.encoder.load_state_dict(
            torch.load(os.path.join(model_path_encoder),
                       map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(
            torch.load(os.path.join(model_path_decoder),
                       map_location=lambda storage, loc: storage))
        self.filters = filters
        # Earlier experiments with hand-picked channel subsets, kept for reference:
        # self.tanh = nn.Tanh()
        # chosen channels
        # self.ll_filter_idx = [4,7,11,24,25,27]
        # Sparsest CT channels [25, 54,16,22,61,4,8,27,7,3]
        # self.ll_filter_idx = [15,2,41,12,39,1,42,23,51,38]
        # self.ll_filter_idx = [14 ,15 ,45 ,19 ,39, 1 ,42 ,23 ,51, 38]

    def forward(self, x):
        """Get WCT2 LL filters

        Args:
            x (torch.Tensor): input tensor

        Returns:
            out (torch.Tensor): output LL filters (first 64 channels, or the
                handpicked subset when ``self.filters`` is set)
        """
        skips = {}
        out, skips = self.encoder(x, skips)
        out = self.decoder(out, skips)
        # Keep only the first 64 channels of the unpooled output.
        out = out[:,:64,:,:]
        if self.filters != None:
            out = torch.index_select(out, 1, torch.tensor(self.filters).cuda())
        return out
class WCT2GANUNet(nn.Module):
    """WCT2 GAN UNet all in one class"""

    def __init__(self, g, seg, n_channels, lr=0.0002):
        """g: generator / feature extractor; seg: segmentation UNet (must
        expose downsample/upsample); n_channels: channel count of the
        generator output fed to the discriminator; lr: shared learning rate.
        CUDA is assumed (.cuda() on every sub-module)."""
        super(WCT2GANUNet, self).__init__()
        # generator
        self.g = g.cuda()
        # discriminator
        self.d = NLayerDiscriminator(input_nc=n_channels).cuda()
        # segmentor
        self.seg = seg.cuda()
        self.lr = lr
        # optimisers here
        self.g_optim = optim.Adam(self.g.parameters(), lr=self.lr)
        self.seg_optim = optim.Adam(self.seg.parameters(), lr=self.lr)
        # self.optim = optim.Adam(chain(self.g.parameters(), self.seg.parameters()), lr=self.lr)
        self.d_optim = optim.SGD(self.d.parameters(), lr=self.lr, momentum=0.5)
        self.criterion_gan = nn.BCELoss()
        self.pool = ImagePool()

    def criterion_seg(self, prediction, target):
        # Combined segmentation loss: binary cross-entropy + Dice.
        return nn.BCELoss()(prediction, target) + DiceLoss()(prediction, target)

    def forward_gen(self, x):
        # Generator forward pass only.
        return self.g(x)

    def forward_seg(self, x):
        # Generator features -> UNet encoder -> UNet decoder -> segmentation map.
        out = self.forward_gen(x)
        a1, a2, a3, a4, a5 = self.seg.downsample(out)
        seg = self.seg.upsample(a1, a2, a3, a4, a5)
        return seg

    def get_target(self, pred, is_true=True):
        """Return target tensor with similar shape to pred"""
        # NOTE(review): when is_true, an all-ones ("real") target is produced
        # only when random() > 0.65, i.e. ~35% of the time, and zeros
        # otherwise — that is very aggressive label noise for the
        # discriminator; confirm the comparison direction was intended.
        if is_true == True and np.random.random() > 0.65:
            return torch.ones(pred.size(), requires_grad=False).cuda()
        return torch.zeros(pred.size(), requires_grad=False).cuda()
        # Earlier soft-label variant, kept for reference:
        # # occasionally give wrong labels
        # if is_true == True and np.random.random() + 0.3 > 0.5:
        #     # use soft label for true [0.7, 1.2]
        #     return (1.2 - 0.7) * torch.rand(pred.size(), requires_grad=False).cuda() + 0.7
        # # use soft label [0, 0.1] for false
        # return 0.1 * torch.rand(pred.size(), requires_grad=False).cuda()

    def set_requires_grad(self, net, requires_grad=False):
        # Freeze (or unfreeze) every parameter of ``net``.
        for param in net.parameters():
            param.requires_grad=requires_grad

    def step(self, x_s, x_t, y_s):
        """One training step. x_s / y_s: source-domain image and mask;
        x_t: target-domain image. Returns (seg_loss, d_loss, dice_score)."""
        # GAN loss - update discriminator and generator here
        # GAN loss - max log(D(x)) + log(1 - D(G(x)))
        # update d only
        self.d_optim.zero_grad()
        out_x_s = self.forward_gen(x_s)
        out_x_t = self.forward_gen(x_t)
        x_s_real = self.d(out_x_s)
        target_real = self.get_target(x_s_real)
        loss_real = self.criterion_gan(x_s_real, target_real)
        loss_real.backward()
        # get generated feature maps from pool / replay for stability
        x_s_fake_map = (self.pool.query(out_x_t)).detach()
        x_s_fake = self.d(x_s_fake_map)
        target_fake = self.get_target(x_s_fake, is_true=False)
        loss_fake = self.criterion_gan(x_s_fake, target_fake)
        loss_fake.backward()
        self.d_optim.step()
        # update g - max D(G(X))
        # NOTE(review): x_s_fake_map is detached above, so loss_g cannot reach
        # the generator — g_optim.step() only applies gradients the generator
        # accumulated from loss_real.backward(); confirm this is intended.
        self.g_optim.zero_grad()
        x_s_fake = self.d(x_s_fake_map)
        target_real = self.get_target(x_s_real)
        loss_g = self.criterion_gan(x_s_fake, target_real)
        loss_g.backward()
        self.g_optim.step()
        # Segmentation loss
        # NOTE(review): the generator is frozen here and never re-enabled, so
        # from the second call on it receives no gradients at all (the
        # commented-out g_optim lines suggest this was in flux) — confirm.
        self.set_requires_grad(self.g, requires_grad=False)
        # self.g_optim.zero_grad()
        self.seg_optim.zero_grad()
        out_seg = self.forward_seg(x_s)
        seg_loss = self.criterion_seg(out_seg, y_s)
        seg_loss.backward()
        # self.g_optim.step()
        self.seg_optim.step()
        # calculate dice score for current batch
        dice_score = compute_dice_metric(torch.round(out_seg), y_s).item()
        # backward pass
        return seg_loss.item(), (loss_real + loss_fake).item(), dice_score

    def save(self, path):
        """Save the generator, discriminator and segmentor state dicts under
        ``path`` (created if missing)."""
        print('saving model...')
        if os.path.isdir(path) == False:
            os.makedirs(path)
        torch.save(self.g.state_dict(), os.path.join(path,'g.pth'))
        torch.save(self.d.state_dict(), os.path.join(path,'d.pth'))
        torch.save(self.seg.state_dict(), os.path.join(path,'seg.pth'))
        print('saving done!')
| 34.179104 | 96 | 0.569978 | 8,842 | 0.965284 | 0 | 0 | 0 | 0 | 0 | 0 | 2,254 | 0.24607 |
100fae21e6b69b129ccd390195c8899a6d2d0807 | 1,188 | py | Python | api/public/urls.py | marinimau/wayne_django_rest | eeaef0b9e3544c0f678b7f66684ac56f5ef90759 | [
"MIT"
] | null | null | null | api/public/urls.py | marinimau/wayne_django_rest | eeaef0b9e3544c0f678b7f66684ac56f5ef90759 | [
"MIT"
] | 5 | 2021-04-08T20:43:19.000Z | 2021-09-22T19:42:03.000Z | api/public/urls.py | marinimau/wayne_django_rest | eeaef0b9e3544c0f678b7f66684ac56f5ef90759 | [
"MIT"
] | null | null | null | #
# copyright © 2020 - all rights reserved
# Created at: 03/11/20
# By: mauromarini
# License: MIT
# Repository: https://github.com/marinimau/wayne_django_rest
# Credits: @marinimau (https://github.com/marinimau)
#
from django.urls import path
from api.user import views as user_views
from api.social import views as social_views
urlpatterns = [
    # ------------------------------------------------------------------------------------------------------------------
    # public urls
    # ------------------------------------------------------------------------------------------------------------------
    # Public (read-only) user and profile detail, looked up by username.
    path('get/<username>/', user_views.UserDetailPublic.as_view()),
    path('get/<username>/detail/', user_views.ProfileDetailPublic.as_view()),
    # Social accounts keyed by a platform username, plus the reverse lookup
    # (platform + value -> owning account).
    path('get/<username>/account/username_based/', social_views.UsernameSocialAccountPublic.as_view()),
    path('reverse/username_based/<platform>/<value>/', social_views.UsernameSocialAccountRetrieve.as_view()),
    # The same pair for social accounts keyed by an e-mail address.
    path('get/<username>/account/email_based/', social_views.EmailSocialAccountPublic.as_view()),
    path('reverse/email_based/<platform>/<value>/', social_views.EmailSocialAccountRetrieve.as_view()),
]
| 45.692308 | 120 | 0.591751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.56434 |
1012347db29b0e7431b7d278a87031a83ffc0e9a | 419 | py | Python | mentoring_app/migrations/0007_mentoringprogram_is_published.py | ShouravAhmed/Luminar | f80c621028f81ef7f657592560e4c95fd8e91699 | [
"MIT"
] | null | null | null | mentoring_app/migrations/0007_mentoringprogram_is_published.py | ShouravAhmed/Luminar | f80c621028f81ef7f657592560e4c95fd8e91699 | [
"MIT"
] | null | null | null | mentoring_app/migrations/0007_mentoringprogram_is_published.py | ShouravAhmed/Luminar | f80c621028f81ef7f657592560e4c95fd8e91699 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-12-28 05:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.7): adds the ``is_published`` boolean flag,
    defaulting to False, to the ``mentoringprogram`` model."""

    dependencies = [
        ('mentoring_app', '0006_mentoringprogram_is_archived'),
    ]

    operations = [
        migrations.AddField(
            model_name='mentoringprogram',
            name='is_published',
            field=models.BooleanField(default=False),
        ),
    ]
| 22.052632 | 63 | 0.630072 | 326 | 0.778043 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.307876 |
1013ecf59bc70ab9d79d68b8649884c783a2823a | 6,613 | py | Python | csdaily/data.py | qytz/cn_stock_daily | 93950c7be37f6717d1448e0f6c0f93947fdcafce | [
"Apache-2.0"
] | 1 | 2018-05-29T09:13:05.000Z | 2018-05-29T09:13:05.000Z | csdaily/data.py | qytz/cn_stock_daily | 93950c7be37f6717d1448e0f6c0f93947fdcafce | [
"Apache-2.0"
] | 1 | 2018-04-14T02:04:39.000Z | 2018-04-14T06:30:36.000Z | csdaily/data.py | qytz/csdaily | 93950c7be37f6717d1448e0f6c0f93947fdcafce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of CSDaily.
# Copyright (C) 2018-present qytz <hhhhhf@foxmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import asyncio
import logging
from datetime import datetime, date
import aiohttp
import pandas as pd
from sqlalchemy import create_engine
from .cli import FinApp, CSDaily
logger = logging.getLogger(__file__)
@CSDaily.subcommand('data')
class DataApp(FinApp):
    """Downloads daily market-quote overviews from Xueqiu and appends them to
    a per-day SQLite database."""

    async def get_daily_stocks_xq(self, session, data_type='stock'):
        """Fetch today's quote overview from Xueqiu (current day only).

        Args:
            session: an ``aiohttp.ClientSession``; a plain GET against
                xueqiu.com is issued first to obtain the required cookies.
            data_type: one of
                ``stock`` -- Shanghai/Shenzhen stocks,
                ``cb``    -- convertible bonds,
                ``etf``   -- ETF funds,
                ``fenji`` -- structured (fenji) funds.

        Returns:
            pd.DataFrame indexed by (symbol, day), or an empty DataFrame when
            nothing could be downloaded.

        Endpoints:
            https://xueqiu.com/stock/cata/stocklist.json
            https://xueqiu.com/fund/quote/list.json

        Ticker-prefix reference (translated from the original Chinese notes):
            SSE:  0 gov bonds/indices, 00 SSE/CSI indices, 1 bonds, 2 repo,
                  3 futures, 5 funds/warrants, 6 A-shares,
                  7 non-trading business, 9 B-shares.
            SZSE: 00 A-shares, 002-004 SME board, 1 bonds, 2 B-shares,
                  30 ChiNext, 39 composite/constituent indices.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) '
                          'Gecko/20100101 Firefox/54.0',
        }
        # Warm-up request: obtains the cookies Xueqiu requires for its JSON APIs.
        await session.get('https://xueqiu.com', headers=headers)
        quotes = []
        page_size = 90
        curr_page = page_cnt = 1
        params = {
            '_': 0,
            'order': 'desc',
            'orderby': 'percent',
            'page': curr_page,
            'size': page_size,
        }
        quotes_url = 'https://xueqiu.com/stock/cata/stocklist.json'
        if data_type == 'stock':
            params['type'] = '11,12'
        elif data_type == 'cb':
            params['exchange'] = 'CN'
            params['industry'] = '可转债'
        elif data_type == 'etf':
            params['parent_type'] = 13
            params['type'] = 135
            params['orderBy'] = 'percent'
            quotes_url = 'https://xueqiu.com/fund/quote/list.json'
        elif data_type == 'fenji':
            params['parent_type'] = 1
            params['type'] = 14
            params['orderBy'] = 'percent'
            quotes_url = 'https://xueqiu.com/fund/quote/list.json'
        logger.info('start download xueqiu daily quotes for %s...', data_type)
        while curr_page <= page_cnt:
            logger.info('Fetching %s/%s page', curr_page, page_cnt)
            params['page'] = curr_page
            params['_'] = int(time.time() * 1000)  # cache-busting timestamp
            resp = await session.get(quotes_url, params=params, headers=headers)
            resp_json = await resp.json()
            if data_type in ('stock', 'cb'):
                if not resp_json['success']:
                    logger.error('Get daily quotes for %s failed: %s', data_type, resp_json)
                    break
                total_cnt = resp_json['count']['count']
            elif data_type in ('etf', 'fenji'):
                # Both fund categories share the fund API's response shape.
                if 'error_code' in resp_json:
                    logger.error('Get daily quotes for %s failed: %s', data_type, resp_json)
                    break
                total_cnt = resp_json['count']
            # Fixed: ceiling division. The previous expression
            # ``total // size + 1 if total % size != 0 else 0`` evaluated to 0
            # whenever total_cnt was an exact multiple of page_size, silently
            # dropping every page after the first.
            page_cnt = total_cnt // page_size + (1 if total_cnt % page_size else 0)
            quotes.extend(resp_json['stocks'])
            curr_page += 1
        if not quotes:
            logger.warning('no data downloaded for %s, return empty DataFrame', data_type)
            # Fixed: the empty DataFrame was previously constructed but not
            # returned, so execution fell through and crashed in set_index
            # (no 'symbol' column on an empty frame).
            return pd.DataFrame()
        logger.info('download xueqiu daily quotes for %s finish', data_type)
        df = pd.DataFrame(quotes)
        # Stamp every row with today at 16:00 (after the market close).
        df['day'] = datetime.now().replace(hour=16, minute=0, second=0, microsecond=0)
        # set index
        df.set_index(['symbol', 'day'], inplace=True)
        df.drop_duplicates(inplace=True)
        # convert to numertic types
        return df.apply(pd.to_numeric, errors='ignore')

    async def update_data_daily(self):
        """Download all four quote categories and append them to a per-day
        SQLite database under ``<data_dir>/daily_quotes/<date>.db``."""
        day = str(date.today())
        db_dir = os.path.join(self._data_dir, 'daily_quotes')
        os.makedirs(db_dir, exist_ok=True)
        db_file = os.path.join(db_dir, f'{day}.db')
        engine = create_engine('sqlite:///' + db_file)
        logger.info('start downloading, data will be saved to %s', db_file)
        async with aiohttp.ClientSession() as session:
            # One table per category, e.g. 'stock' -> 'stock_quotes'.
            for data_type in ('stock', 'cb', 'etf', 'fenji'):
                df = await self.get_daily_stocks_xq(session, data_type=data_type)
                if not df.empty:
                    df.to_sql(f'{data_type}_quotes', engine, chunksize=1000,
                              if_exists='append', index=True)
        logger.info('all data has be saved to %s', db_file)

    def main(self, *args):
        """CLI entry point: prepare the data directory and run the async
        download to completion."""
        self._data_dir = os.path.join(self._root_dir, 'origin_data')
        os.makedirs(self._data_dir, exist_ok=True)
        # NOTE(review): get_event_loop() is deprecated on newer Python; kept
        # for compatibility with the interpreter this project targets.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.update_data_daily())
if __name__ == '__main__':
    # Allow running this module directly (outside the CSDaily CLI wrapper).
    DataApp()
| 36.535912 | 97 | 0.556782 | 5,949 | 0.860304 | 0 | 0 | 5,977 | 0.864353 | 5,669 | 0.819812 | 3,013 | 0.435719 |
101537d3feef1a4dbbd2ef2f6aa60048ff8c781a | 1,265 | py | Python | RecordSpider/beian.py | wjcIvan/oschinaLearning | 4f172b068ab00063b4b0e6217d031e0dcb48f492 | [
"MIT"
] | 1 | 2020-07-29T07:00:32.000Z | 2020-07-29T07:00:32.000Z | RecordSpider/beian.py | wjcIvan/oschinaLearning | 4f172b068ab00063b4b0e6217d031e0dcb48f492 | [
"MIT"
] | 4 | 2020-07-30T08:44:04.000Z | 2020-07-30T08:45:14.000Z | RecordSpider/beian.py | wjcIvan/oschinaLearning | 4f172b068ab00063b4b0e6217d031e0dcb48f492 | [
"MIT"
] | null | null | null | # coding:utf-8
import sys
from PyQt5 import QtWidgets
import window
import recordSpider
class MainWindow(object):
    """Main Qt window: looks up Chinese ICP registration ("beian") records for
    a domain entered in the text box and fills six result fields."""

    def __init__(self):
        app = QtWidgets.QApplication(sys.argv)
        # Renamed from ``MainWindow`` — the old local shadowed this class.
        main_window = QtWidgets.QMainWindow()
        self.ui = window.Ui_MainWindow()
        self.ui.setupUi(main_window)
        main_window.show()
        # Run the lookup (which reads the text box) whenever the button is clicked.
        self.ui.pushButton.clicked.connect(self.click_success)
        # exec_() blocks until the window closes, then the process exits.
        sys.exit(app.exec_())

    def click_success(self):
        """Query the record spider for the entered domain and fill the result
        fields, or show a not-found notice when nothing is returned."""
        domain = self.ui.textEdit.toPlainText()
        result = recordSpider.main(domain)
        # Widget-name suffixes (lineEdit, lineEdit_2, ...) and their matching
        # result keys, in UI order.
        suffixes = ('', '_2', '_3', '_4', '_5', '_6')
        keys = ('main', 'mainType', 'record', 'websiteName', 'websiteHome', 'time')
        if result is None:
            # "未找到备案" = "no registration record found"; clear the rest.
            values = ('未找到备案', '', '', '', '', '')
        else:
            values = tuple(result[key] for key in keys)
        for suffix, value in zip(suffixes, values):
            getattr(self.ui, 'lineEdit' + suffix).setText(value)
if __name__ == "__main__":
    # Constructing MainWindow starts the Qt event loop and blocks until exit.
    MainWindow()
| 30.119048 | 62 | 0.618972 | 1,150 | 0.892164 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.100853 |
10182dc2e471f5a0f909b79006a7d28c08916403 | 509 | py | Python | querybook/server/datasources_socketio/connect.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 1,144 | 2021-03-30T05:06:16.000Z | 2022-03-31T10:40:31.000Z | querybook/server/datasources_socketio/connect.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 593 | 2021-07-01T10:34:25.000Z | 2022-03-31T23:24:40.000Z | querybook/server/datasources_socketio/connect.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 113 | 2021-03-30T00:07:20.000Z | 2022-03-31T07:18:43.000Z | from flask_login import current_user
from flask_socketio import ConnectionRefusedError
from app.flask_app import socketio
from const.data_doc import DATA_DOC_NAMESPACE
from const.query_execution import QUERY_EXECUTION_NAMESPACE
def connect():
    # Socket.IO connection handler: reject the handshake outright for
    # unauthenticated sessions (flask_login populates current_user).
    if not current_user.is_authenticated:
        raise ConnectionRefusedError("User is not logged in, please refresh the page.")


# Register the same authentication guard for both namespaces this app serves.
socketio.on("connect", namespace=DATA_DOC_NAMESPACE)(connect)
socketio.on("connect", namespace=QUERY_EXECUTION_NAMESPACE)(connect)
| 31.8125 | 87 | 0.827112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.131631 |
1019beaa3b165ae0d0bc9fd6b35ceadc23df9167 | 587 | py | Python | python/alibiexplainer/tests/utils.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 2 | 2020-10-06T09:20:22.000Z | 2022-01-18T15:04:57.000Z | python/alibiexplainer/tests/utils.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 15 | 2020-11-13T19:05:44.000Z | 2022-03-12T00:49:45.000Z | python/alibiexplainer/tests/utils.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 2 | 2020-10-06T09:24:31.000Z | 2020-12-20T15:10:56.000Z | import kfserving
from typing import List, Union
import numpy as np
class Predictor():  # pylint:disable=too-few-public-methods
    """Thin adapter exposing a KFModel's ``predict`` through a plain-array API."""

    def __init__(self, clf: kfserving.KFModel):
        self.clf = clf

    def predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
        """Run ``clf.predict`` on *arr*; ndarray rows are converted to lists
        so the request payload is JSON-serializable."""
        instances = [
            row.tolist() if isinstance(row, np.ndarray) else row
            for row in arr
        ]
        resp = self.clf.predict({"instances": instances})
        return np.array(resp["predictions"])
| 30.894737 | 69 | 0.623509 | 517 | 0.88075 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.107325 |
101a10a127a273feed1bef4adf70f1e5a220a9a0 | 2,186 | py | Python | relations/views.py | Mansouroopi/DRF | 057c6d77c012386734deee9c9076264c005d0221 | [
"MIT"
] | null | null | null | relations/views.py | Mansouroopi/DRF | 057c6d77c012386734deee9c9076264c005d0221 | [
"MIT"
] | null | null | null | relations/views.py | Mansouroopi/DRF | 057c6d77c012386734deee9c9076264c005d0221 | [
"MIT"
] | null | null | null |
from snippets.permissions import IsOwnerOrReadOnly
from rest_framework import permissions
from rest_framework import viewsets
from .models import Album, Track, Student, Module
from .serializers import AlbumSerializer, StudentSerializer, ModuleSerializer, TrackSerializer
from rest_framework.response import Response
class AlbumViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.

    Reads are public; writes require authentication and object ownership.
    """
    queryset = Album.objects.all()
    serializer_class = AlbumSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
class TrackViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions, with `create` overridden below.
    """
    queryset = Track.objects.all()
    serializer_class = TrackSerializer

    def create(self, *args, **kwargs):
        """
        Override DRF's create: builds the Track directly from the raw request
        payload instead of going through the serializer.

        NOTE(review): this bypasses serializer validation, and the extra
        ``new_track.save()`` after ``objects.create`` issues a redundant
        UPDATE — confirm both are intentional.

        :return: Response with the serialized representation of the new track
        """
        track_data = self.request.data
        new_track = Track.objects.create(album=Album.objects.get(id=track_data['album']),
                                         order=track_data['order'],
                                         title=track_data['title'],
                                         duration=track_data['duration'])
        new_track.save()
        serializer = TrackSerializer(new_track)
        return Response(serializer.data)
class StudentViewSet(viewsets.ModelViewSet):
    """
    Student viewset: automatically provides `list`, `create`, `retrieve`,
    `update`, and `destroy` actions.
    """
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class ModuleViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    """
    queryset = Module.objects.all()
    serializer_class = ModuleSerializer
| 31.681159 | 94 | 0.663769 | 1,856 | 0.849039 | 0 | 0 | 0 | 0 | 0 | 0 | 780 | 0.356816 |
101a86c8d6437538ab20c4171397229b83fd18f9 | 184,923 | py | Python | venv/lib/python3.7/site-packages/torch/testing/_internal/common_methods_invocations.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/torch/testing/_internal/common_methods_invocations.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/torch/testing/_internal/common_methods_invocations.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | from functools import reduce, wraps, partial
from itertools import product
from operator import mul, itemgetter
import collections
import operator
import torch
import numpy as np
from torch._six import inf, istuple
from torch.autograd import Variable
import collections.abc
from typing import List, Tuple, Dict, Any
from torch.testing import \
(make_non_contiguous, _dispatch_dtypes, floating_types, floating_types_and,
floating_and_complex_types, floating_and_complex_types_and,
all_types_and_complex_and, all_types_and, all_types_and_complex)
from torch.testing._internal.common_device_type import \
(skipIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCPUIfNoMkl,
skipCUDAIfRocm, expectedAlertNondeterministic, precisionOverride)
from torch.testing._internal.common_cuda import CUDA11OrLater
from torch.testing._internal.common_utils import \
(prod_single_zero, random_square_matrix_of_rank,
random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, make_nonzero_det,
random_fullrank_matrix_distinct_singular_value, set_rng_seed,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY,
torch_to_numpy_dtype_dict, slowTest, TEST_WITH_ASAN)
from distutils.version import LooseVersion
if TEST_SCIPY:
import scipy.special
class DecorateInfo(object):
    """Describes which test, or type of tests, should be wrapped in the given
    decorators when testing an operator. Any test that matches all provided
    arguments will be decorated. The decorators will only be applied if the
    active_if argument is True."""

    __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']

    def __init__(self, decorators, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        # Normalize to a list: a lone decorator becomes a one-element list.
        if isinstance(decorators, collections.abc.Sequence):
            self.decorators = list(decorators)
        else:
            self.decorators = [decorators]
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.dtypes = dtypes
        self.active_if = active_if

    def is_active(self, cls_name, test_name, device_type, dtype):
        """True when active_if is set and every non-None filter matches."""
        if not self.active_if:
            return False
        filters = (
            (self.cls_name, cls_name),
            (self.test_name, test_name),
            (self.device_type, device_type),
        )
        for wanted, actual in filters:
            if wanted is not None and wanted != actual:
                return False
        return self.dtypes is None or dtype in self.dtypes
class SkipInfo(DecorateInfo):
    """Describes which test, or type of tests, should be skipped when testing
    an operator. Any test that matches all provided arguments will be skipped.
    The skip will only be checked if the active_if argument is True."""

    def __init__(self, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        # A SkipInfo is just a DecorateInfo whose sole decorator is an
        # unconditional skip marker.
        super().__init__(decorators=skipIf(True, "Skipped!"), cls_name=cls_name,
                         test_name=test_name, device_type=device_type, dtypes=dtypes,
                         active_if=active_if)
class SampleInput(object):
    """Represents sample inputs to a function."""

    # output_process_fn_grad is a function that modifies the output of op compatible with input
    __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad']

    def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=None):
        # test_ops.py expects input to be a tuple; wrap a lone value.
        if isinstance(input, tuple):
            self.input = input
        else:
            self.input = (input,)
        self.args = args
        self.kwargs = {} if kwargs is None else kwargs
        self.output_process_fn_grad = output_process_fn_grad

    def __repr__(self):
        # Mention only the optional pieces that are actually set / non-empty.
        parts = [f'input[{len(self.input)}]']
        if len(self.args) > 0:
            parts.append(f'args={self.args}')
        if len(self.kwargs) > 0:
            parts.append(f'kwargs={self.kwargs}')
        if self.output_process_fn_grad is not None:
            parts.append(f'output_process_fn_grad={self.output_process_fn_grad}')
        return f'SampleInput({", ".join(parts)})'
class AliasInfo(object):
    """Class holds alias information. For example, torch.abs ->
    torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
    """

    def __init__(self, alias_name):
        self.name = alias_name
        # Function variant, resolved via qualified lookup so dotted names
        # like 'linalg.norm' work.
        self.op = _getattr_qual(torch, alias_name)
        # Method and in-place variants; None when the alias has no such variant.
        self.method_variant = getattr(torch.Tensor, alias_name, None)
        self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)

    def __call__(self, *args, **kwargs):
        # Calling the AliasInfo invokes the function variant.
        return self.op(*args, **kwargs)
_NOTHING = object() # Unique value to distinguish default from anything else
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
# Classes and methods for the operator database
class OpInfo(object):
    """Operator information and helper functions for acquiring it."""

    def __init__(self,
                 name,  # the string name of the function
                 *,
                 op=None,  # the function variant of the operation, populated as torch.<name> if None
                 dtypes=floating_types(),  # dtypes this function is expected to work with
                 dtypesIfCPU=None,  # dtypes this function is expected to work with on CPU
                 dtypesIfCUDA=None,  # dtypes this function is expected to work with on CUDA
                 dtypesIfROCM=None,  # dtypes this function is expected to work with on ROCM
                 default_test_dtypes=None,  # dtypes to test with by default. Gets intersected
                                            # with the dtypes support on the tested device
                 test_inplace_grad=True,  # whether to gradcheck and gradgradcheck the inplace variant
                 test_complex_grad=True,  # whether to gradcheck and gradgradcheck for complex dtypes
                 skip_bfloat16_grad=False,  # whether to skip grad and gradgradcheck for bfloat16 dtype
                 assert_autodiffed=False,  # if a op's aten::node is expected to be symbolically autodiffed
                 autodiff_nonfusible_nodes=None,  # a list of strings with node names that are expected to be in a
                                                  # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
                                                  # default is populated to be ['aten::(name of Python operator)']
                 autodiff_fusible_nodes=None,  # a list of strings with node names that are expected to be in FusionGroups
                                               # inside of DifferentiableGraphs when this operation is autodiffed.
                                               # Ex: ['aten::add', 'aten::mm'], defaults to an empty list
                                               # Note: currently no ops use fusible nodes
                 output_func=lambda x: x,  # fn mapping output to part that should be gradcheck'ed
                 supports_tensor_out=True,  # whether the op supports the out kwarg, returning a Tensor
                 skips=tuple(),  # information about which tests to skip
                 decorators=None,  # decorators to apply to generated tests
                 safe_casts_outputs=False,  # whether op allows safe casting when writing to out arguments
                 sample_inputs_func=None,  # function to generate sample inputs
                 aten_name=None,  # name of the corresponding aten:: operator
                 aliases=None,  # iterable of aliases, e.g. ("absolute",) for torch.abs
                 variant_test_name='',  # additional string to include in the test name
                 supports_sparse=False,  # supported for sparse
                 check_batched_grad=True,  # check batched grad when doing gradcheck
                 check_batched_gradgrad=True,  # check batched grad grad when doing gradgradcheck
                 ):

        # Validates the dtypes are generated from the dispatch-related functions
        for dtype_list in (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM):
            assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))

        self.name = name
        self.aten_name = aten_name if aten_name is not None else name
        self.variant_test_name = variant_test_name

        # Per-backend dtype sets fall back to the generic ``dtypes`` set.
        self.dtypes = set(dtypes)
        self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes
        self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
        self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypes
        self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None

        # NOTE: if the op is unspecified it is assumed to be under the torch namespace
        self.op = op if op else _getattr_qual(torch, self.name)
        # Method / in-place / operator-module variants; None when absent.
        self.method_variant = getattr(torch.Tensor, name, None)
        inplace_name = name + "_"
        self.inplace_variant = getattr(torch.Tensor, inplace_name, None)
        self.operator_variant = getattr(operator, name, None)
        self.skip_bfloat16_grad = skip_bfloat16_grad

        self.test_inplace_grad = test_inplace_grad
        self.test_complex_grad = test_complex_grad
        self.supports_tensor_out = supports_tensor_out
        self.safe_casts_outputs = safe_casts_outputs

        self.skips = skips
        self.decorators = decorators
        self.output_func = output_func
        self.sample_inputs_func = sample_inputs_func

        self.assert_autodiffed = assert_autodiffed
        self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []
        if autodiff_nonfusible_nodes is None:
            self.autodiff_nonfusible_nodes = ['aten::' + self.name]
        else:
            self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes
        self.supports_sparse = supports_sparse
        self.check_batched_grad = check_batched_grad
        self.check_batched_gradgrad = check_batched_gradgrad

        # Resolve alias names into AliasInfo objects.
        self.aliases = ()  # type: ignore
        if aliases is not None:
            self.aliases = tuple(AliasInfo(a) for a in aliases)  # type: ignore

    def __call__(self, *args, **kwargs):
        """Calls the function variant of the operator."""
        return self.op(*args, **kwargs)

    def get_op(self):
        """Returns the function variant of the operator, torch.<op_name>."""
        return self.op

    def get_method(self):
        """Returns the method variant of the operator, torch.Tensor.<op_name>.
        Returns None if the operator has no method variant.
        """
        return self.method_variant

    def get_inplace(self):
        """Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
        Returns None if the operator has no inplace variant.
        """
        return self.inplace_variant

    def get_operator_variant(self):
        """Returns operator variant of the operator, e.g. operator.neg
        Returns None if the operator has no operator variant.
        """
        return self.operator_variant

    def sample_inputs(self, device, dtype, requires_grad=False):
        """Returns an iterable of SampleInputs.

        These samples should be sufficient to test the function works correctly
        with autograd, TorchScript, etc.
        """
        return self.sample_inputs_func(self, device, dtype, requires_grad)

    # Returns True if the test should be skipped and False otherwise
    def should_skip(self, cls_name, test_name, device_type, dtype):
        return any(si.is_active(cls_name, test_name, device_type, dtype)
                   for si in self.skips)

    def supported_dtypes(self, device_type):
        # Per-backend dtype set; ROCm is selected over CUDA when testing on ROCm.
        if device_type == 'cpu':
            return self.dtypesIfCPU
        if device_type == 'cuda':
            return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
        else:
            return self.dtypes

    def supports_dtype(self, dtype, device_type):
        return dtype in self.supported_dtypes(device_type)

    def default_test_dtypes(self, device_type):
        """Returns the default dtypes used to test this operator on the device.

        Equal to the operator's default_test_dtypes filtered to remove dtypes
        not supported by the device.
        """
        supported = self.supported_dtypes(device_type)
        return (supported if self._default_test_dtypes is None
                else supported.intersection(self._default_test_dtypes))
# Common tensor dimension sizes used by the sample-input functions below.
L = 20  # "large" dimension
M = 10  # "medium" dimension
S = 5  # "small" dimension
def sample_inputs_unary(op_info, device, dtype, requires_grad):
    """Samples for unary ufuncs: a 1D (L,) tensor and a 0-dim tensor, with
    values restricted to the interior of the op's domain by _domain_eps."""
    lo, hi = op_info.domain
    if lo is not None:
        lo = lo + op_info._domain_eps
    if hi is not None:
        hi = hi - op_info._domain_eps

    shapes = ((L,), ())
    return tuple(SampleInput(make_tensor(shape, device, dtype,
                                         low=lo, high=hi,
                                         requires_grad=requires_grad))
                 for shape in shapes)
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
class UnaryUfuncInfo(OpInfo):
    """Operator information for 'universal unary functions (unary ufuncs).'
    These are functions of a single tensor with common properties like:
      - they are elementwise functions
      - the input shape is the output shape
      - they typically have method and inplace variants
      - they typically support the out kwarg
      - they typically have NumPy or SciPy references
    See NumPy's universal function documentation
    (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
    about the concept of ufuncs.
    """
    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref,  # a reference function
                 dtypes=floating_types(),
                 dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
                 dtypesIfCUDA=floating_and_complex_types_and(torch.half),
                 dtypesIfROCM=floating_types_and(torch.half),
                 domain=(None, None),  # the [low, high) domain of the function
                 handles_large_floats=True,  # whether the op correctly handles large float values (like 1e20)
                 handles_extremals=True,  # whether the op correctly handles extremal values (like inf)
                 handles_complex_extremals=True,  # whether the op correct handles complex extremals (like inf -infj)
                 supports_complex_to_float=False,  # op supports casting from complex input to real output safely eg. angle
                 sample_inputs_func=sample_inputs_unary,
                 supports_sparse=False,
                 **kwargs):
        """Initializes unary-ufunc metadata; unrecognized kwargs go to OpInfo."""
        super(UnaryUfuncInfo, self).__init__(name,
                                             dtypes=dtypes,
                                             dtypesIfCPU=dtypesIfCPU,
                                             dtypesIfCUDA=dtypesIfCUDA,
                                             dtypesIfROCM=dtypesIfROCM,
                                             sample_inputs_func=sample_inputs_func,
                                             supports_sparse=supports_sparse,
                                             **kwargs)
        self.ref = ref  # reference (e.g. NumPy/SciPy) implementation
        self.domain = domain
        self.handles_large_floats = handles_large_floats
        self.handles_extremals = handles_extremals
        self.handles_complex_extremals = handles_complex_extremals
        self.supports_complex_to_float = supports_complex_to_float

        # Epsilon to ensure grad and gradgrad checks don't test values
        # outside a function's domain.
        self._domain_eps = 1e-5
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad):
    """Samples for tensor_split on an (S, S, S) tensor: an index-list tensor,
    a 0-dim tensor argument, and an index-list tensor with an explicit dim."""
    return (SampleInput(make_tensor((S, S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(torch.tensor([1, 2, 3]),),),
            SampleInput(make_tensor((S, S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(torch.tensor(1),),),
            SampleInput(make_tensor((S, S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(torch.tensor([1, 2, 3]),),
                        kwargs=dict(dim=1)),)
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):
    """Samples for linalg.norm over sizes of rank 1-3 (including zero-sized
    dims), each with keepdim False/True.  Vector (rank-1) and matrix (rank-2)
    sizes additionally get ord arguments; 'nuc'/'fro' also get an explicit
    dim=(0, 1) sample."""
    test_sizes = [
        (S,),
        (0,),
        (S, S),
        (0, 0),
        (S, 0),
        (0, S),
        (S, S, S),
        (0, S, S),
        (S, 0, S),
        (0, 0, 0),
    ]
    vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
    matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
    inputs = []
    is_dtype_half = dtype in [torch.float16, torch.bfloat16]
    for test_size in test_sizes:
        is_vector_norm = len(test_size) == 1
        is_matrix_norm = len(test_size) == 2
        for keepdim in [False, True]:
            # default-ord sample for every size
            inputs.append(SampleInput(
                make_tensor(
                    test_size, device, dtype, low=None, high=None,
                    requires_grad=requires_grad),
                kwargs=dict(
                    keepdim=keepdim)))
            # ord arguments only apply to vector and matrix norms
            if not (is_vector_norm or is_matrix_norm):
                continue
            ords = vector_ords if is_vector_norm else matrix_ords
            for ord in ords:
                # TODO: remove this check when `max` is implemented for
                # float16 and bfloat16. Issue:
                # https://github.com/pytorch/pytorch/issues/50790
                if is_vector_norm and is_dtype_half and ord in [inf, -inf]:
                    continue
                inputs.append(SampleInput(
                    make_tensor(
                        test_size, device, dtype,
                        low=None, high=None,
                        requires_grad=requires_grad),
                    args=(ord,),
                    kwargs=dict(
                        keepdim=keepdim)))

                if ord in ['nuc', 'fro']:
                    # also exercise the explicit-dim path for these ords
                    inputs.append(SampleInput(
                        make_tensor(
                            test_size, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad),
                        kwargs=dict(
                            ord=ord,
                            keepdim=keepdim,
                            dim=(0, 1))))
        return inputs
def sample_inputs_slogdet(op_info, device, dtype, requires_grad):
    # original test cases from 'method_tests' have too many test_inputs
    # we don't actually need all of them to check the autograd and jit correctness
    # sample inputs with shapes 0x0, 0xSxS, 2x0x0 are added
    shapes = (
        (0, 0),     # '0x0'
        (S, S),     # 'SxS'
        (0, S, S),  # 'zero_batched_SxS'
        (2, 0, 0),  # 'batched_0x0'
        (2, S, S),  # 'batched_SxS'
    )
    samples = []
    for shape in shapes:
        t = torch.randn(*shape, dtype=dtype, device=device)
        t.requires_grad = requires_grad
        samples.append(SampleInput(t))
    return samples
def sample_inputs_addmm(op_info, device, dtype, requires_grad):
    """Samples for addmm: a triple of (S, S) tensors (the third never requires
    grad).  Complex dtypes get an extra sample with complex beta/alpha."""
    input = SampleInput((make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=False)))
    if dtype.is_complex:
        another_input = SampleInput((make_tensor((S, S), device, dtype,
                                                 low=None, high=None,
                                                 requires_grad=requires_grad),
                                     make_tensor((S, S), device, dtype,
                                                 low=None, high=None,
                                                 requires_grad=requires_grad),
                                     make_tensor((S, S), device, dtype,
                                                 low=None, high=None,
                                                 requires_grad=False)),
                                    kwargs=dict(beta=1 + 2j, alpha=2 + 3j))
        return (input, another_input)
    else:
        return (input, )
def sample_inputs_addr(op_info, device, dtype, requires_grad):
    """Samples for addr: ((S, M) or 0-dim input, (S,) vec1, (M,) vec2),
    first with default alpha/beta and then with dtype-appropriate
    (complex / float / int) alpha and beta keyword arguments."""
    input1 = SampleInput((make_tensor((S, M), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((S, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((M, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad)))
    input2 = SampleInput((make_tensor((), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((S, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((M, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad)))
    # alpha/beta chosen to match the dtype family
    if dtype.is_complex:
        alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
    elif dtype.is_floating_point:
        alpha, beta = 0.2, 0.6
    else:
        alpha, beta = 2, 3
    input3 = SampleInput((make_tensor((S, M), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((S, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((M, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad)),
                         kwargs=dict(beta=beta, alpha=alpha))
    input4 = SampleInput((make_tensor((), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((S, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          make_tensor((M, ), device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad)),
                         kwargs=dict(beta=beta, alpha=alpha))
    return (input1, input2, input3, input4)
def sample_inputs_xlogy(self, device, dtype, requires_grad):
    """One sample for xlogy: an unbounded (S, S) first argument paired with a
    non-negative (low=0) (S, S) second argument so the log is well-defined."""
    first = make_tensor((S, S), device, dtype,
                        low=None, high=None,
                        requires_grad=requires_grad)
    second = make_tensor((S, S), device, dtype,
                         low=0, high=None,
                         requires_grad=requires_grad)
    return (SampleInput((first, second)),)
def sample_inputs_trace(self, device, dtype, requires_grad):
    """A single square (S, S) sample input for trace."""
    square = make_tensor((S, S), device, dtype,
                         low=None, high=None,
                         requires_grad=requires_grad)
    return (SampleInput(square),)
def sample_inputs_linalg_inv(op_info, device, dtype, requires_grad=False):
    """
    Generates always-invertible inputs for torch.linalg.inv via
    random_fullrank_matrix_distinct_singular_value.

    One SampleInput per element of itertools.product(batches, ns),
    8 in total:
      batches: () single input, (0,) zero batch dim, (2,) batch of two,
               (2, 3) a 2x3 batch of matrices
      ns:      0x0 and 5x5 matrices
    Zero-sized dimensions are implementation edge cases and are included
    to guard against unexpected crashes.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    batches = [(), (0, ), (2, ), (2, 3)]
    ns = [0, 5]
    samples = []
    for batch, n in product(batches, ns):
        matrix = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype).to(device)
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def np_sinc_with_fp16_as_fp32(x):
    """Evaluates numpy's sinc, promoting fp16 inputs to fp32 first.

    Context: numpy's sinc returns NaN when evaluated at 0 for fp16, so
    fp16 arrays are upcast before the call; other dtypes pass through.
    """
    if x.dtype != np.float16:
        return np.sinc(x)
    return np.sinc(x.astype(np.float32))
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad):
    """Samples for broadcast_to: (source size, target shape) pairs covering
    size-1 dims, missing leading dims, and 0-dim inputs."""
    test_cases = (
        ((S, 1, 1), (S, S, S)),
        ((S, 1, S), (S, S, S)),
        ((S, 1), (S, S, S)),
        ((1,), (S, S, S)),
        ((1, S), (1, 1, S)),
        ((), ()),
        ((), (1, 3, 2)),
    )

    samples = []
    for size, shape in test_cases:
        source = make_tensor(size, device, dtype,
                             low=None, high=None,
                             requires_grad=requires_grad)
        samples.append(SampleInput((source, shape)))
    return tuple(samples)
def sample_inputs_div(self, device, dtype, requires_grad, rounding_mode=None):
    """Samples for div variants: tensor / tensor (denominator bounded away
    from zero) and tensor / Python scalar.  A rounding_mode, when given, is
    forwarded as a kwarg on the tensor/tensor sample only."""
    a = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
    is_integral = not dtype.is_floating_point and not dtype.is_complex
    # low=1 (integral) / 0.1 (otherwise) keeps the divisor nonzero
    b = make_tensor((S, S, S), device, dtype, low=1 if is_integral else 0.1, high=None,
                    requires_grad=requires_grad)
    kwargs = None
    if rounding_mode is not None:
        kwargs = dict(rounding_mode=rounding_mode)
    return [
        SampleInput((a, b), kwargs=kwargs),
        SampleInput((a,), args=(2,)),
    ]
def sample_inputs_stack(op_info, device, dtype, requires_grad):
    """One sample for stack: a triple of (S, S) tensors.

    NOTE(review): the kwarg here is 'idx', while torch.stack's keyword is
    'dim' — confirm the consumer of these samples translates 'idx' before
    invoking the op.
    """
    return (SampleInput((make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         make_tensor((S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad)), kwargs=dict(idx=0)),)
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad):
    """One sample whose input is a triple of (S, S) tensors."""
    tensors = tuple(make_tensor((S, S), device, dtype,
                                low=None, high=None,
                                requires_grad=requires_grad)
                    for _ in range(3))
    return (SampleInput(tensors),)
def sample_inputs_gather(op_info, device, dtype, requires_grad):
    """Samples for gather as (input, dim, index) triples: 2D inputs gathered
    along dim 0 and 1, plus 0-dim and 1D inputs with 1-element and 0-dim
    int64 index tensors."""
    return (SampleInput((make_tensor((M, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, gather_variable((S, S), 1, M, True, device=device))),
            SampleInput((make_tensor((M, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         1, gather_variable((M, S // 2), 0, S, True, device=device))),
            SampleInput((make_tensor((), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, torch.tensor([0], dtype=torch.int64, device=device))),
            SampleInput((make_tensor((S,), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, torch.tensor(0, dtype=torch.int64, device=device))),
            SampleInput((make_tensor((), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, torch.tensor(0, dtype=torch.int64, device=device))),
            )
def sample_inputs_diff(op_info, device, dtype, requires_grad):
    """Samples for diff as (input, n, dim, prepend, append); n is fixed at 1
    and prepend/append tensors are only built when the test case gives
    their sizes."""
    test_cases = (
        ((1,), 0, None, None),
        ((S,), 0, None, None),
        ((S, 1), 0, None, None),
        ((S, 1), 1, None, None),
        ((S, S), 0, None, None),
        ((S, S), 1, None, None),
        ((S, S), 0, (1, S), (2, S)),
        ((S, S), 0, None, (2, S)),
        ((S, S, S), 1, None, None),
        ((S, S, S), 1, (S, 1, S), (S, 1, S)),)

    sample_inputs = []
    for size, dim, size_prepend, size_append in test_cases:
        # args: (input, n=1, dim, prepend-or-None, append-or-None)
        args = (make_tensor(size, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad), 1, dim,
                make_tensor(size_prepend, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_prepend else None,
                make_tensor(size_append, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_append else None)
        sample_inputs += [SampleInput(args)]

    return tuple(sample_inputs)
def sample_inputs_index_select(op_info, device, dtype, requires_grad):
    """Samples for index_select as (input, dim, index): a 3D input with a
    2-element index, and 0-dim inputs with 1-element and 0-dim int64
    index tensors."""
    return (SampleInput((make_tensor((S, S, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, index_variable(2, S, device=device))),
            SampleInput((make_tensor((), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, torch.tensor([0], dtype=torch.int64, device=device))),
            SampleInput((make_tensor((), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         0, torch.tensor(0, dtype=torch.int64, device=device))),
            )
def sample_inputs_index_fill(op_info, device, dtype, requires_grad):
    """Samples for index_fill as (input, dim, index, fill value), combining
    every dim of an (S, S, S) tensor with contiguous and non-contiguous
    inputs and indices, plus a negative-index variant."""
    samples = []
    t = make_tensor((S, S, S), device, dtype,
                    low=None, high=None,
                    requires_grad=requires_grad)
    # complex inputs get a complex fill value
    fill_val = torch.tensor(-1 + 1j if t.is_complex() else -1)
    # non-contiguous input
    t01 = t.transpose(0, 1)
    t02 = t.transpose(0, 2)
    t12 = t.transpose(1, 2)
    idx = index_variable(1, S, device=device)
    # non-contiguous index
    idx_nonctg = torch.empty_strided((S,), (2,), device=device, dtype=torch.int64)
    idx_nonctg.copy_(idx)
    for d in range(t.dim()):
        for tensor in [t, t01, t02, t12]:
            samples.append(SampleInput((tensor, d, idx, fill_val)))
            # -idx - 1 maps valid indices into the negative-index range
            samples.append(SampleInput((tensor, d, -idx - 1, fill_val)))
            samples.append(SampleInput((tensor, d, idx_nonctg, fill_val)))
    return samples
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad):
    """Samples for movedim/moveaxis on a (4, 3, 2, 1) tensor with positive
    and negative source/destination dim tuples.  Note that -0 evaluates to
    0, so the second sample's dims are (0, -1, -2, -3) -> (-3, -2, -1, 0)."""
    return (SampleInput((make_tensor((4, 3, 2, 1), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         (0, 1, 2, 3), (3, 2, 1, 0))),
            SampleInput((make_tensor((4, 3, 2, 1), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad),
                         (0, -1, -2, -3), (-3, -2, -1, -0))))
def sample_repeat_tile(op_info, device, dtype, requires_grad):
    """Samples for repeat/tile: the product of repeat-dim tuples and tensor
    shapes (contiguous and transposed), with invalid combinations filtered
    out for repeat."""
    rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
    shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))

    if requires_grad:
        # Tests for variant_consistency_jit, grad, gradgrad
        # are slower. Use smaller bags of `rep_dims` and `shapes`
        # in this case.
        rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1))  # type: ignore
        shapes = ((), (0,), (2,), (3, 2))  # type: ignore

    tensors = [make_tensor(shape, device, dtype,
                           low=None, high=None,
                           requires_grad=requires_grad) for shape in shapes]

    samples = []
    for rep_dim, tensor in product(rep_dims, tensors):
        # .T exercises a non-contiguous (transposed) layout as well
        for t in (tensor, tensor.T):
            if op_info.name == 'repeat' and len(rep_dim) >= t.dim():
                # `torch.repeat` errors for `len(rep_dims) < t.dim()`,
                # so we filter such combinations.
                samples.append(SampleInput((t, rep_dim),))
            elif op_info.name == 'tile':
                samples.append(SampleInput((t, rep_dim),))
    return samples
def np_unary_ufunc_integer_promotion_wrapper(fn):
    # Wrapper that passes PyTorch's default scalar
    # type as an argument to the wrapped NumPy
    # unary ufunc when given an integer input.
    # This mimicks PyTorch's integer->floating point
    # type promotion.
    #
    # This is necessary when NumPy promotes
    # integer types to double, since PyTorch promotes
    # integer types to the default scalar type.

    # Helper to determine if promotion is needed
    def is_integral(dtype):
        # Note: uses the builtin `bool` instead of the `np.bool` alias,
        # which was deprecated in NumPy 1.20 and removed in NumPy 1.24
        # (the alias referred to the builtin anyway, so behavior is
        # unchanged).
        return dtype in [bool, np.uint8, np.int8, np.int16, np.int32, np.int64]

    # NOTE: Promotion in PyTorch is from integer types to the default dtype
    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]

    @wraps(fn)
    def wrapped_fn(x):
        # Promote integer inputs by asking the ufunc for the default dtype
        if is_integral(x.dtype):
            return fn(x, dtype=np_dtype)
        return fn(x)

    return wrapped_fn
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
    """Operator information for torch.fft transforms. """

    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref=None,  # Reference implementation (probably in np.fft namespace)
                 dtypes=floating_and_complex_types(),
                 ndimensional: bool,  # Whether dim argument can be a tuple
                 decorators=None,
                 **kwargs):
        """Initializes FFT metadata; MKL/ROCm skips and a slow-gradgrad
        decorator are always appended to any caller-provided decorators."""
        decorators = list(decorators) if decorators is not None else []
        decorators += [
            skipCPUIfNoMkl,
            skipCUDAIfRocm,
            # gradgrad is quite slow
            DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),
        ]

        super().__init__(name=name,
                         dtypes=dtypes,
                         decorators=decorators,
                         **kwargs)
        # default the reference to the same-named function under np
        self.ref = ref if ref is not None else _getattr_qual(np, name)
        self.ndimensional = ndimensional

    def sample_inputs(self, device, dtype, requires_grad=False):
        """Returns samples on a 3D tensor and a 1D length-31 tensor; the
        n-dimensional variants take tuple s/dim kwargs, the 1D variants
        take scalar n/dim kwargs."""
        nd_tensor = make_tensor((S, S + 1, S + 2), device, dtype, low=None, high=None,
                                requires_grad=requires_grad)
        tensor = make_tensor((31,), device, dtype, low=None, high=None,
                             requires_grad=requires_grad)

        if self.ndimensional:
            return [
                SampleInput(nd_tensor, kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
                SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
                SampleInput(nd_tensor, kwargs=dict(s=(8,))),
                SampleInput(tensor),
                *(SampleInput(nd_tensor, kwargs=dict(dim=dim))
                  for dim in [-1, -2, -3, (0, -1)]),
            ]
        else:
            return [
                SampleInput(nd_tensor, kwargs=dict(n=10, dim=1, norm='ortho')),
                SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
                SampleInput(nd_tensor, kwargs=dict(n=7)),
                SampleInput(tensor),
                *(SampleInput(nd_tensor, kwargs=dict(dim=dim))
                  for dim in [-1, -2, -3]),
            ]
class ShapeFuncInfo(OpInfo):
    """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref,  # a reference function
                 dtypes=floating_types(),
                 dtypesIfCPU=None,
                 dtypesIfCUDA=None,
                 dtypesIfROCM=None,
                 sample_inputs_func=None,
                 **kwargs):
        """Initializes shape-op metadata; only adds a reference function on
        top of the base OpInfo constructor."""
        super(ShapeFuncInfo, self).__init__(name,
                                            dtypes=dtypes,
                                            dtypesIfCPU=dtypesIfCPU,
                                            dtypesIfCUDA=dtypesIfCUDA,
                                            dtypesIfROCM=dtypesIfROCM,
                                            sample_inputs_func=sample_inputs_func,
                                            **kwargs)
        self.ref = ref  # reference (e.g. NumPy) implementation
class HermitianOpInfo(OpInfo):
    """Operator information for Hermitian functions
    These are functions that take Hermitian matrices as input.
    They require a modified function to be tested for gradcheck, because the finite-difference algorithm
    for calculating derivatives does not preserve the Hermitian property of the input and returning incorrect results.
    """

    def get_op(self):
        """
        Returns the function variant of the operator, torch.<op_name>,
        compatible with gradcheck for Hermitian functions.
        It works only for single input argument.
        """
        def hermitian_func(non_hermitian_input, **kwargs):
            # A + A^H is Hermitian for any A, so arbitrary gradcheck
            # perturbations still produce a valid Hermitian input.
            hermitian_input = non_hermitian_input + non_hermitian_input.conj().transpose(-2, -1)
            return self.op(hermitian_input, **kwargs)

        return hermitian_func
class TriangularOpInfo(OpInfo):
    """Operator information for function that take lower or upper triangular matrices as input.
    They require a modified function to be tested for gradcheck, because the finite-difference algorithm
    for calculating derivatives does not preserve the triangular property of the input and returning incorrect results.
    """

    def get_op(self):
        """
        Returns the function variant of the operator, torch.<op_name>,
        compatible with gradcheck for triangular input functions.
        It works only for single input argument and upper kwarg
        """
        def triangular_func(non_triangular_input, upper=False):
            # Re-project the perturbed input back onto the triangular set
            # before calling the op.
            if upper:
                triangular_input = non_triangular_input.triu()
            else:
                triangular_input = non_triangular_input.tril()
            return self.op(triangular_input, upper=upper)

        return triangular_func

    def get_method(self):
        """
        Returns the method variant of the operator
        compatible with gradcheck for triangular input functions.
        It works only for single input argument and upper kwarg
        """
        def triangular_func(non_triangular_input, upper=False):
            if upper:
                triangular_input = non_triangular_input.triu()
            else:
                triangular_input = non_triangular_input.tril()
            return self.method_variant(triangular_input, upper=upper)

        return triangular_func

    def sample_inputs(self, device, dtype, requires_grad=False):
        """
        This function generates Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices
        for cholesky_inverse.
        """
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        inputs = (
            torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix
            torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices
            random_hermitian_pd_matrix(S, dtype=dtype, device=device),  # single matrix
            random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices
        )
        test_cases = (torch.linalg.cholesky(a) for a in inputs)
        out = []
        for a in test_cases:
            a.requires_grad = requires_grad
            # each factor is tested both with the default and upper=True
            out.append(SampleInput(a))
            out.append(SampleInput(a, kwargs=dict(upper=True)))
        return out
def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False):
    """
    Generates inputs for torch.linalg.pinv with distinct singular values so
    autograd is always stable.  Since the implementation relies on torch.svd
    and torch.linalg.eigh, a square S x S matrix and a batched (3 x S x S)
    input are sufficient coverage.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    samples = []
    for batch in ((), (3,)):  # single matrix, then a batch of matrices
        matrix = random_fullrank_matrix_distinct_singular_value(S, *batch, dtype=dtype).to(device)
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False):
    """
    Reuses the torch.linalg.pinv samples, marking every one of them with the
    hermitian=True keyword argument.
    """
    samples = sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.kwargs = {"hermitian": True}
    return samples
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True):
    """
    This function generates always solvable input for torch.linalg.solve
    Using random_fullrank_matrix_distinct_singular_value gives a non-singular (=invertible, =solvable) matrices 'a'.
    The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
    The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
    In total this function generates 18 SampleInputs
    'batches' cases include:
        () - single input,
        (0,) - zero batched dimension,
        (2,) - batch of two matrices.
    'ns' gives 0x0 and 5x5 matrices.
    and 'nrhs' controls the number of vectors to solve for:
        () - using 1 as the number of vectors implicitly
        (1,) - same as () but explicit
        (3,) - solve for 3 vectors.
    Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
    'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
    torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow
    1D tensors (vectors) as the right-hand-side.
    Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
    'vector_rhs_allowed' may be removed here as well.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    batches = [(), (0, ), (2, )]
    ns = [0, 5]
    if vector_rhs_allowed:
        nrhs = [(), (1,), (3,)]
    else:
        nrhs = [(1,), (3,)]
    out = []
    for n, batch, rhs in product(ns, batches, nrhs):
        # invertible left-hand side; both sides may require grad
        a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype).to(device)
        a.requires_grad = requires_grad
        b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)
        b.requires_grad = requires_grad
        out.append(SampleInput((a, b)))
    return out
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False):
    """
    This function generates always solvable input for legacy solve functions
    (the ones that are not in torch.linalg module).
    The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation
    should have b.ndim >= 2, vectors are not allowed.
    Also the arguments order is swapped.
    """
    out = sample_inputs_linalg_solve(
        op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
    )
    # legacy solve takes (b, A) instead of (A, b)
    for sample in out:
        sample.input = tuple(reversed(sample.input))
    return out
def sample_inputs_std_var(op_info, device, dtype, requires_grad):
    """Samples for std/var: a 3D and a 1D tensor combined with dim,
    unbiased, and keepdim keyword arguments."""
    tensor_nd = make_tensor((S, S, S), device=device, dtype=dtype,
                            low=None, high=None, requires_grad=requires_grad)
    tensor_1d = make_tensor((S,), device=device, dtype=dtype,
                            low=None, high=None, requires_grad=requires_grad)

    return [
        SampleInput(tensor_nd),
        SampleInput(tensor_nd, kwargs=dict(dim=1)),
        SampleInput(tensor_nd, kwargs=dict(dim=1, unbiased=True, keepdim=True)),
        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=True, keepdim=True)),
        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=False, keepdim=False)),
    ]
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
    """
    This function generates input for torch.svd with distinct singular values so that autograd is always stable.
    Matrices of different size:
        square matrix - S x S size
        tall marix - S x (S-2)
        wide matrix - (S-2) x S
    and batched variants of above are generated.
    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd
    It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    # svd and linalg.svd returns V and V.conj().T, respectively. So we need to slice
    # along different dimensions when needed (this is used by
    # test_cases2:wide_all and wide_all_batched below)
    if is_linalg_svd:
        def slice_V(v):
            return v[..., :(S - 2), :]

        def uv_loss(usv):
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0]
            return u00 * v00_conj
    else:
        def slice_V(v):
            return v[..., :, :(S - 2)]

        def uv_loss(usv):
            # torch.svd returns V (not V^H), so conjugate explicitly here
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0].conj()
            return u00 * v00_conj

    test_cases1 = (  # some=True (default)
        # loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when sigh of the singular vectors change.
        # the simplest choice to satisfy this requirement is to apply 'abs'.
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: usv[1]),  # 'check_grad_s'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[0])),  # 'check_grad_u'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[2])),  # 'check_grad_v'
        # this test is important as it checks the additional term that is non-zero only for complex-valued inputs
        # and when the loss function depends both on 'u' and 'v'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            uv_loss),  # 'check_grad_uv'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))),  # 'wide'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'wide_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall_batched'
    )
    test_cases2 = (  # some=False
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all_batched'
    )

    out = []
    # full_matrices (linalg.svd) and some (svd) are inverses of each other
    for a, out_fn in test_cases1:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': False}
        else:
            kwargs = {'some': True}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))

    for a, out_fn in test_cases2:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': True}
        else:
            kwargs = {'some': False}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))

    return out
def sample_inputs_svd(op_info, device, dtype, requires_grad=False):
    """Sample inputs for torch.svd (the some=... interface)."""
    return _sample_inputs_svd(op_info, device, dtype, requires_grad,
                              is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False):
    """Sample inputs for torch.linalg.svd (the full_matrices=... interface)."""
    return _sample_inputs_svd(op_info, device, dtype, requires_grad,
                              is_linalg_svd=True)
def sample_inputs_pinverse(op_info, device, dtype, requires_grad=False):
    """
    Generates inputs for torch.pinverse with distinct singular values so
    autograd is always stable.  Since the implementation relies on torch.svd,
    a square S x S matrix and a batched (3 x S x S) input are sufficient.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    samples = []
    for batch in ((), (3,)):  # 'pinverse' and 'pinverse batched'
        matrix = random_fullrank_matrix_distinct_singular_value(S, *batch, dtype=dtype).to(device)
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
    """Samples for flip: two 3D tensors (one with a zero-sized dim) crossed
    with several dims tuples, including the empty tuple."""
    tensors = (
        make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)
    )
    dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())

    samples = []
    for tensor, dim in product(tensors, dims):
        samples.append(SampleInput(tensor, kwargs={'dims': dim}))
    return samples
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
    """Samples for fliplr/flipud: one per tensor, no extra arguments."""
    shapes = ((S, M, S), (S, 0, M))
    samples = []
    for shape in shapes:
        t = make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
        samples.append(SampleInput(t))
    return samples
def sample_inputs_diag(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.diag: square and rectangular matrices with diagonal offsets, plus a vector."""
    vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))
    matrices = (
        make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
    )
    # NOTE(review): (2,) appears twice below; the second entry may have been
    # meant to be a different offset (e.g. (-1,)) — confirm intent.
    diagonal_args = ((), (2,), (-2,), (1,), (2,))
    samples = [SampleInput(matrix, args=offset)
               for matrix, offset in product(matrices, diagonal_args)]
    samples.append(vec_sample)
    return samples
def sample_inputs_logit(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.logit, shrunk away from the domain boundaries for numerical stability."""
    low, high = op_info.domain
    # Note: Operator is very sensitive at points near the
    # start and end of domain and leads to NaN for float16
    # if domain_eps is 1e-5.
    eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
    lo = low + eps
    hi = high - eps

    def _tensor(shape):
        # All inputs share the same (shrunken) domain and grad settings.
        return make_tensor(shape, device, dtype, low=lo, high=hi, requires_grad=requires_grad)

    return (
        SampleInput(_tensor((S, S, S))),
        SampleInput(_tensor((S, S, S)), args=(0.2,)),
        SampleInput(_tensor(())),
        SampleInput(_tensor(()), args=(0.2,)),
    )
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.masked_scatter with full-size, broadcastable 1-D, and scalar boolean masks."""
    def _src():
        return make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad)

    # One factory per mask kind; called lazily so the order of random
    # tensor construction matches one (input, mask, source) triple per sample.
    mask_factories = (
        lambda: torch.randn(M, M, device=device) > 0,    # full-size mask
        lambda: torch.randn((M,), device=device) > 0,    # broadcastable 1-D mask
        lambda: bernoulli_scalar().to(device),           # scalar mask
    )
    samples = []
    for make_mask in mask_factories:
        samples.append(SampleInput(_src(), args=(make_mask(), _src())))
    return tuple(samples)
def sample_inputs_masked_select(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.masked_select covering same-shape, broadcasting, and scalar input/mask cases."""
    def _inp(shape):
        return make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)

    def _scalar_true():
        return torch.tensor(1, device=device, dtype=torch.bool)

    # (input shape, mask factory) pairs; masks are built lazily so random
    # tensor construction order stays input-then-mask per sample.
    cases = (
        ((M, M), lambda: torch.randn(M, M, device=device) > 0),
        ((M, M), lambda: torch.randn((M,), device=device) > 0),   # 1-D mask broadcast over input
        ((M,), lambda: torch.randn((M, M), device=device) > 0),   # input broadcast over 2-D mask
        ((M, 1, M), lambda: torch.randn((M, M), device=device) > 0),
        ((), _scalar_true),
        ((M, M), _scalar_true),
        ((), lambda: torch.randn((M, M), device=device) > 0),
    )
    samples = []
    for input_shape, make_mask in cases:
        samples.append(SampleInput(_inp(input_shape), args=(make_mask(),)))
    return tuple(samples)
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',
dtypes=[torch.cfloat, torch.cdouble]),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.cfloat, torch.cdouble, torch.bfloat16]),
SkipInfo('TestCommon', 'test_jit_alias_remapping',
dtypes=[torch.cfloat, torch.cdouble, torch.bfloat16]),
),
test_inplace_grad=False,
assert_autodiffed=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
default_test_dtypes=[torch.long, torch.half, torch.bfloat16, torch.float32, torch.cfloat],
skip_bfloat16_grad=True,
assert_autodiffed=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
ref=np.arccosh,
domain=(1, float('inf')),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
test_inplace_grad=False,
skips=(
# RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
OpInfo('addmm',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
# BFloat16 support on CUDA requires CUDA 11 and SM53
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16, torch.float16, torch.cfloat, torch.cdouble]),),
sample_inputs_func=sample_inputs_addmm),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
# Reference: https://github.com/pytorch/pytorch/issues/50747
test_inplace_grad=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.float16, torch.cfloat, torch.cdouble, torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/50747
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),),
sample_inputs_func=sample_inputs_addr),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
assert_autodiffed=True,
skip_bfloat16_grad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS)
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
test_inplace_grad=False,
skips=(
# RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('atan',
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
assert_autodiffed=True,
skip_bfloat16_grad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('atanh',
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
test_inplace_grad=False,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_tensor_out=False,
test_inplace_grad=False,
sample_inputs_func=sample_inputs_broadcast_to),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
assert_autodiffed=True),
TriangularOpInfo('cholesky_inverse',
op=torch.cholesky_inverse,
dtypes=floating_and_complex_types(),
# TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
# with complex dtype.
test_complex_grad=False,
test_inplace_grad=False,
check_batched_gradgrad=False,
supports_tensor_out=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# These tests do not take into account custom op.get_op()
# TODO: implement op.input_func instead of modifying op.get_op()
# See https://github.com/pytorch/pytorch/issues/50837
SkipInfo('TestCommon', 'test_variant_consistency_jit'),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=[torch.complex64, torch.complex128]),)),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
handles_large_floats=False,
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.int8]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.float16]),
)),
OpInfo('diff',
op=torch.diff,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_diff,
test_inplace_grad=False),
OpInfo('div',
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_div,
assert_autodiffed=True),
OpInfo('div',
variant_test_name='true_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_div, rounding_mode='true'),
assert_autodiffed=True),
OpInfo('div',
variant_test_name='trunc_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_div, rounding_mode='trunc'),
assert_autodiffed=True),
OpInfo('div',
variant_test_name='floor_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_div, rounding_mode='floor'),
assert_autodiffed=True),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
safe_casts_outputs=True),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
sample_inputs_func=sample_inputs_diag,
test_inplace_grad=False),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
test_inplace_grad=False,),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
ref=np.fft.fftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
test_inplace_grad=False,
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
ref=np.fft.hfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
check_batched_gradgrad=False,
test_inplace_grad=False,),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
ref=np.fft.rfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
check_batched_grad=False,
check_batched_gradgrad=False,
test_inplace_grad=False,),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
ref=np.fft.rfftn,
ndimensional=True,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
test_inplace_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
ref=np.fft.ifft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
test_inplace_grad=False,),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
ref=np.fft.ifftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
test_inplace_grad=False,),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
ref=np.fft.ihfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
supports_tensor_out=True,
check_batched_grad=False,
test_inplace_grad=False,),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
ref=np.fft.irfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
check_batched_gradgrad=False,
test_inplace_grad=False,),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
ref=np.fft.irfftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
supports_tensor_out=True,
check_batched_gradgrad=False,
test_inplace_grad=False,),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
test_inplace_grad=False,
supports_tensor_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
test_inplace_grad=False,
supports_tensor_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
test_inplace_grad=False,
supports_tensor_out=False),
OpInfo('linalg.norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_norm,
aten_name='linalg_norm',
skips=(
# TODO: remove this once `pow` is implemented for float16
# and bfloat16 on CPU. Issue:
# https://github.com/pytorch/pytorch/issues/50789
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cpu',
dtypes=[torch.float16, torch.bfloat16]),
)),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_slogdet,
output_func=itemgetter(1),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, float('inf')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, float('inf')),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
domain=(-1, float('inf')),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
safe_casts_outputs=True,
assert_autodiffed=True,
skip_bfloat16_grad=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, float('inf')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble]),
)),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
skips=(
# _th_masked_fill_bool_ not supported for Complex Types.
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.complex128]),
SkipInfo('TestGradients', 'test_fn_gradgrad',
device_type='cuda', dtypes=[torch.complex128]),
SkipInfo('TestGradients', 'test_inplace_grad',
device_type='cuda', dtypes=[torch.complex128]),
SkipInfo('TestGradients', 'test_inplace_gradgrad',
device_type='cuda', dtypes=[torch.complex128]),
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.cfloat, torch.cdouble]),
),
supports_tensor_out=False),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_select,
test_inplace_grad=False,
supports_tensor_out=True),
UnaryUfuncInfo('neg',
ref=np.negative,
skip_bfloat16_grad=True,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
assert_autodiffed=True,),
UnaryUfuncInfo('round',
ref=np.round,
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
assert_autodiffed=True,),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
assert_autodiffed=True,
skip_bfloat16_grad=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
skip_bfloat16_grad=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
safe_casts_outputs=True,
assert_autodiffed=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.int8]),
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.float16]),
)),
OpInfo('std',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
supports_tensor_out=False,
test_complex_grad=False,
test_inplace_grad=False,
# std has only partial support for complex and half (#51127)
skips=(SkipInfo('TestOpInfo', 'test_unsupported_dtypes',
dtypes=[torch.half, torch.complex64, torch.complex128]),),
assert_autodiffed=True,
),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
assert_autodiffed=True,
skip_bfloat16_grad=True,
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
)),
OpInfo('tensor_split',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_tensor_out=False,
test_inplace_grad=False,
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
# CUDA gradchecks are slow and triangular solve backward is a composite operation
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),
UnaryUfuncInfo('exp2',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half),
dtypesIfCPU=all_types_and(torch.bool, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True),
UnaryUfuncInfo('expm1',
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.half),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool),
dtypesIfCPU=None,
dtypesIfCUDA=None),
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCPU=None,
dtypesIfCUDA=None,
assert_autodiffed=True,
skip_bfloat16_grad=True,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, float('inf')),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
decorators=(precisionOverride({torch.half: 5e-2}),),
safe_casts_outputs=True,
assert_autodiffed=True,
handles_complex_extremals=False),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, float('inf')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
skip_bfloat16_grad=True,
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.bfloat16])),
safe_casts_outputs=True,
handles_complex_extremals=False),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=True,
sample_inputs_func=sample_inputs_linalg_inv,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool),
dtypesIfROCM=all_types_and_complex_and(torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
supports_complex_to_float=True,
test_inplace_grad=False),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=True,
sample_inputs_func=sample_inputs_linalg_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
HermitianOpInfo('linalg.pinv',
variant_test_name='hermitian',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# These tests do not take into account custom op.get_op()
SkipInfo('TestCommon', 'test_variant_consistency_jit'),)
),
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_svd,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
# gradgrad checks are slow
DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),
],
skips=(
# cuda gradchecks are very slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_linalg_svd,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
# gradgrad checks are slow
DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),
],
skips=(
# cuda gradchecks are very slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
sample_inputs_func=sample_inputs_gather),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_index_fill),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
skips=(
# https://github.com/pytorch/pytorch/issues/49707
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=[torch.float16, torch.bfloat16]),
SkipInfo('TestCommon', 'test_variant_consistency_jit', dtypes=[torch.float16, torch.bfloat16]),
),
sample_inputs_func=sample_inputs_index_select),
OpInfo('stack',
# gradcheck expects the input arguments as a flat list
op=lambda *args, idx: torch.stack([*args], idx),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_stack),
OpInfo('hstack',
# gradcheck expects the input arguments as a flat list
op=lambda *args: torch.hstack([*args]),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_hstack_dstack_vstack),
OpInfo('vstack',
# gradcheck expects the input arguments as a flat list
op=lambda *args: torch.vstack([*args]),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_hstack_dstack_vstack),
OpInfo('dstack',
# gradcheck expects the input arguments as a flat list
op=lambda *args: torch.dstack([*args]),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_hstack_dstack_vstack),
OpInfo('movedim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('moveaxis',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_movedim_moveaxis),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_tensor_out=False,
test_inplace_grad=False,
skips=(
# torch.repeat does not exist so we get a RuntimeError.
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_repeat_tile),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_tensor_out=False,
test_inplace_grad=False,
sample_inputs_func=sample_repeat_tile),
OpInfo('var',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
supports_tensor_out=False,
test_complex_grad=False,
test_inplace_grad=False,
# var has only partial support for complex and half (#51127)
skips=(SkipInfo('TestOpInfo', 'test_unsupported_dtypes',
dtypes=[torch.half, torch.complex64, torch.complex128]),),
assert_autodiffed=True,
),
]
if TEST_SCIPY:
    def reference_sigmoid(x):
        # NumPy/SciPy reference implementation for torch.sigmoid.
        # 'scipy.special.expit' not supported for the input types,
        # so fall back to the explicit logistic formula for complex inputs.
        if x.dtype in [np.complex64, np.complex128]:
            return (1 / (1 + np.exp(-x)))
        return scipy.special.expit(x)
    def reference_lgamma(x):
        # Reference implementation for torch.lgamma, built on scipy.special.gammaln.
        # scipy.special.gammaln returns `-inf` when input is `-inf`.
        # While Pytorch, C and C++, all return `inf` when input is `-inf`.
        # Reference:
        # https://en.cppreference.com/w/cpp/numeric/math/lgamma
        # https://en.cppreference.com/w/c/numeric/math/lgamma
        # To handle the above discrepancy,
        # we replace -inf with inf so values
        # that were originally -inf map to inf as expected
        if x.dtype.kind == 'f':
            x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
        out = scipy.special.gammaln(x)
        if x.dtype == np.float16:
            # `scipy.special.gammaln` returns output of float32 when input is float16,
            # while `torch.lgamma` preserves `float16`. But due to smaller range of float16,
            # Pytorch version outputs `inf` while SciPy returns finite values.
            out = out.astype(np.float16)
        return out
op_db_scipy_reference: List[OpInfo] = [
UnaryUfuncInfo('sigmoid',
ref=reference_sigmoid,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
# RuntimeError: sigmoid does not support automatic differentiation for outputs with complex dtype.
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.complex64, torch.complex128]),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=[torch.complex64, torch.complex128]),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
test_complex_grad=False), # Reference: https://github.com/pytorch/pytorch/issues/48552
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma,
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
skips=(
# In some cases, output is NaN (for input close to
# negative integers) especially due to reduced precision
# in float16 and NaN's can't be tested for equality.
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.float16]),),
safe_casts_outputs=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# RuntimeError: "pow" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16]),),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
skips=(
# RuntimeError: "pow" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16]),),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
active_if=LooseVersion(scipy.__version__) < "1.4.0"),
# RuntimeError: "pow" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16]),
)
),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma,
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
# Backward of `lgamma` uses `digamma` but `digamma`
# is not implemented for `BFloat16`
# Error Raised:
# RuntimeError: "digamma" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('logit',
ref=scipy.special.logit,
domain=(0, 1),
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit),
OpInfo('xlogy',
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
test_inplace_grad=True,
supports_tensor_out=True,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_xlogy),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
test_inplace_grad=False,
supports_tensor_out=False,
sample_inputs_func=sample_inputs_trace)
]
op_db = op_db + op_db_scipy_reference
# Common operator groupings
unary_ufuncs = list(filter(lambda entry: isinstance(entry, UnaryUfuncInfo), op_db))
spectral_funcs = list(filter(lambda entry: isinstance(entry, SpectralFuncInfo), op_db))
sparse_unary_ufuncs = list(
    filter(lambda entry: isinstance(entry, UnaryUfuncInfo) and entry.supports_sparse is True, op_db))
shape_funcs = list(filter(lambda entry: isinstance(entry, ShapeFuncInfo), op_db))
def index_variable(shape, max_indices, device=torch.device('cpu')):
    """Build a random index tensor.

    Returns a ``torch.long`` tensor of the requested shape whose entries are
    drawn uniformly from ``[0, max_indices)``.  An integer ``shape`` is
    treated as a 1-D length.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    samples = torch.rand(*dims, device=device)
    return samples.mul_(max_indices).floor_().long()
def index_perm_variable(shape, max_indices):
    """Build a random index tensor whose flattened entries are all distinct.

    Draws a random permutation of ``range(max_indices)``, keeps the first
    ``prod(shape)`` entries and reshapes them to ``shape``.  An integer
    ``shape`` is treated as a 1-D length.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    numel = 1
    for extent in dims:
        numel *= extent
    return torch.randperm(max_indices)[:numel].view(dims)
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
    """Build a 2-D ``torch.long`` index tensor for use with gather/scatter.

    Each slice taken along ``index_dim`` holds distinct values drawn from
    ``[0, max_indices)``.  With ``duplicate=True``, the first two slices along
    the batch dimension are made identical so duplicate indices are exercised.
    """
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    result = torch.zeros(*shape, dtype=torch.long, device=device)
    for pos in range(shape[index_dim]):
        perm = torch.randperm(max_indices, device=device)
        result.select(index_dim, pos).copy_(perm[:shape[batch_dim]])
    if duplicate:
        result.select(batch_dim, 0).copy_(result.select(batch_dim, 1))
    return result
def bernoulli_scalar():
    """Return a 0-dim bool tensor sampled from a Bernoulli(0.5) distribution."""
    zero = torch.zeros((), dtype=torch.bool)
    return zero.bernoulli_()
def mask_not_all_zeros(shape):
    """Return a random bool mask of ``shape`` guaranteed to contain a True.

    Rejection-samples sign masks of standard-normal draws until at least one
    element is set.
    """
    assert len(shape) > 0
    mask = torch.randn(shape).gt(0)
    while mask.sum() == 0:
        mask = torch.randn(shape).gt(0)
    return mask
def uniform_scalar(offset=0, requires_grad=False):
    """Return a 0-dim tensor drawn uniformly from ``[offset, offset + 1)``."""
    return (torch.rand(()) + offset).requires_grad_(requires_grad)
def normal_scalar_clamp(amin, amax, requires_grad=False):
    """Return a 0-dim standard-normal sample clamped to ``[amin, amax]``."""
    return torch.randn(()).clamp(amin, amax).requires_grad_(requires_grad)
def prod_zeros(dim_size, dim_select):
    """Return a random cube of shape ``(dim_size,) * 3`` with planted zeros.

    Zeroes the slices at coordinates (0, 1), (2, 3) and (4, 3) along the two
    dimensions named in ``dim_select``; presumably used to exercise reductions
    (e.g. ``prod``) over inputs containing exact zeros — TODO confirm at callers.
    Requires ``dim_size >= 5`` for the planted coordinates to be in range.
    """
    assert len(dim_select) == 2
    cube = torch.randn(dim_size, dim_size, dim_size)
    first, second = dim_select
    for a, b in ((0, 1), (2, 3), (4, 3)):
        cube.narrow(first, a, 1).narrow(second, b, 1).zero_()
    return cube
# Wrapper marking a tensor argument that should not have requires_grad set
# when test inputs are constructed.
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
# Tuple subclass used as a marker type.
# NOTE(review): appears to flag argument tuples that the test harness should
# pass through as-is rather than convert — confirm against the harness code.
class dont_convert(tuple):
    pass
class NoArgsClass(object):
    """Sentinel used where a test entry takes no arguments.

    Behaves as an empty iterable: iterating yields nothing and ``len`` is 0.
    """

    def __iter__(self):
        # The object is its own (already exhausted) iterator.
        return self

    def __next__(self):
        raise StopIteration()
    next = __next__  # Python 2 compatibility

    def __len__(self):
        return 0

# Shared singleton instance; compare against this marker, don't construct new ones.
NO_ARGS = NoArgsClass()
def ident(x):
    """Identity function: return ``x`` unchanged."""
    return x
# Do NOT add to this list. Method tests are being DEPRECATED and replaced by OpInfos.
# See https://github.com/pytorch/pytorch/wiki/Writing-tests-in-PyTorch-1.8
#
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
# test variant name (will be used at test name suffix), // optional
# (should_check_autodiff[bool], nonfusible_nodes, fusible_nodes) for autodiff, // optional
# indices for possible dim arg, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs // optional
# )
# Note: some functions have separate schema for (Tensor other) and (Scalar other),
# and it's possible that we only support AD for Scalar version but not Tensor
# version, and vice versa.
# When writing tests, only scalar(float/int) input triggers the Scalar schema.
# uniform_scalar produces a scalar **Tensor** which won't match Scalar input.
def method_tests():
set_rng_seed(0)
return [
('add', (S, S, S), ((S, S, S),), '', (True,)),
('add', (S, S, S), ((S, S),), 'broadcast_rhs', (True,)),
('add', (S, S), ((S, S, S),), 'broadcast_lhs', (True,)),
('add', (S, 1, S), ((M, S),), 'broadcast_all', (True,)),
('add', (), ((),), 'scalar', (True,)),
('add', (S, S, S), ((),), 'scalar_broadcast_rhs', (True,)),
('add', (), ((S, S, S),), 'scalar_broadcast_lhs', (True,)),
('add', (S, S, S), (3.14,), 'constant', (True,)),
('add', (), (3.14,), 'scalar_constant', (True,)),
('add', (S, S, S), (3.14j,), 'complex_scalar_constant', (True,)),
('__radd__', (S, S, S), (3.14,), 'constant', (True, 'aten::add')),
('__radd__', (), (3.14,), 'scalar_constant', (True, 'aten::add')),
('sub', (S, S, S), ((S, S, S),), '', (True,)),
('sub', (S, S, S), ((S, S),), 'broadcast_rhs', (True,)),
('sub', (S, S), ((S, S, S),), 'broadcast_lhs', (True,)),
('sub', (S, 1, S), ((M, S),), 'broadcast_all', (True,)),
('sub', (S, S, S), ((),), 'scalar_broadcast_rhs', (True,)),
('sub', (), ((S, S, S),), 'scalar_broadcast_lhs', (True,)),
('sub', (S, S, S), (3.14,), 'constant', (True,)),
('sub', (), (3.14,), 'scalar_constant', (True,)),
('sub', (S, S, S), (3.14j,), 'complex_scalar_constant', (True,)),
('__rsub__', (S, S, S), (3.14,), 'constant', (True, 'aten::rsub')),
('__rsub__', (), (3.14,), 'scalar_constant', (True, 'aten::rsub')),
('mul', (S, S, S), ((S, S, S),), '', (True,)),
('mul', (), ((),), 'scalar', (True,)),
('mul', (S, S, S), ((S, S),), 'broadcast_rhs', (True,)),
('mul', (S, S), ((S, S, S),), 'broadcast_lhs', (True,)),
('mul', (S, 1, S), ((M, S),), 'broadcast_all', (True,)),
('mul', (S, S, S), ((),), 'scalar_broadcast_rhs', (True,)),
('mul', (), ((S, S, S),), 'scalar_broadcast_lhs', (True,)),
('mul', (S, S, S), (3.14,), 'constant', (True,)),
('mul', (), (3.14,), 'scalar_constant', (True,)),
# TODO(@anjali411): enable these tests
# ('mul', (S, S, S), (3.14j,), 'imaginary_constant', (True,)),
# ('mul', (), (3.14j,), 'imaginary_scalar_constant', (True,)),
('__rmul__', (S, S, S), (3.14,), 'constant', (True, 'aten::mul')),
('__rmul__', (), (3.14,), 'scalar_constant', (True, 'aten::mul')),
('div', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),
('div', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),
('div', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),
('div', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),
('div', (), (uniform_scalar(0.1),), 'scalar', (True,)),
('div', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),
('div', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),
('div', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),
('div', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),
('true_divide', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),
('true_divide', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),
('true_divide', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),
('true_divide', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),
('true_divide', (), (uniform_scalar(0.1),), 'scalar', (True,)),
('true_divide', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),
('true_divide', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),
('true_divide', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),
('true_divide', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),
('__rdiv__', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant',
(True, [], ['aten::mul', 'aten::reciprocal'])),
('__rdiv__', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant',
(True, [], ['aten::mul', 'aten::reciprocal'])),
('__rdiv__', torch.rand(S, S, S, dtype=torch.cdouble) + 1e-1, (3.14j,), 'complex_constant',
(True, [], ['aten::mul', 'aten::reciprocal'])),
('__rdiv__', uniform_scalar(1e-1 * (1 + 1j), requires_grad=True), (3.14j,), 'complex_scalar_constant',
(True, [], ['aten::mul', 'aten::reciprocal'])),
('div', (S, S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex', (True,)),
('div', (S, S, S), (torch.rand(S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_rhs', (True,)),
('div', (S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_lhs', (True,)),
('div', (S, 1, S), (torch.rand(M, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_all', (True,)),
('div', (), (uniform_scalar(0.1j),), 'complex_scalar', (True,)),
('div', (S, S, S), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_rhs', (True,)),
('div', (), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_lhs', (True,)),
('div', torch.rand(S, S, S, dtype=torch.cdouble) + 1e-1, (3.14j,), 'complex_constant', (True,)),
('div', uniform_scalar(1e-1j, requires_grad=True), (3.14j,), 'complex_scalar_constant', (True,)),
('pow', torch.rand(S, S, S) + 1e-3, (torch.rand(S, S, S) + 0.1,), '', (True,)),
('pow', torch.rand(S, S, S) + 1e-3, (torch.rand(1,) + 0.1,), 'broadcast_rhs', (True,)),
('pow', torch.rand(1,) + 1e-3, (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),
('pow', torch.rand(S, 1, S) + 1e-3, (torch.rand(1, S, 1) + 0.1,), 'broadcast_all', (True,)),
('pow', uniform_scalar(1e-3, requires_grad=True), (uniform_scalar(0.1),), 'scalar', (True,)),
('pow', torch.rand(S, S, S) + 1e-3, (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),
('pow', uniform_scalar(1e-3, requires_grad=True), (torch.rand(S, S, S) + 0.1,), 'scalar_broadcast_lhs', (True,)),
('pow', torch.rand(S, S, S) + 1e-3, (3.14,), 'constant', (True,)),
('pow', torch.rand(S, S, S, dtype=torch.cdouble) + 1e-3 * (1 + 1j), (3.14,), 'complex_constant', (True,)),
('__rpow__', torch.rand(S, S, S) + 1e-3, (3.14,), 'constant', (True, 'aten::pow')),
('pow', uniform_scalar(1e-3, requires_grad=True), (3.14,), 'scalar_constant', (True,)),
('pow', uniform_scalar(1e-3 * (1 + 1j), requires_grad=True), (3.14,), 'complex_scalar_constant', (True,)),
('pow', uniform_scalar(1e-3 * (1 + 1j), requires_grad=True), (3.14j,), 'complex_imaginary_exponent', (True,)),
('__rpow__', uniform_scalar(1e-3, requires_grad=True), (3.14,), 'scalar_constant', (True, 'aten::pow')),
('float_power', torch.rand(S, S, S) + 1e-3, (torch.rand(S, S, S) + 0.1,), ''),
('float_power', torch.rand(S, S, S) + 1e-3, (torch.rand(1,) + 0.1,), 'broadcast_rhs'),
('float_power', torch.rand(1,) + 1e-3, (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs'),
('float_power', torch.rand(S, 1, S) + 1e-3, (torch.rand(1, S, 1) + 0.1,), 'broadcast_all'),
('float_power', uniform_scalar(1e-3, requires_grad=True), (uniform_scalar(0.1),), 'scalar'),
('float_power', torch.rand(S, S, S) + 1e-3, (uniform_scalar(0.1),), 'scalar_broadcast_rhs'),
('float_power', uniform_scalar(1e-3, requires_grad=True), (torch.rand(S, S, S) + 0.1,), 'scalar_broadcast_lhs'),
('float_power', torch.rand(S, S, S) + 1e-3, (3.14,), 'constant'),
('transpose', (1, 2, 3), (1, 2), 'dim', (False,), [0, 1]),
('transpose', (), (0, 0), 'scalar', (False,)),
('transpose', (1,), (0, 0), '1d', (False,)),
('transpose', (L, L), (0, 1), '2d', (False,)),
('transpose', (S, S, S), (2, 0), '3d', (False,)),
('swapdims', (1, 2, 3), (1, 2), 'dim', (False,), [0, 1]),
('swapdims', (), (0, 0), 'scalar', (False,)),
('swapdims', (1,), (0, 0), '1d', (False,)),
('swapdims', (L, L), (0, 1), '2d', (False,)),
('swapdims', (S, S, S), (2, 0), '3d', (False,)),
('swapaxes', (1, 2, 3), (1, 2), 'dim', (False,), [0, 1]),
('swapaxes', (), (0, 0), 'scalar', (False,)),
('swapaxes', (1,), (0, 0), '1d', (False,)),
('swapaxes', (L, L), (0, 1), '2d', (False,)),
('swapaxes', (S, S, S), (2, 0), '3d', (False,)),
('t', (1, 2), NO_ARGS, '', (False,)),
('view', (S, S, S), (S * S, S), '', (False,)),
('view', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
('view', (S,), (S,), '1d', (False,)),
('view', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
('view', (), (1,), 'scalar_to_1d', (False,)),
('ravel', (S, S, S), NO_ARGS, '', (False,)),
('reshape', (S, S, S), (S * S, S), '', (False,)),
('reshape', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
('reshape', (S,), (S,), '1d', (False,)),
('reshape', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
('reshape', (), (1,), 'scalar_to_1d', (False,)),
('reshape_as', (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
('reshape_as', (), (non_differentiable(torch.tensor(42.)),), 'scalar'),
('reshape_as', (), (non_differentiable(torch.rand(1, 1)),), 'scalar_to_dims'),
('roll', (S, S, S), (0, 0), 'd0'),
('roll', (S, S, S), (1, 2), 'd12'),
('roll', (S, S, S), (0, 2,), 'd02'),
('roll', (S, S, S), (2, 0,), 'd20'),
('roll', (S, S, S), (-1, 0), 'neg_shift'),
('roll', (S, S, S), (10000, 1), 'loop_shift'),
('roll', (S, S, S), (2,), 'flattened'),
('roll', (S, S, S), ([1, 2, -1], [0, 1, 2]), 'three_dims'),
('rot90', (S, S, S), (1, [0, 1],), 'k1_d01'),
('rot90', (S, S, S), (1, [1, 2],), 'k1_d12'),
('rot90', (S, S, S), (1, [1, -1],), 'k1_neg_d'),
('rot90', (S, S, S), (), 'default'),
('view_as', (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
('view_as', (), (non_differentiable(torch.tensor(5.5)),), 'scalar'),
('view_as', (), (non_differentiable(torch.rand(1, 1)),), 'scalar_to_dims'),
('expand', (S, 1, 1), (S, S, S), '', (False,)),
('expand', (torch.Size([S, 1, S]),), (S, S, S), 'size', (False,)),
('expand', (S, 1), (S, S, S), 'new_dim', (False,)),
('expand', (1,), (S, S, S), '1_element', (False,)),
('expand', (1, S), (1, 1, S), 'new_dim_front_old_front_1', (False,)),
('expand', (), (dont_convert(()),), 'scalar_to_scalar'),
('expand', (), (1, 3, 2), 'scalar_to_dims', (False,)),
('expand_as', (S, 1, 1), (torch.rand(S, S, S),), '', (False,)),
('conj', (S, S, S), NO_ARGS),
('copysign', (S, S, S), ((S, S, S),), '', (False,)),
('copysign', (S, S, S), ((S, S),), 'broadcast_rhs', (False,)),
('copysign', (S, S), ((S, S, S),), 'broadcast_lhs', (False,)),
('copysign', (S, 1, S), ((M, S),), 'broadcast_all', (False,)),
('copysign', (S, S), (3.14,), 'scalar', (False,)),
('copysign', (S, S), (0.0,), 'scalar_pos_zero', (False,)),
# TorchScript does not recognize -0.0: Issue #46848
# https://github.com/pytorch/pytorch/issues/46848
# ('copysign', (S, S), (-0.0,), 'scalar_neg_zero', (False,)),
('real', (S, S, S), NO_ARGS, 'complex'),
('imag', (S, S, S), NO_ARGS, 'complex'),
('view_as_real', (S, S, S), NO_ARGS, 'complex'),
('view_as_complex', (S, S, 2), NO_ARGS),
('complex', (S, S, S), ((S, S, S),), ''),
('clamp', (S, S, S), (0, 1), '', (True,)),
('clamp', (S, S, S), (None, 0.5), 'min', (True,)),
('clamp', (S, S, S), (0.5, None), 'max', (True,)),
('clamp', (), (0, 1), 'scalar', (True,)),
('clamp', (), (None, 0.5), 'min_scalar', (True,)),
('clamp', (), (0.5, None), 'max_scalar', (True,)),
('clamp', (S, S), (), 'max_scalar_kwarg', (True,), (), (), ident, {'max': 1}),
('atan2', (S, S, S), ((S, S, S),)),
('atan2', (), ((),), 'scalar'),
('atan2', (S, S, S), ((S,),), 'broadcast_rhs'),
('atan2', (S,), ((S, S, S),), 'broadcast_lhs'),
('atan2', (S, 1, S), ((S, S),), 'broadcast_all'),
('sign', (S, S, S), NO_ARGS),
('sign', (), NO_ARGS, 'scalar'),
('sgn', (S, S, S), NO_ARGS),
('sgn', (), NO_ARGS, 'scalar'),
('trunc', (S, S, S), NO_ARGS, '', (True,)),
('trunc', (), NO_ARGS, 'scalar', (True,)),
('rad2deg', (S, S, S), NO_ARGS),
('deg2rad', (S, S, S), NO_ARGS),
# Removing the 'rsqrt' entries leads to failure in
# test_index_fill_variable_dim_*
# TODO: Remove when fixed.
# Reference: https://github.com/pytorch/pytorch/issues/48230
('rsqrt', torch.rand(S, S, S) + 1e-2, NO_ARGS, '', (True,)),
('rsqrt', uniform_scalar(1e-2, requires_grad=True), NO_ARGS, 'scalar', (True,)),
('rsqrt', torch.rand(S, S, S, dtype=torch.cfloat) + 1e-2, NO_ARGS, 'complex', (True,)),
('rsqrt', uniform_scalar(1e-2 * (1 + 1j), requires_grad=True), NO_ARGS, 'complex_scalar', (True,)),
('frac', (S, S, S), NO_ARGS, '', (True,)),
('frac', (), NO_ARGS, 'scalar', (True,)),
('fmod', (S, S, S), (1.5,), '', (True,)),
('fmod', (), (1.5,), 'scalar', (True,)),
('fmod', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),
('fmod', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),
('fmod', (S, S, S), (non_differentiable(torch.rand(S) + 1.5),), 'tensor_broadcast_rhs'),
('fmod', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),
('fmod', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),
('fmod', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),
('fmod', (S, S, S), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor_broadcast_rhs'),
('hypot', (S, S), ((S, S),)),
('remainder', (S, S, S), (1.5,), '', (True,)),
('remainder', (), (1.5,), 'scalar', (True,)),
('remainder', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),
('remainder', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),
('remainder', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),
('remainder', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),
('remainder', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),
('lerp', (S, S, S), ((S, S, S), 0.4), 'scalar_no_broadcast', (True,)),
('lerp', (S, S, S), ((S,), 0.4), 'broadcast_rhs', (True,)),
('lerp', (S,), ((S, S, S), 0.4), 'broadcast_lhs', (True,)),
('lerp', (S, 1, S), ((S, S), 0.4), 'broadcast_all', (True,)),
('lerp', (), ((), 0.4), 'scalar', (True,)),
('lerp', (S, S, S), ((), 0.4), 'scalar_broadcast_rhs', (True,)),
('lerp', (), ((S, S, S), 0.4), 'scalar_broadcast_lhs', (True,)),
('max', (S, S, S), NO_ARGS),
('max', (S, S, S), (1,), 'dim', (), [0]),
('max', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('max', (), NO_ARGS, 'scalar'),
('max', (), (0,), 'scalar_dim', (), [0]),
('max', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('max', (S, S, S), ((S, S, S),), 'elementwise', (True,)),
('max', (S, S, S), ((S,),), 'elementwise_broadcast_rhs', (True,)),
('max', (S,), ((S, S, S),), 'elementwise_broadcast_lhs', (True,)),
('max', (S, 1, S), ((S, S),), 'elementwise_broadcast_all', (True,)),
('max', (), ((),), 'scalar_elementwise', (True,)),
('max', (S, S, S), ((),), 'scalar_elementwise_broadcast_rhs', (True,)),
('max', (), ((S, S, S),), 'scalar_elementwise_broadcast_lhs', (True,)),
('min', (S, S, S), NO_ARGS, ),
('min', (S, S, S), (1,), 'dim', (), [0]),
('min', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('min', (), NO_ARGS, 'scalar'),
('min', (), (0,), 'scalar_dim', (), [0]),
('min', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('min', (S, S, S), ((S, S, S),), 'elementwise', (True,)),
('min', (S, S, S), ((S,),), 'elementwise_broadcast_rhs', (True,)),
('min', (S,), ((S, S, S),), 'elementwise_broadcast_lhs', (True,)),
('min', (S, 1, S), ((S, S),), 'elementwise_broadcast_all', (True,)),
('min', (), ((),), 'scalar_elementwise', (True,)),
('min', (S, S, S), ((),), 'scalar_elementwise_broadcast_rhs', (True,)),
('min', (), ((S, S, S),), 'scalar_elementwise_broadcast_lhs', (True,)),
('amax', (S, S, S), NO_ARGS),
('amax', (S, S, S), (1,), 'dim'),
('amax', (S, S, S), ([1, 2],), 'multiple_dim'),
('amax', (S, S, S), (1, True,), 'keepdim_dim'),
('amax', (), NO_ARGS, 'scalar'),
('amax', (), (0,), 'scalar_dim'),
('amax', (), (0, True,), 'scalar_keepdim_dim'),
('amin', (S, S, S), NO_ARGS, ),
('amin', (S, S, S), (1,), 'dim',),
('amin', (S, S, S), ([1, 2],), 'multiple_dim'),
('amin', (S, S, S), (1, True,), 'keepdim_dim'),
('amin', (), NO_ARGS, 'scalar'),
('amin', (), (0,), 'scalar_dim'),
('amin', (), (0, True,), 'scalar_keepdim_dim'),
('mean', (S, S, S), NO_ARGS, '', (True,)),
('mean', (S, S, S), (1,), 'dim', (True,), [0]),
('mean', (S, S, S), (1, True,), 'keepdim_dim', (True,), [0]),
('mean', (), NO_ARGS, 'scalar', (True,)),
('mean', (), (0,), 'scalar_dim', (True,), [0]),
('mean', (), (0, True,), 'scalar_keepdim_dim', (True,), [0]),
('mean', (S, S, S), (), 'dtype', (True,), (), (), ident, {'dtype': torch.float64}),
('kthvalue', (S, S, S), (2,)),
('kthvalue', (S, S, S), (2, 1,), 'dim', (), [1]),
('kthvalue', (S, S, S), (2, 1,), 'dim_alert_nondeterministic', (), [1],
[expectedAlertNondeterministic('kthvalue CUDA', 'cuda')]),
('kthvalue', (S, S, S), (2, 1, True,), 'keepdim_dim', (), [1]),
('kthvalue', (S,), (2, 0,), 'dim_1d', (), [1]),
('kthvalue', (S,), (2, 0, True,), 'keepdim_dim_1d', (), [1]),
('kthvalue', (), (1,), 'scalar', (), ()),
('kthvalue', (), (1, 0,), 'scalar_dim', (), [1]),
('kthvalue', (), (1, 0, True), 'scalar_keepdim_dim', (), [1]),
('quantile', (S, S, S), (0.5,)),
('quantile', (S, S, S), (0.5, 0), 'dim', (), [1]),
('quantile', (S, S, S), (0.5, None, True), 'keepdim'),
('quantile', (S, S, S), (0.5, 0, True), 'keepdim_dim', (), [1]),
('quantile', (), (0.5,), 'scalar'),
('nanquantile', (S, S, S), (0.5,)),
('nanquantile', (S, S, S), (0.5, 0), 'dim', (), [1]),
('nanquantile', (S, S, S), (0.5, None, True), 'keepdim'),
('nanquantile', (S, S, S), (0.5, 0, True), 'keepdim_dim', (), [1]),
('nanquantile', (), (0.5,), 'scalar'),
('median', (S, S, S), NO_ARGS),
('median', (S, S, S), (1,), 'dim', (), [0]),
('median', (S, S, S), (1,), 'dim_alert_nondeterministic', (), [0],
[expectedAlertNondeterministic('median CUDA with indices output', 'cuda')]),
('median', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('median', (), NO_ARGS, 'scalar'),
('median', (), (0,), 'scalar_dim', (), [0]),
('median', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('nanmedian', (S, S, S), NO_ARGS),
('nanmedian', (S, S, S), (1,), 'dim', (), [0]),
('nanmedian', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('nanmedian', (), NO_ARGS, 'scalar'),
('nanmedian', (), (0,), 'scalar_dim', (), [0]),
('nanmedian', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('mode', (S, S, S), NO_ARGS),
('mode', (S, S, S), (1,), 'dim', (), [0]),
('mode', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('mode', (), NO_ARGS, 'scalar'),
('mode', (), (0,), 'scalar_dim', (), [0]),
('mode', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('sum', (S, S, S), NO_ARGS),
('sum', (S, S, S), (1,), 'dim', (), [0]),
('sum', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('sum', (), NO_ARGS, 'scalar'),
('sum', (), (0,), 'scalar_dim', (), [0]),
('sum', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('sum', (S, S, S), ([1, 2],), 'multi_dim'),
('sum', (S, S, S), ([1, 2], True,), 'multi_dim_keepdim'),
('nansum', (S, S, S), NO_ARGS),
('nansum', (S, S, S), (1,), 'dim', (), [0]),
('nansum', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('nansum', (), NO_ARGS, 'scalar'),
('nansum', (), (0,), 'scalar_dim', (), [0]),
('nansum', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('nansum', (S, S, S), ([1, 2],), 'multi_dim'),
('nansum', (S, S, S), ([1, 2], True,), 'multi_dim_keepdim'),
('prod', (S, S, S), NO_ARGS),
('prod', (S, S, S), (1,), 'dim', (), [0]),
('prod', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),
('prod', (), NO_ARGS, 'scalar'),
('prod', (), (0,), 'scalar_dim', (), [0]),
('prod', (), (0, True,), 'scalar_keepdim_dim', (), [0]),
('prod', prod_zeros(S, [0, 1]), NO_ARGS, 'zerodims2'),
('prod', prod_zeros(S, [0, 2]), NO_ARGS, 'zerodims1'),
('prod', prod_zeros(S, [1, 2]), NO_ARGS, 'zerodims0'),
('prod', prod_zeros(S, [0, 1]), (1,), 'zeros_dims2', (), [0]),
('prod', prod_zeros(S, [0, 2]), (1,), 'zeros_dims1', (), [0]),
('prod', prod_zeros(S, [1, 2]), (1,), 'zeros_dims0', (), [0]),
('prod', prod_zeros(S, [0, 1]), (1, True), 'keepdim_zeros_dims2', (), [0]),
('prod', prod_zeros(S, [0, 2]), (1, True), 'keepdim_zeros_dims1', (), [0]),
('prod', prod_zeros(S, [1, 2]), (1, True), 'keepdim_zeros_dims0', (), [0]),
('prod', prod_single_zero(S), NO_ARGS, 'single_zero'),
('prod', (torch.tensor(0., requires_grad=True)), NO_ARGS, 'scalar_zero'),
('prod', (torch.tensor(0., requires_grad=True)), (0,), 'scalar_dim_zero', (), [0]),
('prod', (torch.tensor(0., requires_grad=True)), (0, True,), 'scalar_keepdim_dim_zero', (), [0]),
('var_mean', (S, S, S), NO_ARGS, ''),
('var_mean', (S, S, S), (1,), 'dim', [0]),
('var_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),
('var_mean', (S,), (0,), 'dim_1d', [0]),
('var_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),
('std_mean', (S, S, S), NO_ARGS, ''),
('std_mean', (S, S, S), (1,), 'dim', [0]),
('std_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),
('std_mean', (S,), (0,), 'dim_1d', [0]),
('std_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),
('renorm', (S, S, S), (2, 1, 0.5), 'dim', (), [1]),
('renorm', (S, S, S), (1, 2, 3), 'norm_1'),
('renorm', (S, S, S), (inf, 2, 0.5), 'norm_inf'),
('logcumsumexp', (S, S, S), (0,), 'dim0', (), [0]),
('logcumsumexp', (S, S, S), (1,), 'dim1', (), [0]),
('logcumsumexp', (), (0,), 'dim0_scalar', (), [0]),
('cummax', (S, S, S), (0,), 'dim0', (), [0]),
('cummax', (S, S, S), (1,), 'dim1', (), [0]),
('cummax', (), (0,), 'dim0_scalar', (), [0]),
('cummin', (S, S, S), (0,), 'dim0', (), [0]),
('cummin', (S, S, S), (1,), 'dim1', (), [0]),
('cummin', (), (0,), 'dim0_scalar', (), [0]),
('cumsum', (S, S, S), (0,), 'dim0', (), [0]),
('cumsum', (S, S, S), (1,), 'dim1', (), [0]),
('cumsum', (S, S, S), (1,), 'dim1_cast', (), [0], (), ident, {'dtype': torch.float64}),
('cumsum', (), (0,), 'dim0_scalar', (), [0]),
('cumprod', (S, S, S), (0,)),
('cumprod', (S, S, S), (1,), 'dim1', (), [0]),
('cumprod', (), (0,), 'scalar'),
('cumprod', (torch.tensor(0., requires_grad=True)), (0,), 'scalar_zeros'),
('cumprod', prod_zeros(S, [0, 1]), (1,), 'zeros_dim2', (), [0]),
('cumprod', prod_zeros(S, [0, 2]), (1,), 'zeros_dim1', (), [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0', (), [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0_cast', (), [0], (), ident, {'dtype': torch.float64}),
('log_softmax', (S, S, S), (1, torch.float64,), 'kwarg_dtype_would_break_jit_loader', (True,)),
('unfold', (), (0, 1, 1), 'scalar', (), [0]),
('unfold', (S, S, S, S), (0, 3, 1), '4d_dim0_step1', (), [0]),
('unfold', (S, S, S, S), (1, 3, 1), '4d_dim1_step1', (), [0]),
('unfold', (S, S, S, S), (2, 3, 1), '4d_dim2_step1', (), [0]),
('unfold', (S, S, S, S), (3, 3, 1), '4d_dim3_step1', (), [0]),
('unfold', (S, S, S, S), (0, 3, 2), '4d_dim0_step2', (), [0]),
('unfold', (S, S, S, S), (1, 3, 2), '4d_dim1_step2', (), [0]),
('unfold', (S, S, S, S), (2, 3, 2), '4d_dim2_step2', (), [0]),
('unfold', (S, S, S, S), (3, 3, 2), '4d_dim3_step2', (), [0]),
('unfold', (S, S, S, S), (0, 4, 1), '4d_dim0_size4', (), [0]),
('unfold', (S, S, S, S), (1, 4, 1), '4d_dim1_size4', (), [0]),
('unfold', (S, S, S, S), (2, 4, 1), '4d_dim2_size4', (), [0]),
('unfold', (S, S, S, S), (3, 4, 1), '4d_dim3_size4', (), [0]),
('unfold', (M,), (0, 3, 1), '1d_step1', (), [0]),
('unfold', (M,), (0, 3, 2), '1d_step2', (), [0]),
('unfold', (M,), (0, 3, 3), '1d_step3', (), [0]),
('unfold', (1000,), (0, 3, 11), '1d_step_gt_size', (), [0]),
('unfold', (1000,), (0, 2, 27), '1d_step_gt_size2', (), [0]),
('unfold', (10, 10), (0, 1, 2), '2d_step_gt_size', (), [0]),
('unfold', (10, 10), (1, 2, 3), '2d_step_gt_size2', (), [0]),
('unfold', (10, 10), (1, 2, 2), '2d_step_ge_size2', (), [0]),
('unfold', (S, S, S), (2, 3, 2), 'lastdim', (), [0]),
('addmm', (S, M), ((S, S), (S, M)), '', (True, ['aten::add', 'aten::mm'])),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs', (True, ['aten::add', 'aten::mm'])),
('addmm', (S, M), ((S, S), (S, M)), 'coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs_coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs', (True, ['aten::add', 'aten::mm'])),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs_coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (S, M), ((S, S, S), (S, S, M)),),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('addbmm', (S, M), ((S, S, S), (S, S, M)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (),
(), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), (), ident,
{'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)),),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (),
(), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), (), ident,
{'beta': 0.2, 'alpha': 0.6}),
('addmv', (S,), ((S, M), (M,)),),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs'),
('addmv', (S,), ((S, M), (M,)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs'),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('dot', (L,), ((L,),), '', (True,)),
('vdot', (L,), ((L,),),),
('mm', (S, M), ((M, S),), '', (True,)),
('bmm', (M, S, M), ((M, M, S),), '', (True,)),
('mv', (S, M), ((M,),), '', (True,)),
('ger', (S,), ((M,),)),
('inner', (S,), ((S,),), "1d_1d", (False,)),
('inner', (), ((S, S),), "scalar_2d", (False,)),
('matmul', (L,), ((L,),), '', (True,)),
('matmul', (S, M), ((M,),), "2d_1d", (True,)),
('matmul', (M,), ((M, S),), "1d_2d", (True,)),
('matmul', (S, M), ((M, S),), "2d_2d", (True,)),
('matmul', (S, S, M), ((M,),), "3d_1d", (True,)),
('matmul', (S, S, M), ((M, S),), "3d_2d", (True,)),
('matmul', (M,), ((S, M, S),), "1d_3d", (True,)),
('matmul', (S, M), ((S, M, S),), "2d_3d", (True,)),
('matmul', (S, S, M, M), ((S, S, M, S),), "4d_4d", (True,)),
('matmul', (S, S, M, M), ((M,),), "4d_1d", (True,)),
('matmul', (M,), ((S, S, M, S),), "1d_4d", (True,)),
('matrix_power', (S, S), [2], "n=2"),
('matrix_power', (S, S, S), [3], "n=3"),
('matrix_power', (S, S, S), [1], "n=1"),
('matrix_power', (S, S, S), [0], "n=0"),
('matrix_power', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S), [-1], "n=-1", (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('matrix_power', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S), [-3], "n=-3", (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('matrix_power', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S, S), [-2], "n=-2", (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('matrix_exp', (S, S), NO_ARGS, "single_matrix"),
('matrix_exp', (S, S, S), NO_ARGS, "batch_of_matrices"),
('mvlgamma', torch.empty(S,).uniform_(0.5, 1), [1], "p=1"),
('mvlgamma', torch.empty(S,).uniform_(1, 2), [2], "p=2"),
('mvlgamma', torch.empty(S, S).uniform_(1.5, 3), [3], "p=3"),
('mvlgamma', torch.empty(S, S).uniform_(2.5, 5), [5], "p=5"),
('addcmul', (S, S), ((S, S), (S, S)), '', (True,)),
('addcmul', (S, S), ((S, 1), (1, S)), 'broadcast_rhs', (True,)),
('addcmul', (1,), ((S, S, 1), (1, S)), 'broadcast_all', (True,)),
('addcmul', (S, S), ((S, S), (S, S)), 'scale', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (), ((), ()), 'scalar', (True,)),
('addcmul', (S, S), ((), ()), 'scalar_broadcast_rhs', (True,)),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs', (True,)),
('addcmul', (), ((), ()), 'scalar_scale', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (True,), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((S, S), (S, S))),
('addcdiv', (S, S), ((S, 1), (1, S)), 'broadcast_rhs'),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'broadcast_all'),
('addcdiv', (S, S), ((S, S), (S, S)), 'scale', (), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (), (), (), ident, {'value': 0.5}),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (), (), (), ident, {'value': 0.5}),
('addcdiv', (), ((), ()), 'scalar'),
('addcdiv', (S, S), ((), ()), 'scalar_broadcast_rhs'),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs'),
('addcdiv', (), ((), ()), 'scalar_scale', (), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (), (), (), ident, {'value': 0.5}),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (), (), (), ident, {'value': 0.5}),
('zero_', (S, S, S), NO_ARGS),
('zero_', (), NO_ARGS, 'scalar'),
('logaddexp', (S, S), ((S, S),)),
('logaddexp2', (S, S), ((S, S),)),
('logsumexp', (S, S), (1,), '', (True,)),
('logsumexp', (), (0,), 'scalar', (True,)),
('norm', (S, S), (), 'default'),
('norm', (S, S), (2,), '2'),
('norm', (S, S), (0,), '0'),
('norm', (S, S), (0.5,), '0_5'),
('norm', (S, S), (1,), '1'),
('norm', (S, S), (3,), '3'),
('norm', (S, S), (inf,), 'inf'),
('norm', (S, S), (-inf,), '-inf'),
('norm', (S, S), ('fro',), 'fro_default'),
('norm', (S, S), ('fro', [0, 1],), 'fro'),
('norm', (S, S), ('nuc',), 'nuc', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('norm', (S, S, S), ('nuc', [1, 2]), 'nuc_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('norm', (S, S), (-1,), 'neg_1'),
('norm', (S, S), (-2,), 'neg_2'),
('norm', (S, S), (-0.5,), 'neg_0_5'),
('norm', (S, S), (-1.5,), 'neg_1_5'),
('norm', (S, S), (-2, 1,), 'neg_2_2_dim', (), [1]),
('norm', (S, S), (-1, 1,), 'neg_1_2_dim', (), [1]),
('norm', (S, S), (0, 1,), '0_2_dim', (), [1]),
('norm', (S, S), (1, 1,), '1_2_dim', (), [1]),
('norm', (S, S), (2, 1,), '2_2_dim', (), [1]),
('norm', (S, S), (3, 1,), '3_2_dim', (), [1]),
('norm', (S, S), (inf, 1,), 'inf_2_dim'),
('norm', torch.rand(S, S, S) + 5e-2, (1.5,), '1_5_default'),
('norm', (S, S, S), (2, 1), '2_dim', (), [1]),
('norm', (S, S, S), (3, 1), '3_dim', (), [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1), '1_5_dim', (), [1]),
('norm', (S, S, S), (2, 1, True), 'keepdim_2_dim', (), [1]),
('norm', (S, S, S), (3, 1, True), 'keepdim_3_dim', (), [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1, True), 'keepdim_1_5_dim', (), [1]),
('norm', (), (2, 0), '2_dim_scalar', (), [1]),
('norm', (), (3, 0), '3_dim_scalar', (), [1]),
('norm', (), (2, 0, True), 'keepdim_2_dim_scalar', (), [1]),
('norm', (), (3, 0, True), 'keepdim_3_dim_scalar', (), [1]),
('clone', (S, M, S), NO_ARGS),
('clone', (), NO_ARGS, 'scalar'),
('contiguous', (S, S), NO_ARGS, '', (True,)),
('contiguous', torch.randn(S, S).transpose(0, 1), NO_ARGS, 'not_contiguous', (True,)),
('dist', (S, S, S), ((S, S, S),)),
('dist', (S, S, S), ((S,),), 'broadcast_rhs'),
('dist', (S,), ((S, S, S),), 'broadcast_lhs'),
('dist', (S, 1, S), ((S, S),), 'broadcast_all'),
('dist', (), ((),), 'scalar'),
('dist', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('dist', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('dist', (S, S, S), ((S, S, S), 4), '4'),
('dist', (S, S, S), ((S,), 4), '4_broadcast_rhs'),
('dist', (S,), ((S, S, S), 4), '4_broadcast_lhs'),
('dist', (S, 1, S), ((S, S), 4), '4_broadcast_all'),
('dist', (), ((), 4), 'scalar_4'),
('dist', (S, S, S), ((), 4), 'scalar_4_broadcast_rhs'),
('dist', (), ((S, S, S), 4), 'scalar_4_broadcast_lhs'),
('diag_embed', (S, S), NO_ARGS),
('diagonal', (M, M), NO_ARGS, '2d'),
('diagonal', (3, 5), NO_ARGS, '2d_wide'),
('diagonal', (3, 5), (2,), '2d_wide_pos'),
('diagonal', (3, 5), (-2,), '2d_wide_neg'),
('diagonal', (5, 3), NO_ARGS, '2d_tall'),
('diagonal', (5, 3), (2,), '2d_tall_pos'),
('diagonal', (5, 3), (-2,), '2d_tall_neg'),
('diagonal', (M, M), (1,), '2d_1'),
('diagonal', (M, M), (2,), '2d_2'),
('diagonal', (M, M, M), (1, 1, 2), '3d_1'),
('diagonal', (M, M, M), (2, 0, 1), '3d_2'),
('diagonal', (M, M, M), (-2, 0, 1), '3d_3'),
('tril', (M, M), NO_ARGS),
('tril', (M, M), (2,), 'idx'),
('tril', (S, M, M), NO_ARGS, 'batched'),
('tril', (S, M, M), (2,), 'batched_idx'),
('tril', (3, 3, S, S), NO_ARGS, 'more_batched'),
('triu', (M, M), NO_ARGS),
('triu', (M, M), (2,), 'idx'),
('triu', (S, M, M), NO_ARGS, 'batched'),
('triu', (S, M, M), (2,), 'batched_idx'),
('triu', (3, 3, S, S), NO_ARGS, 'more_batched'),
('cross', (S, 3), ((S, 3),)),
('cross', (S, 3, S), ((S, 3, S), 1), 'dim'),
('index_add', (S, S), (0, index_variable(2, S), (2, S)), 'dim', (), [0]),
('index_add', (), (0, torch.tensor([0], dtype=torch.int64), (1,)), 'scalar_input_dim', (), [0]),
('index_add', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalar_all_dim', (), [0]),
('index_add', (S, S), (0, index_variable(2, S), (2, S)), 'alert_nondeterministic', (), [0],
[expectedAlertNondeterministic('index_add_cuda_', 'cuda')]),
('index_copy', (S, S), (0, index_perm_variable(2, S), (2, S)), 'dim', (), [0]),
('index_copy', (S, S), (0, index_perm_variable(2, S), (2, S)), 'dim_alert_nondeterministic', (), [0],
[expectedAlertNondeterministic('index_copy')]),
('index_copy', (), (0, torch.tensor([0], dtype=torch.int64), (1,)), 'scalar_input_dim', (), [0]),
('index_copy', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalar_all_dim', (), [0]),
('index_fill', (S, S), (0, index_variable(2, S), 2), 'dim', (), [0]),
('index_fill', (S, S), (0, index_variable(2, S), ()), 'variable_dim', (), [0]),
('index_fill', (S, S), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_index_dim', (), [0]),
('index_fill', (), (0, torch.tensor([0], dtype=torch.int64), 2), 'scalar_input_dim', (), [0]),
('index_fill', (), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_both_dim', (), [0]),
('inverse', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('inverse', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S, 2, 3, dtype=dtype).to(device),
NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (S, S), NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (1, 1), NO_ARGS, '1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_matrix(S), NO_ARGS, 'symmetric', (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_psd_matrix(S),
NO_ARGS, 'symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_pd_matrix(S),
NO_ARGS, 'symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_square_matrix_of_rank(S, S - 2),
NO_ARGS, 'dim2_null', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_square_matrix_of_rank(S, 1), NO_ARGS, 'rank1', (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_square_matrix_of_rank(S, 2), NO_ARGS, 'rank2', (),
NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS,
'distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (3, 3, S, S), NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (3, 3, 1, 1), NO_ARGS, 'batched_1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_matrix(S, 3),
NO_ARGS, 'batched_symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_psd_matrix(S, 3),
NO_ARGS, 'batched_symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_symmetric_pd_matrix(S, 3),
NO_ARGS, 'batched_symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda dtype, device: random_fullrank_matrix_distinct_singular_value(S, 3, 3), NO_ARGS,
'batched_distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
# For `logdet` the function at det=0 is not smooth.
# We need to exclude tests with det=0 (e.g. dim2_null, rank1, rank2) and use
# `make_nonzero_det` to make the random matrices have nonzero det. For
# `logdet`, we also set `make_nonzero_det(matrix, sign=1)` to make the
# matrix have positive det.
('logdet', lambda dtype, device: make_nonzero_det(torch.randn(S, S), 1),
NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(torch.randn(1, 1), 1),
NO_ARGS, '1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_symmetric_matrix(S), 1), NO_ARGS,
'symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_symmetric_pd_matrix(S), 1), NO_ARGS,
'symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_fullrank_matrix_distinct_singular_value(S), 1, 0), NO_ARGS,
'distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(torch.randn(3, 3, S, S), 1),
NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(torch.randn(3, 3, 1, 1), 1),
NO_ARGS, 'batched_1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_symmetric_matrix(S, 3), 1), NO_ARGS,
'batched_symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_symmetric_pd_matrix(S, 3), 1), NO_ARGS,
'batched_symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda dtype, device: make_nonzero_det(random_fullrank_matrix_distinct_singular_value(S, 3), 1, 0), NO_ARGS,
'batched_distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (S, S), (False,), 'square_single', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (S, S - 2), (True,), 'tall_single' , (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (S - 2, S), (False,), 'wide_single' , (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, S, S), (False,), 'square_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, S, S - 2), (True,), 'tall_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, S - 2, S), (True,), 'wide_batched' , (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, 2, S, S), (False,), 'square_many_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, 2, S, S - 2), (True,), 'tall_many_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, 2, S - 2, S), (True,), 'wide_many_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (S, S), (True, False), 'square_single_no_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (S, S), (True, True), 'square_single_with_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (3, S, S), (True, False), 'square_batch_no_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (3, S, S), (True, True), 'square_batch_with_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (3, 3, S, S), (True, False), 'square_many_batches_no_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('lu', (3, 3, S, S), (True, True), 'square_many_batches_with_info', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('solve', (S, S), (lambda dtype, device: random_fullrank_matrix_distinct_singular_value(
S, silent=True, dtype=dtype, device=device),), '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('solve', (S, S, S),
(lambda dtype, device:
random_fullrank_matrix_distinct_singular_value(S, S, silent=True, dtype=dtype, device=device),),
'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('solve', (2, 3, S, S),
(lambda dtype, device:
random_fullrank_matrix_distinct_singular_value(S, 2, 3, silent=True, dtype=dtype, device=device),),
'batched_dims', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('solve', (2, 2, S, S),
(lambda dtype, device:
random_fullrank_matrix_distinct_singular_value(S, 1, silent=True, dtype=dtype, device=device),),
'batched_broadcast_A', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('solve', (1, S, S),
(lambda dtype, device:
random_fullrank_matrix_distinct_singular_value(S, 2, 2, silent=True, dtype=dtype, device=device),),
'batched_broadcast_b', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('fill_', (S, S, S), (1,), 'number'),
('fill_', (), (1,), 'number_scalar'),
('fill_', (S, S, S), ((),), 'variable'),
('eq_', (S, S, S), ((S, S, S),)),
('eq_', (S, S, S), ((1,),), 'broadcast_rhs'),
('eq_', (), ((),), 'scalar'),
('eq_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('ne_', (S, S, S), ((S, S, S),)),
('ne_', (S, S, S), ((1,),), 'broadcast_rhs'),
('ne_', (), ((),), 'scalar'),
('ne_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('gt_', (S, S, S), ((S, S, S),)),
('gt_', (S, S, S), ((1,),), 'broadcast_rhs'),
('gt_', (), ((),), 'scalar'),
('gt_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('ge_', (S, S, S), ((S, S, S),)),
('ge_', (S, S, S), ((1,),), 'broadcast_rhs'),
('ge_', (), ((),), 'scalar'),
('ge_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('lt_', (S, S, S), ((S, S, S),)),
('lt_', (S, S, S), ((1,),), 'broadcast_rhs'),
('lt_', (), ((),), 'scalar'),
('lt_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('le_', (S, S, S), ((S, S, S),)),
('le_', (S, S, S), ((1,),), 'broadcast_rhs'),
('le_', (), ((),), 'scalar'),
('le_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('eq_', (S, S, S), (0,), 'pyscalar'),
('ne_', (S, S, S), (0,), 'pyscalar'),
('gt_', (S, S, S), (0,), 'pyscalar'),
('ge_', (S, S, S), (0,), 'pyscalar'),
('le_', (S, S, S), (0,), 'pyscalar'),
('lt_', (), (0,), 'pyscalar'),
('eq_', (), (0,), 'pyscalar_scalar'),
('ne_', (), (0,), 'pyscalar_scalar'),
('gt_', (), (0,), 'pyscalar_scalar'),
('ge_', (), (0,), 'pyscalar_scalar'),
('lt_', (), (0,), 'pyscalar_scalar'),
('le_', (), (0,), 'pyscalar_scalar'),
('permute', (1, 2, 3, 4), (0, 2, 3, 1), '', (True,)),
('permute', (1, 2, 3, 4), (0, -2, -1, 1), 'neg_dim', (True,)),
('permute', (), (dont_convert(()),), 'scalar', (True,)),
('select', (S, S, S), (1, 2), 'dim', (), [0]),
('select', (S, S, S), (1, -1), 'wrap_dim', (), [0]),
('select', (S,), (0, 2), '1d'),
('narrow', (S, S, S), (1, 2, 2), 'dim', (), [0]),
('narrow', (S, S, S), (1, 0, 0), 'empty_dim', (), [0]),
('squeeze', (S, 1, S, 1), NO_ARGS, '', (True,)),
('squeeze', (1, 1, 1, 1), NO_ARGS, 'input_sizes_are_ones', (True,)),
('squeeze', (S, 1, S, 1), (1,), '1_dim', (True,), [0]),
('squeeze', (S, 1, S, 1), (2,), 'not_1_dim', (True,), [0]),
('squeeze', (), (0,), 'scalar', (True,), [0]),
('unsqueeze', (S, S, S), (0,), 'first', (True,), [0]),
('unsqueeze', (S, S, S), (1,), 'middle', (True,), [0]),
('unsqueeze', (S, S, S), (3,), 'last', (True,), [0]),
('unsqueeze', (), (0,), 'scalar', (True,), [0]),
('chunk', (S, S, S), (2,), '', (True, 'prim::ConstantChunk')),
('chunk', (S, S, S), (S, 1), 'dim', (True, 'prim::ConstantChunk'), [1]),
('split', (S, S, S), (2,), '', (True,)),
('split', (S, S, S), (S, 1), 'dim', (True,), [1]),
('split', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'size_list',
(True, 'aten::split_with_sizes')),
('split', (S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2), 'size_list_dim',
(True, 'aten::split_with_sizes'), [1]),
('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), '', (True,)),
('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3), 0],), 'size_0', (True, )),
('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'dim', (True, ), [1]),
('tensor_split', (S, S, S), (3,), 'sections', (False,)),
('tensor_split', (S, S, S), (3, 1), 'sections_dim', (False,), [1]),
('tensor_split', (S, S, S), ([2, 4],), 'indices', (False,)),
('tensor_split', (S, S, S), ([2, 4], 1), 'indices_dim', (False,), [1]),
('scatter', (M, S), (0, gather_variable((S, S), 1, M), (S, S)), 'dim0', (), [0]),
('scatter', (M, S), (1, gather_variable((M, S // 2), 0, S), (M, S // 2)), 'dim1', (), [0]),
('scatter', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalartensor_all_dim0', (), [0]),
('scatter', (), (0, torch.tensor(0, dtype=torch.int64), 2.5), 'scalar_all_dim0', (), [0]),
('scatter_add', (M, S), (0, gather_variable((S, S), 1, M), (S, S)), 'dim0', (), [0]),
('scatter_add', (M, S), (1, gather_variable((M, S // 2), 0, S), (M, S // 2)), 'dim1', (), [0]),
('scatter_add', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalar_all_dim0', (), [0]),
('scatter_add', (M, S), (0, gather_variable((S, S), 1, M), (S, S)), 'alert_nondeterministic', (), [0],
[expectedAlertNondeterministic('scatter_add_cuda_kernel', 'cuda')]),
('masked_fill', (M, M), (torch.BoolTensor(M, M).bernoulli_(), 10)),
('masked_fill', (M, M), (torch.BoolTensor(M, M).bernoulli_(), ()), 'tensor'),
('masked_fill', (M,), (torch.BoolTensor(M, M).bernoulli_(), 10), 'broadcast_lhs'),
('masked_fill', (M, M), (torch.BoolTensor(M,).bernoulli_(), 10), 'broadcast_rhs'),
('masked_fill', (), (torch.tensor(0, dtype=torch.bool).bernoulli_(), 10), 'scalar'),
('masked_fill', (), (torch.tensor(0, dtype=torch.bool).bernoulli_(), ()),
'scalar_variable'),
('masked_fill', (M, M), (torch.tensor(0, dtype=torch.bool).bernoulli_(), 10),
'scalar_broadcast_rhs'),
('masked_scatter', (M,), (torch.BoolTensor(M, M).bernoulli_(), (M, M)),
'broadcast_lhs'),
('maximum', (S, S), ((S, S),)),
('minimum', (S, S), ((S, S),)),
('fmax', (S, S), ((S, S),)),
('fmin', (S, S), ((S, S),)),
('resize_', (S, S, S), (torch.Size([S * S, S])), 'fewer_dims'),
('resize_', (), (dont_convert(()),), 'scalar'),
('resize_', (), (torch.Size([1, 1, 1])), 'scalar_to_dims'),
('resize_as_', (), (non_differentiable(torch.tensor(5.)),), 'scalar'),
('resize_as_', (), (non_differentiable(torch.randn((1, 1, 1))),), 'scalar_to_dims'),
('resize_as_', (S, S, S), (non_differentiable(torch.randn(S * S, S)),)),
('sort', (S, M, S), NO_ARGS),
('sort', (S, M, S), (1,), 'dim'),
('sort', (S, M, S), (1, True), 'dim_desc'),
('sort', (), NO_ARGS, 'scalar'),
('sort', (), (0,), 'dim_scalar'),
('sort', (), (0, True), 'dim_desc_scalar'),
('msort', (S, M, S), NO_ARGS),
('topk', (S, M, S), (3,)),
('topk', (S, M, S), (3, 1), 'dim', (), [1]),
('topk', (S, M, S), (3, 1, True), 'dim_desc', (), [1]),
('topk', (S, M, S), (3, 1, True, True), 'dim_desc_sort', (), [1]),
('topk', (), (1,), 'scalar'),
('topk', (), (1, 0), 'dim_scalar', (), [1]),
('topk', (), (1, 0, True), 'dim_desc_scalar', (), [1]),
('topk', (), (1, 0, True, True), 'dim_desc_sort_scalar', (), [1]),
('take', (S, S, S), (torch.LongTensor([[-3, 2], [20, 2]]),)),
('take', (S, S, S), (torch.tensor(0, dtype=torch.int64),), 'scalar_index'),
('take', (), (torch.LongTensor([0]),), 'scalar_data'),
('take', (), (torch.tensor(0, dtype=torch.int64),), 'scalar_both'),
('where', (M, M), (mask_not_all_zeros((M, M)), (M, M)), '', (True,)),
('where', (M, 1, M), (mask_not_all_zeros((M, M)), (M, M, 1)), 'broadcast_all', (True,)),
('where', (), (bernoulli_scalar(), ()), 'scalar', (True,)),
('where', (M, 1, M), (bernoulli_scalar(), (M, M, 1)), 'scalar_broadcast_mask', (True,)),
('where', (), (mask_not_all_zeros((M, M)), ()), 'scalar_broadcast_non_mask', (True,)),
('__getitem__', torch.randn(S, S, S), (dont_convert([1, 2]),)),
('__getitem__', torch.randn(S, S, S), (slice(0, 3),), 'slice'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(0, 3), 1]),), 'slice_index'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 2, 3], [1, 3, 3], [0, 0, 2]]),), 'adv_index'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 0, 3], [1, 1, 3], [0, 0, 2]]),), 'adv_index_dup'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(None), slice(None), [0, 3]]),), 'adv_index_end'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(None), [0, 3], slice(None)]),), 'adv_index_mid'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], slice(None), slice(None)]),), 'adv_index_beg'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], [1, 2], slice(None)]),), 'adv_index_comb'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], ]),), 'adv_index_sub'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], slice(None)]),), 'adv_index_sub_2'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], Ellipsis]),), 'adv_index_sub_3'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 2, 3], [1, 3, 3],
torch.LongTensor([0, 0, 2])]),), 'adv_index_var'),
('to_sparse', (S, S), (), '', (), (), [], lambda x: x.to_dense()),
('kron', (S, S), ((M, L),))
]
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
    """Materialize raw test-spec arguments into concrete tensors/Variables.

    Args:
        call_args: a single argument or tuple of arguments as written in the
            method-test tables (size tuples, tensors, callables, wrappers, ...).
        requires_grad: whether created floating/complex tensors track gradients.
        non_contiguous: if True, created tensors are made non-contiguous.
        call_kwargs: optional dict of keyword arguments, mapped the same way.
        dtype: dtype used when materializing size tuples and scalar tensors.
        device: target device for all created tensors.

    Returns:
        ``(args, kwargs)`` with every entry materialized.

    Raises:
        RuntimeError: if a user-provided real tensor is used in a test that
            runs with a complex dtype (or vice versa).
    """
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        def maybe_non_contig(tensor):
            return tensor if not non_contiguous else make_non_contiguous(tensor)

        if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
            # Pass through unchanged: sizes are consumed as-is and
            # dont_convert wrappers explicitly opt out of conversion.
            return arg
        elif isinstance(arg, tuple) and len(arg) == 0:
            # Empty tuple means "0-dim (scalar) tensor".
            var = torch.randn((), dtype=dtype, device=device)
            var.requires_grad = requires_grad
            return var
        elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
            # A tuple of ints is a shape spec.
            return Variable(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device)), requires_grad=requires_grad)
        # double check casting
        elif isinstance(arg, non_differentiable):
            # Wrapped tensors never get requires_grad.  Both branches of the
            # previous isinstance(arg.tensor, torch.Tensor) check returned the
            # identical expression, so the redundant branch was removed.
            return maybe_non_contig(arg.tensor.to(device=device))
        elif isinstance(arg, torch.Tensor):
            # Promote single precision to double so gradcheck is accurate.
            if arg.dtype == torch.float:
                arg = arg.double()
            if arg.dtype == torch.cfloat:
                arg = arg.to(torch.cdouble)
            if arg.is_complex() != dtype.is_complex:
                # BUG FIX: previously two separate string arguments were passed
                # to RuntimeError, turning the message into a tuple.
                raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, "
                                   "which is not supported for now")
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
            v = maybe_non_contig(arg).detach().to(device=device).clone()
            v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
            return v
        elif callable(arg):
            # Factories (e.g. mask generators) are invoked with dtype/device
            # and their result mapped recursively.
            return map_arg(arg(dtype=dtype, device=device))
        else:
            return arg
    args_out = tuple(map_arg(arg) for arg in call_args)
    kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
    return args_out, kwargs_out
def _compare_trilu_indices(
        self, row, col, offset=0, dtype=torch.long, device='cpu'):
    """Check tril_indices/triu_indices against a dense tril/triu + nonzero().

    Bug fix: in the non-empty branch the second assertion previously
    duplicated the tril check; it now exercises triu/triu_indices, matching
    the structure of the empty-matrix branch.
    """
    if row == 0 or col == 0:
        # have to handle this separately as tril and triu does not take
        # empty matrix as input
        self.assertEqual(
            torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
            torch.tril_indices(row, col, offset, dtype=dtype, device=device))
        self.assertEqual(
            torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
            torch.triu_indices(row, col, offset, dtype=dtype, device=device))
    else:
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(
            torch.ones(row, col, device='cpu')
            .tril(offset).nonzero().to(dtype).transpose(0, 1),
            torch.tril_indices(row, col, offset, dtype=dtype, device=device))
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(
            torch.ones(row, col, device='cpu')
            .triu(offset).nonzero().to(dtype).transpose(0, 1),
            torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
        self, row, col, offset=0, dtype=torch.long, device='cpu'):
    """Spot-check the last ~100 tril/triu indices of a large matrix.

    The reference result is computed on CPU via a dense mask + nonzero();
    CUDA caches are flushed around each step to keep peak memory low.
    """
    for variant in ('tril', 'triu'):
        dense = torch.ones(row, col, dtype=dtype, device='cpu')
        masked = getattr(dense, variant)(offset)
        expected = masked.nonzero()[-100:-1, :].transpose(0, 1).to(device)
        torch.cuda.empty_cache()

        indices_fn = getattr(torch, variant + '_indices')
        actual = indices_fn(
            row, col, offset, dtype=dtype, device=device)[:, -100:-1]
        self.assertEqual(expected, actual)
        torch.cuda.empty_cache()
# Each entry is a positional-argument tuple for tril_indices/triu_indices:
# (
#   row
#   col
#   offset (optional)
#   dtype (optional)
# )
# Shapes cover square/tall/wide matrices, zero-sized dims, offsets well
# beyond the matrix bounds (both signs), and sizes around internal kernel
# thresholds (e.g. 512/1024/2028).
tri_tests_args: List[Tuple[Any, ...]] = [
    (1, 1),
    (3, 3),
    (3, 3, 1),
    (3, 3, 2),
    (3, 3, 200),
    (3, 3, -1),
    (3, 3, -2),
    (3, 3, -200),
    (0, 3, 0),
    (0, 3, 1),
    (0, 3, -1),
    (3, 0, 0),
    (3, 0, 1),
    (3, 0, -1),
    (0, 0, 0),
    (0, 0, 1),
    (0, 0, -1),
    (3, 6, 0),
    (3, 6, 1),
    (3, 6, 3),
    (3, 6, 9),
    (3, 6, -1),
    (3, 6, -3),
    (3, 6, -9),
    (6, 3, 0),
    (6, 3, 1),
    (6, 3, 3),
    (6, 3, 9),
    (6, 3, -1),
    (6, 3, -3),
    (6, 3, -9),
    (258, 253, 1, torch.float32),
    (257, 258, 1, torch.float64),
    (258, 258, 1, torch.short),
    (3, 513, 1, torch.long),
    (513, 3, 1, torch.int),
    (513, 0, 1, torch.double),
    (1024, 1024),
    (1024, 1024, 500, torch.float32),
    (1024, 1024, 1023),
    (1024, 1024, -500),
    (1023, 1025),
    (1025, 1023, 1022),
    # NOTE(review): duplicate of the (1024, 1024, -500) entry above —
    # harmless but probably unintentional; confirm before removing.
    (1024, 1024, -500),
    (3, 2028),
    (3, 2028, 1),
    (3, 2028, -1),
    (2028, 3),
    (2028, 1),
    (2028, 1, -1)
]
tri_large_tests_args: List[Tuple[int, ...]] = [
    # The large test cases below are deliberately commented out to speed up
    # CI and to avoid OOM errors. When modifying the implementations of
    # tril_indices or triu_indices, re-enable these cases locally and make
    # sure they pass.
    #
    # (1, 268435455),
    # (5000, 5000),
    # (10000, 10000),
    # (268435455, 1),
    # (134217727, 2, 1),
    # (2, 134217727, 1),
    # (536870901, 1),
    # (1, 536870901),
    # (268435455, 2, 1),
    # (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
    """Cross-check tril_indices/triu_indices against a dense nonzero()
    reference on a 3x3 matrix, and verify that a sparse layout is rejected.
    """
    base = torch.ones(
        3, 3, dtype=torch.long, device=device, layout=torch.strided)
    lower = base.tril(0).nonzero().transpose(0, 1)
    upper = base.triu(0).nonzero().transpose(0, 1)

    # Default layout and an explicit strided layout must agree.
    self.assertEqual(lower, torch.tril_indices(3, 3, device=device))
    self.assertEqual(
        lower, torch.tril_indices(3, 3, device=device, layout=torch.strided))
    self.assertEqual(upper, torch.triu_indices(3, 3, device=device))
    self.assertEqual(
        upper, torch.triu_indices(3, 3, device=device, layout=torch.strided))

    # Only strided output is supported; sparse_coo must raise.
    for bad_fn in (torch.triu_indices, torch.tril_indices):
        self.assertRaises(
            RuntimeError,
            lambda fn=bad_fn: fn(
                1, 1, device=device, layout=torch.sparse_coo))
def unpack_variables(args):
    """Recursively rebuild nested tuples element-wise; non-tuples pass through.

    Relies on the module-level ``istuple`` helper to decide what counts as a
    tuple (it also recognizes namedtuple-like results).
    """
    if not istuple(args):
        return args
    return tuple(unpack_variables(elem) for elem in args)
# Ops whose functional (torch.*) variant is skipped, typically because the
# functional form takes its arguments in a different order or is otherwise
# not a drop-in replacement for the method form.
EXCLUDE_FUNCTIONAL = {
    'addmm',
    'addmm_',
    'addbmm',
    'baddbmm',
    'addmv',
    'addmv_',
    'addr',
    'addr_',
    'reshape',
    'where'  # argument order
}
# Ops excluded from gradcheck (currently none; kept as an extension point).
EXCLUDE_GRADCHECK: Dict[str, Any] = {
}
# Ops excluded from gradgradcheck (currently none; kept as an extension point).
EXCLUDE_GRADGRADCHECK: Dict[str, Any] = {
}
EXCLUDE_GRADGRADCHECK_BY_TEST_NAME = {
    # *det methods use svd in backward when the matrix is not invertible.
    # However, svd backward is unstable unless the matrix has positive
    # distinct singular values. Generated random matrices satisfy this with
    # high probability, but we can't rely on it. So only test gradgrad on
    # invertible test cases and _distinct_singular_values.
    'test_det',
    'test_det_1x1',
    'test_det_symmetric',
    'test_det_symmetric_psd',
    'test_det_dim2_null',
    'test_det_rank1',
    'test_det_rank2',
    'test_det_batched',
    'test_det_batched_1x1',
    'test_det_batched_symmetric',
    'test_det_batched_symmetric_psd',
    # `other` expand_as(self, other) is not used in autograd.
    'test_expand_as',
    'test_logdet',
    'test_logdet_1x1',
    'test_logdet_symmetric',
    'test_logdet_batched',
    'test_logdet_batched_1x1',
    'test_logdet_batched_symmetric',
    'test_cdist',
}
def exclude_tensor_method(name, test_name):
    """Return True when the Tensor-method variant of this op/test is skipped.

    A test is skipped when (a) the test name has no tensor equivalent at all,
    (b) the op has no out-of-place tensor method and the name is not the
    in-place spelling, or (c) the op lives in the ``fft.`` namespace.
    """
    # there are no tensor equivalents for these (inplace or out)
    no_tensor_variant_tests = {
        'test_slice',
        'test_where',
        'test_where_broadcast_all',
        'test_where_scalar',
        'test_where_scalar_broadcast_mask',
        'test_where_scalar_broadcast_non_mask',
        'test_var_mean_keepdim_dim_1d',
        'test_var_mean_keepdim_dim',
        'test_var_mean_dim_1d',
        'test_var_mean_dim',
        'test_var_mean',
        'test_std_mean_keepdim_dim_1d',
        'test_std_mean_keepdim_dim',
        'test_std_mean_dim_1d',
        'test_std_mean_dim',
        'test_std_mean',
        'test_view_as_complex',
        'test_view_as_real_complex',
        'test_real_complex',
        'test_imag_complex',
        'test_complex'
    }
    # there are no out-of-place tensor equivalents for these
    no_outplace_variant = {
        'index_add',
        'index_copy',
        'index_fill',
        'masked_fill',
        'masked_scatter',
        'scatter',
        'scatter_add',
        'det',
    }

    if test_name in no_tensor_variant_tests:
        return True

    # A trailing underscore marks the in-place spelling, except for dunder
    # ("magic") methods such as __getitem__.
    is_magic_method = name[:2] == '__' and name[-2:] == '__'
    is_inplace = name[-1] == "_" and not is_magic_method
    if not is_inplace and name in no_outplace_variant:
        return True

    return 'fft.' in name
| 52.639624 | 129 | 0.546352 | 20,401 | 0.110322 | 0 | 0 | 128 | 0.000692 | 0 | 0 | 42,592 | 0.230323 |
101a8bad1d0114bbb79e68c96eb4371364f2ff83 | 771 | py | Python | examples/python/numpy_functions.py | benedicteb/FYS2140-Resources | 31b572e455c3ac8dff868db903f18687e363f1bf | [
"MIT"
] | null | null | null | examples/python/numpy_functions.py | benedicteb/FYS2140-Resources | 31b572e455c3ac8dff868db903f18687e363f1bf | [
"MIT"
] | null | null | null | examples/python/numpy_functions.py | benedicteb/FYS2140-Resources | 31b572e455c3ac8dff868db903f18687e363f1bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Script viser import av funksjoner fra numpy og bruk av noen.
@author Benedicte Emilie Braekken
"""
from numpy import *
print 'e^1 =', exp( 1 ) # Eksponentialfunksjonen
print 'cos(pi) =', cos( pi ) # Cosinus
print 'sqrt(4) =', sqrt( 4 ) # Kvadratrot
print 'range(5) =', range(5) # Rekke opp til 4
print 'zeros(5) =', zeros(5) # Tom array med 5 elementer
print 'linspace(0,5,5) =', linspace(0,5,5) # Rekke som ikke oeker med 1
"""
bruker @ unix $ python numpy_functions.py
e^1 = 2.71828182846
cos(pi) = -1.0
sqrt(4) = 2.0
range(5) = [0, 1, 2, 3, 4]
zeros(5) = [ 0. 0. 0. 0. 0.]
linspace(0,5,5) = [ 0. 1.25 2.5 3.75 5. ]
"""
| 28.555556 | 72 | 0.553826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.710765 |
63d7715ef7d63209fb2728483a0c7ddffb7d19ed | 4,261 | py | Python | homeassistant/components/automation/sun.py | instantchow/home-assistant | 6797365d4fd74328a0c9e961f652cfb37f48bc7d | [
"MIT"
] | null | null | null | homeassistant/components/automation/sun.py | instantchow/home-assistant | 6797365d4fd74328a0c9e961f652cfb37f48bc7d | [
"MIT"
] | null | null | null | homeassistant/components/automation/sun.py | instantchow/home-assistant | 6797365d4fd74328a0c9e961f652cfb37f48bc7d | [
"MIT"
] | null | null | null | """
Offer sun based automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#sun-trigger
"""
import logging
from datetime import timedelta
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.helpers.event import track_sunrise, track_sunset
# The sun component must be set up before this platform can be used.
DEPENDENCIES = ['sun']
# Configuration keys accepted in the automation YAML.
CONF_OFFSET = 'offset'
CONF_EVENT = 'event'
CONF_BEFORE = "before"
CONF_BEFORE_OFFSET = "before_offset"
CONF_AFTER = "after"
CONF_AFTER_OFFSET = "after_offset"
# Accepted values for the 'event' / 'before' / 'after' keys.
EVENT_SUNSET = 'sunset'
EVENT_SUNRISE = 'sunrise'
_LOGGER = logging.getLogger(__name__)
def trigger(hass, config, action):
    """Attach *action* to the configured sunrise/sunset event.

    Returns True when a listener was registered, False when the
    configuration is missing or invalid.
    """
    raw_event = config.get(CONF_EVENT)
    if raw_event is None:
        _LOGGER.error("Missing configuration key %s", CONF_EVENT)
        return False

    event = raw_event.lower()

    # Map each supported event name to its tracking helper.
    trackers = {
        EVENT_SUNRISE: track_sunrise,
        EVENT_SUNSET: track_sunset,
    }
    if event not in trackers:
        _LOGGER.error("Invalid value for %s: %s", CONF_EVENT, event)
        return False

    offset = _parse_offset(config.get(CONF_OFFSET))
    if offset is False:
        return False

    trackers[event](hass, action, offset)
    return True
def if_action(hass, config):
    """Wrap action method with sun based condition.

    Builds and returns a zero-argument ``time_if`` callable that evaluates
    the before/after sun condition at call time, or None when the
    configuration is missing or invalid.
    """
    before = config.get(CONF_BEFORE)
    after = config.get(CONF_AFTER)
    # Make sure required configuration keys are present
    if before is None and after is None:
        logging.getLogger(__name__).error(
            "Missing if-condition configuration key %s or %s",
            CONF_BEFORE, CONF_AFTER)
        return None
    # Make sure configuration keys have the right value
    if before not in (None, EVENT_SUNRISE, EVENT_SUNSET) or \
            after not in (None, EVENT_SUNRISE, EVENT_SUNSET):
        logging.getLogger(__name__).error(
            "%s and %s can only be set to %s or %s",
            CONF_BEFORE, CONF_AFTER, EVENT_SUNRISE, EVENT_SUNSET)
        return None
    before_offset = _parse_offset(config.get(CONF_BEFORE_OFFSET))
    after_offset = _parse_offset(config.get(CONF_AFTER_OFFSET))
    # _parse_offset returns False (not an exception) on a malformed offset.
    if before_offset is False or after_offset is False:
        return None
    # The before_func/after_func closures capture hass and the parsed
    # offsets; they are re-evaluated on every time_if() call so the next
    # rising/setting time is always current.
    if before is None:
        def before_func():
            """Return no point in time."""
            return None
    elif before == EVENT_SUNRISE:
        def before_func():
            """Return time before sunrise."""
            return sun.next_rising(hass) + before_offset
    else:
        def before_func():
            """Return time before sunset."""
            return sun.next_setting(hass) + before_offset
    if after is None:
        def after_func():
            """Return no point in time."""
            return None
    elif after == EVENT_SUNRISE:
        def after_func():
            """Return time after sunrise."""
            return sun.next_rising(hass) + after_offset
    else:
        def after_func():
            """Return time after sunset."""
            return sun.next_setting(hass) + after_offset
    def time_if():
        """Validate time based if-condition."""
        now = dt_util.now()
        before = before_func()
        after = after_func()
        # NOTE(review): replace() only pins hour/minute, so now's current
        # second/microsecond also participate in the comparison — presumably
        # acceptable at this granularity, but confirm.
        if before is not None and now > now.replace(hour=before.hour,
                                                    minute=before.minute):
            return False
        if after is not None and now < now.replace(hour=after.hour,
                                                   minute=after.minute):
            return False
        return True
    return time_if
def _parse_offset(raw_offset):
"""Parse the offset."""
if raw_offset is None:
return timedelta(0)
negative_offset = False
if raw_offset.startswith('-'):
negative_offset = True
raw_offset = raw_offset[1:]
try:
(hour, minute, second) = [int(x) for x in raw_offset.split(':')]
except ValueError:
_LOGGER.error('Could not parse offset %s', raw_offset)
return False
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
| 28.986395 | 78 | 0.633185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 921 | 0.216146 |
63d787602e9b9889274c51a3e392c5c66b98f987 | 4,821 | py | Python | docs/source/conf.py | tanlin2013/Tensor-Network | 5b9a48076db57fc16f874ee453fe1ededdc5ebf6 | [
"Apache-2.0"
] | 1 | 2020-09-07T21:35:34.000Z | 2020-09-07T21:35:34.000Z | docs/source/conf.py | tanlin2013/Tensor-Network | 5b9a48076db57fc16f874ee453fe1ededdc5ebf6 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | tanlin2013/Tensor-Network | 5b9a48076db57fc16f874ee453fe1ededdc5ebf6 | [
"Apache-2.0"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root and the tnpy package importable so that autodoc
# can resolve the modules and __version__ can be imported below.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../tnpy'))
from tnpy import __version__
# -- Project information -----------------------------------------------------
project = 'tnpy'
# 'copyright' intentionally shadows the builtin; Sphinx requires this name.
copyright = '2021, Tan Tao-Lin'
author = 'Tan Tao-Lin'
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'nbsphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.inheritance_diagram',
    'm2r2',
]
# Turn on sphinx.ext.autosummary (generate stub pages automatically)
autosummary_generate = True
# Looks for objects in external projects
intersphinx_mapping = {
    'tensornetwork': ('https://tensornetwork.readthedocs.io/en/latest/', None),
}
# MathJax v2 loaded from CDN; mathjax2_config customizes its tex2jax
# preprocessor (inline/display delimiters, which CSS classes to process).
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
mathjax2_config = {
    'tex2jax': {
        'inlineMath': [['$', '$'], ['\\(', '\\)']],
        'displayMath': [["$$", "$$"]],
        'processEscapes': True,
        'ignoreClass': 'document',
        'processClass': 'math|output_area',
    }
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Allowing docstring in both __init__ and right under class definition
autoclass_content = 'both'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.  These options are specific to sphinx_book_theme and wire
# the "edit on GitHub" / issue buttons to the repository.
html_theme_options = {
    "github_url": "https://github.com/tanlin2013/tnpy",
    "repository_url": "https://github.com/tanlin2013/tnpy",
    "use_repository_button": True,
    "use_issues_button": True,
    "use_edit_page_button": True,
    "path_to_docs": "docs",
    "use_fullscreen_button": False,
    "use_download_button": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {  # type: ignore
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'tnpy.tex', 'tnpy Documentation',
     'Tan Tao-Lin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
| 30.512658 | 95 | 0.669156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,920 | 0.813109 |
63d7cdd21b779bfe16c82daade1776e92192157f | 1,963 | py | Python | tests/modules/organizations/test_models.py | karenc/houston | 4eaaaf11d61394035e34b55bb847ea7eb4099c61 | [
"Apache-2.0"
] | null | null | null | tests/modules/organizations/test_models.py | karenc/houston | 4eaaaf11d61394035e34b55bb847ea7eb4099c61 | [
"Apache-2.0"
] | 2 | 2021-03-16T20:28:06.000Z | 2021-03-29T15:54:11.000Z | tests/modules/organizations/test_models.py | karenc/houston | 4eaaaf11d61394035e34b55bb847ea7eb4099c61 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name,missing-docstring
import sqlalchemy
import logging
def test_Organization_add_members(db, temp_user):  # pylint: disable=unused-argument
    """Enrolling a user links both sides of the membership exactly once,
    and a second enrollment row for the same (org, user) pair is rejected
    by the database.
    """
    from app.modules.organizations.models import (
        Organization,
        OrganizationUserMembershipEnrollment,
    )
    temp_org = Organization(
        title='Temp Organization', website='temp@temp.org', owner_guid=temp_user.guid
    )
    temp_enrollment = OrganizationUserMembershipEnrollment()
    temp_enrollment.user = temp_user
    temp_org.user_membership_enrollments.append(temp_enrollment)
    # Doing this multiple times should not have an effect: the collection
    # deduplicates the same enrollment object.
    temp_org.user_membership_enrollments.append(temp_enrollment)
    temp_org.user_membership_enrollments.append(temp_enrollment)
    temp_org.user_membership_enrollments.append(temp_enrollment)
    with db.session.begin():
        db.session.add(temp_org)
        db.session.add(temp_enrollment)
    # Refresh so relationship collections reflect the committed state.
    db.session.refresh(temp_user)
    db.session.refresh(temp_org)
    db.session.refresh(temp_enrollment)
    logging.info(temp_user.organization_membership_enrollments)
    logging.info(temp_org.user_membership_enrollments)
    logging.info(temp_user.get_org_memberships())
    logging.info(temp_org.get_members())
    # Exactly one membership on each side despite the repeated appends.
    assert len(temp_user.get_org_memberships()) == 1
    assert temp_org in temp_user.get_org_memberships()
    assert len(temp_org.get_members()) == 1
    assert temp_user in temp_org.get_members()
    try:
        # A *new* enrollment object for the same (org, user) pair must
        # violate the unique constraint when flushed.
        duplicate_enrollment = OrganizationUserMembershipEnrollment()
        duplicate_enrollment.user = temp_user
        temp_org.user_membership_enrollments.append(duplicate_enrollment)
        with db.session.begin():
            db.session.add(duplicate_enrollment)
    except (sqlalchemy.orm.exc.FlushError, sqlalchemy.exc.IntegrityError):
        # Expected: the constraint fired; the duplicate was not persisted.
        pass
    # Clean up the fixtures created by this test.
    with db.session.begin():
        db.session.delete(temp_org)
        db.session.delete(temp_enrollment)
| 32.716667 | 85 | 0.745288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.0973 |