hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2facb93fab811cb05caccbdb7caab3f1d5d8e1cc | 1,764 | py | Python | projects/machine_learning/tensor_flow/mnist/mnist_beginner.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | 3 | 2018-09-15T03:16:33.000Z | 2020-07-11T00:50:39.000Z | projects/machine_learning/tensor_flow/mnist/mnist_beginner.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | null | null | null | projects/machine_learning/tensor_flow/mnist/mnist_beginner.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | 10 | 2016-08-25T13:38:57.000Z | 2021-02-01T10:20:01.000Z | # tensorflow mnist beginner tutorial
# https://www.tensorflow.org/versions/r0.10/tutorials/mnist/beginners/index.html
import tensorflow as tf
# Download the input data. This will give three data sets:
# mnist.train, mnist.test and mnist.validation
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# A placeholder for mnist images (these are dim N=784)
x = tf.placeholder(tf.float32, [None, 784])  # (None means dimension of any length)
# Weights and biases represented by 'variables'
W = tf.Variable(tf.zeros([784, 10]))  # The one-hot images (784 pixels and a one-hot array giving the number)
b = tf.Variable(tf.zeros([10]))  # Beta values
# Regression model:
# Multiply x by W then add b, and run through softmax.
# BUGFIX: 'y' was used below (cross_entropy and correct_prediction) but never
# defined; restore the model-output definition from the tutorial.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define our cost function, 'cross entropy' in this case
y_ = tf.placeholder(tf.float32, [None, 10])  # A placeholder to store the correct answers
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Will train the model using gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Initialise the variables (tf.initialize_all_variables is the r0.10-era API)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# RUN!
for i in range(1000):
    # Collect a batch of 100 training data points
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Feed the batch into the session, replacing the placeholders
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Evaluate the model
# argmax gives the index with the largest value; here compare the most likely image to the correct one and
# see how many times they are equal
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) | 36 | 108 | 0.748299 | # tensorflow mnist beginner tutorial
# https://www.tensorflow.org/versions/r0.10/tutorials/mnist/beginners/index.html
import tensorflow as tf
# Download the input data. This will give three data sets:
# mnist.train, mnist.test and mnist.validation
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# A placeholder for mnist images (these are dim N=784)
x = tf.placeholder(tf.float32, [None, 784])  # (None means dimension of any length)
# Weights and biases represented by 'variables'
W = tf.Variable(tf.zeros([784, 10]))  # The one-hot images (784 pixels and a one-hot array giving the number)
b = tf.Variable(tf.zeros([10]))  # Beta values
# Regression model:
# Multiply x by W then add b, and run through softmax.
# BUGFIX: 'y' was used below (cross_entropy and correct_prediction) but never
# defined; restore the model-output definition from the tutorial.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define our cost function, 'cross entropy' in this case
y_ = tf.placeholder(tf.float32, [None, 10])  # A placeholder to store the correct answers
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Will train the model using gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Initialise the variables (tf.initialize_all_variables is the r0.10-era API)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# RUN!
for i in range(1000):
    # Collect a batch of 100 training data points
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Feed the batch into the session, replacing the placeholders
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Evaluate the model
# argmax gives the index with the largest value; here compare the most likely image to the correct one and
# see how many times they are equal
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) | 0 | 0 | 0 |
ce7239951aa53c806eead6acfee3dc12f4c62421 | 1,884 | py | Python | aws_lambda_builders/workflows/go_dep/workflow.py | calavera/aws-lambda-builders | 13108125df51f67ea25e5c451846ae6d06fcee1a | [
"Apache-2.0"
] | null | null | null | aws_lambda_builders/workflows/go_dep/workflow.py | calavera/aws-lambda-builders | 13108125df51f67ea25e5c451846ae6d06fcee1a | [
"Apache-2.0"
] | null | null | null | aws_lambda_builders/workflows/go_dep/workflow.py | calavera/aws-lambda-builders | 13108125df51f67ea25e5c451846ae6d06fcee1a | [
"Apache-2.0"
] | null | null | null | """
Go Dep Workflow
"""
import logging
from warnings import warn
from aws_lambda_builders.actions import CopySourceAction
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from .actions import DepEnsureAction, GoBuildAction
from .utils import OSUtils
from .subproc_exec import SubprocessExec
LOG = logging.getLogger(__name__)
class GoDepWorkflow(BaseWorkflow):
    """
    A Lambda builder workflow that knows how to build
    Go projects using `dep`
    """

    # Identifier this workflow registers itself under in the builder framework.
    NAME = "GoDepBuilder"

    # Declares which (language, dependency manager) requests this workflow serves;
    # no application framework is required for Go/dep builds.
    CAPABILITY = Capability(language="go", dependency_manager="dep", application_framework=None)

    # Paths that are never copied into the build artifact.
    EXCLUDED_FILES = (".aws-sam", ".git")
| 31.932203 | 116 | 0.666667 | """
Go Dep Workflow
"""
import logging
from warnings import warn
from aws_lambda_builders.actions import CopySourceAction
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from .actions import DepEnsureAction, GoBuildAction
from .utils import OSUtils
from .subproc_exec import SubprocessExec
LOG = logging.getLogger(__name__)
class GoDepWorkflow(BaseWorkflow):
    """
    A Lambda builder workflow that knows how to build
    Go projects using `dep`
    """

    NAME = "GoDepBuilder"

    CAPABILITY = Capability(language="go", dependency_manager="dep", application_framework=None)

    EXCLUDED_FILES = (".aws-sam", ".git")

    def __init__(self, source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=None, osutils=None, **kwargs):
        """Set up the dep-ensure + go-build action pipeline for a Go project."""
        warn(f"{self.__class__.__name__} will be removed on April 11, 2022.", DeprecationWarning, stacklevel=2)
        super(GoDepWorkflow, self).__init__(
            source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=runtime, **kwargs
        )

        if osutils is None:
            osutils = OSUtils()

        # The executable name the built binary should be emitted as.
        opts = kwargs.get("options", {})
        exe_name = opts.get("artifact_executable_name", None)

        # Gopkg.toml (and the vendor dir) live next to the manifest; dep runs from there.
        project_dir = osutils.abspath(osutils.dirname(manifest_path))
        binary_path = osutils.joinpath(osutils.abspath(artifacts_dir), exe_name)

        dep_runner = SubprocessExec(osutils, "dep")
        go_runner = SubprocessExec(osutils, "go")

        self.actions = [
            DepEnsureAction(project_dir, dep_runner),
            GoBuildAction(
                project_dir,
                osutils.abspath(source_dir),
                binary_path,
                go_runner,
                self.architecture,
                env=osutils.environ,
            ),
        ]
| 1,209 | 0 | 27 |
abf910f5a57e5f37cbdee17d7f3e619da55d7af0 | 6,469 | py | Python | multiwallet.py | Frichetten/Specter | 8fe5b63c226a8c21ffcd282bf72a4dc5b79985d4 | [
"MIT"
] | 16 | 2017-12-29T17:25:22.000Z | 2020-02-13T16:13:12.000Z | multiwallet.py | Frichetten/Specter | 8fe5b63c226a8c21ffcd282bf72a4dc5b79985d4 | [
"MIT"
] | 5 | 2017-12-26T16:18:42.000Z | 2018-02-20T23:00:07.000Z | multiwallet.py | Frichetten/Specter | 8fe5b63c226a8c21ffcd282bf72a4dc5b79985d4 | [
"MIT"
] | 5 | 2017-12-27T15:11:03.000Z | 2018-10-28T13:46:07.000Z | #!/usr/bin/env python
# Specter MultiWallet Implementation
# Nick Frichette 12/10/2017
"""The purpose of the multiwallet is to provide an interface for users
to interact with their Specter wallets. This application will show
all the wallets thy currently possess, as well as allow users to
create more."""
import shutil
from wallet import *
from blockchain import *
# ANSI escape sequences
FAIL = '\033[91m'
END = '\033[0m'
OK = '\033[92m'
if __name__ == '__main__':
main()
| 38.96988 | 103 | 0.578915 | #!/usr/bin/env python
# Specter MultiWallet Implementation
# Nick Frichette 12/10/2017
"""The purpose of the multiwallet is to provide an interface for users
to interact with their Specter wallets. This application will show
all the wallets thy currently possess, as well as allow users to
create more."""
import shutil
from wallet import *
from blockchain import *
# ANSI escape sequences
FAIL = '\033[91m'
END = '\033[0m'
OK = '\033[92m'
def create_wallet(wallets):
wallet_name = raw_input("What would you like to name the wallet?: ")
print OK + "Creating " + wallet_name + END
wallets[wallet_name] = Wallet(wallet_name)
def delete_wallet(wallets, wallet_name):
answer = raw_input("Are you sure you want to delete this wallet? It cannot be undone[y/n]: ")
if answer.lower() == 'n' or answer.lower() == 'no':
print FAIL + "Deletion aborted" + END
elif answer.lower() == 'y' or answer.lower() == 'yes':
name = wallet_name
print FAIL + "Wallet to delete: " + name + END
proof = raw_input("Please type the name of the wallet to finalize decision [" + name + "]: ")
if proof == name:
wallets.pop(name, None)
shutil.rmtree('key-' + name)
raw_input("Wallet deleted! Press [Enter] to continue...")
else:
print FAIL + "Name improperly typed. Aborting!" + END
def specific_wallet_input(wallets, guide, index, local_blockchain):
    """Interactive menu loop for a single wallet.

    Lets the user display the wallet's address, send funds to another public
    address, or delete the wallet. `guide` maps the menu number (`index`) back
    to the wallet's name in `wallets`. Loops until the wallet is deleted.
    """
    selection = ""
    selected_wallet = wallets[guide[int(index)]]
    # Only these menu entries are acted upon; anything else redraws the menu.
    approved_input = ['0', '1', 'd']
    while selection != 'exit':
        # ANSI clear-screen before redrawing the menu.
        print "\033[H\033[J\r",  # Note the comma
        print "(0) Display Address"
        print "(1) Send Amount to Other Wallet"
        selected_wallet.display_address_and_balance(local_blockchain)
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print "To delete this wallet please enter 'd' and hit [Enter]"
        selection = raw_input(selected_wallet.name + ">> ")
        # Validate input
        if selection in approved_input:
            # If the input is '0' we need to display the public address
            if selection == '0':
                print selected_wallet.get_address()
                raw_input("Press [Enter] to continue...")
            # If the input is '1' we need to send funds to a public address
            if selection == '1':
                to = raw_input("What is the public address of the wallet you'd like to send to?: ")
                amount = raw_input("How much would you like to send? [Current funds: " +
                                   str(selected_wallet.get_balance(local_blockchain)) + "]: ")
                transaction = selected_wallet.create_transaction(amount, to)
                validation = selected_wallet.broadcast_transaction(transaction)
                if validation:
                    print "Transaction was successful. Updating Blockchain"
                    local_blockchain.update_blockchain()
                    raw_input("Transaction Complete. Press [Enter] to continue...")
                else:
                    raw_input("Transaction Was Invalid. Press [Enter] to continue...")
            # If the input is 'd' we need to delete a wallet; exit the loop
            # afterwards since the wallet no longer exists.
            if selection == 'd':
                delete_wallet(wallets, selected_wallet.name)
                selection = 'exit'
        else:
            # Unrecognised input: no-op, the menu is simply redrawn.
            None
def lookup_wallet(local_blockchain, address):
    """Return the balance recorded on the blockchain for the given public address."""
    return local_blockchain.lookup_address(address)
def main():
    """Top-level menu loop: load wallets from key-* directories, then let the
    user open a wallet, create one, or look up any public address's balance.
    """
    # Create a dictionary to hold wallets
    wallets = {}
    # This will be our local instantiation of the Blockchain
    local_blockchain = Blockchain(is_node=False)
    # Setting the approved input: 'c', 'i', and any number 0-999
    approved_input = ['c', 'i']
    [approved_input.append(str(x)) for x in range(1000)]
    # The convention for identifying wallets it having the public and
    # private keys in a local directory with the name key-"wallet name"
    for item in os.listdir('.'):
        if 'key-' in item and 'nodekey' not in item:
            # NOTE(review): Wallet(item) is passed the full directory name
            # ('key-<name>'), while create_wallet passes just the name —
            # confirm Wallet() accepts both forms.
            wallets[item[item.index('-') + 1:]] = Wallet(item)
    # If there are no keys, then we need to offer the chance to make a wallet
    ans = ""
    if len(wallets.keys()) == 0:
        ans = raw_input("We didn't find a wallet, would you like to create one? [y/n]: ")
        if ans.lower() == 'y' or ans.lower() == 'yes':
            create_wallet(wallets)
        if ans.lower() == 'n' or ans.lower() == 'no':
            print FAIL + "With no keys we'll have to exit. Goodbye" + END
            exit(0)
    """ Now that we've loaded the wallets, lets give the users some choices """
    # guide maps the displayed menu number to the wallet name.
    guide = {}
    while ans != 'exit':
        print "\033[H\033[J\r",  # Note the comma (ANSI clear screen)
        print "Welcome to Specter Multi_Wallet V0.02"  # \r is to clear that line
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print ""
        print "To begin, select a wallet listed below"
        print ""
        for i, item in enumerate(wallets.keys()):
            guide[i] = item
            print "(" + str(i) + ") " + wallets[item].name
        print ""
        print "To select a wallet please enter a number and hit [Enter]"
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print "To create a wallet please enter 'c' and hit [Enter]"
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print "To see the balance of any wallet with a public address please enter 'i' and hit [Enter]"
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        ans = raw_input(">> ")
        # Validate input
        if ans in approved_input:
            # If the input is 'c' we need to create a new wallet
            if ans == 'c':
                create_wallet(wallets)
            # If the input is 'i' we need to get information on a wallet
            elif ans == 'i':
                address = raw_input("Please enter the public address of the wallet: ")
                balance = lookup_wallet(local_blockchain, address)
                print "Balance: " + str(balance)
                raw_input("Press [Enter] to continue...")
            # If the user selects a number, we should check if it is a valid selection
            # NOTE(review): a number in approved_input but not in guide raises
            # KeyError here — confirm whether that can happen in practice.
            elif guide[int(ans)] in wallets.keys():
                specific_wallet_input(wallets, guide, ans, local_blockchain)
        else:
            # Unrecognised input: redraw the menu.
            None


if __name__ == '__main__':
    main()
| 5,852 | 0 | 115 |
e03884d7efd811ca10ce1ecb5780bf9ae05dca5d | 292 | py | Python | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_min_exclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_min_exclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_min_exclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_min_exclusive_5_xsd.nistschema_sv_iv_atomic_g_year_month_min_exclusive_5 import NistschemaSvIvAtomicGYearMonthMinExclusive5
__all__ = [
"NistschemaSvIvAtomicGYearMonthMinExclusive5",
]
| 48.666667 | 225 | 0.907534 | from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_min_exclusive_5_xsd.nistschema_sv_iv_atomic_g_year_month_min_exclusive_5 import NistschemaSvIvAtomicGYearMonthMinExclusive5
__all__ = [
"NistschemaSvIvAtomicGYearMonthMinExclusive5",
]
| 0 | 0 | 0 |
5e940be24241b9f3f371ec03cccd80b5f1cd76de | 387 | py | Python | utils.py | neilalex/SublimeWebInspector | e808d12956f115ea2b37bdf46fbfdb20b4e5ec13 | [
"MIT"
] | 416 | 2015-01-02T04:41:42.000Z | 2022-01-14T10:19:55.000Z | utils.py | neilalex/SublimeWebInspector | e808d12956f115ea2b37bdf46fbfdb20b4e5ec13 | [
"MIT"
] | 65 | 2015-01-06T09:32:53.000Z | 2018-09-24T03:17:30.000Z | utils.py | neilalex/SublimeWebInspector | e808d12956f115ea2b37bdf46fbfdb20b4e5ec13 | [
"MIT"
] | 51 | 2015-03-18T08:06:29.000Z | 2021-08-20T04:30:54.000Z |
import threading
import sublime
main_thread = threading.current_thread()
| 22.764706 | 86 | 0.710594 |
import threading
import sublime
def assert_main_thread():
    """Raise AssertionError when called from any thread other than the importing one."""
    global main_thread
    current = threading.current_thread()
    assert current.ident == main_thread.ident, "not on main thread"
main_thread = threading.current_thread()
def get_setting(key, default=None):
    """Read *key* from swi.sublime-settings, falling back to *default* when absent."""
    settings = sublime.load_settings("swi.sublime-settings")
    if settings and settings.has(key):
        return settings.get(key)
    return default
| 265 | 0 | 46 |
f1cabad6bc33ec2a8ea388dc17a0ce8e7248edcc | 37,368 | py | Python | discord/ext/vbu/cogs/utils/settings_menu.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | 2 | 2022-01-22T16:05:42.000Z | 2022-01-22T16:06:07.000Z | discord/ext/vbu/cogs/utils/settings_menu.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | null | null | null | discord/ext/vbu/cogs/utils/settings_menu.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import typing
import discord
from discord.ext import commands
from .errors import InvokedMetaCommand
class SettingsMenuOption(object):
"""
An option that can be chosen for a settings menu's selectable item,
eg an option that refers to a sub-menu, or a setting that refers to grabbing
a role list, etc.
"""
__slots__ = ('context', '_display', 'converter_args', 'callback', 'emoji', 'allow_nullable',)
def __init__(
        self,
        ctx: commands.Context,
        display: typing.Union[str, typing.Callable[[commands.Context], str]],
        converter_args: typing.List[SettingsMenuConverter] = None,
        callback: typing.Callable[['SettingsMenuOption', typing.List[typing.Any]], None] = lambda x: None,
        emoji: str = None,
        allow_nullable: bool = True,
        ):
    """
    Args:
        ctx (commands.Context): The context for which the menu is being invoked.
        display (Union[str, Callable[[commands.Context], str]]): A string (or callable that returns string) that gives the
            display prompt for the option.
        converter_args (List[SettingsMenuConverter], optional): A list of converter arguments that should be used to
            convert the user-provided arguments. Tuples are passed directly into `convert_prompted_information`.
        callback (Callable[['SettingsMenuOption', List[Any]], None], optional): A callable that's passed the
            information from the converter for you do to whatever with.
        emoji (str, optional): The emoji that this option refers to.
        allow_nullable (bool, optional): Whether or not this option is allowed to return None.
    """

    self.context: commands.Context = ctx
    self._display: typing.Union[str, typing.Callable[[commands.Context], str]] = display
    self.converter_args: typing.List[SettingsMenuConverter] = converter_args or list()
    self.callback: typing.Callable[['SettingsMenuOption', typing.List[typing.Any]], None] = callback
    # BUGFIX: 'emoji' is declared in __slots__ and documented above but was
    # never assigned, so reading option.emoji raised AttributeError.
    self.emoji: str = emoji
    self.allow_nullable: bool = allow_nullable
def get_display(self) -> str:
    """
    Resolve this option's display prompt; callables are invoked with the context.

    Returns:
        str: The string to be displayed.
    """

    display = self._display
    return display if isinstance(display, str) else display(self.context)
async def perform_action(self) -> None:
    """
    Runs through the converters before calling the instance's callback method with the converted data.
    """

    # Ask for and convert each configured argument in order. A failed
    # conversion becomes None (unless allow_nullable is False, in which case
    # the error propagates), and collection stops at the first None.
    returned_data = []
    for arg in self.converter_args:
        try:
            data = await self.convert_prompted_information(arg.prompt, arg.asking_for, arg.converter, arg.emojis)
        except SettingsMenuError as e:
            # NOTE(review): SettingsMenuError isn't imported in the visible
            # header — presumably defined elsewhere in this module; confirm.
            if not self.allow_nullable:
                raise e
            data = None
        returned_data.append(data)
        if data is None:
            break

    # Dispatch on the callback type: a command is invoked, a sub-menu is
    # started, and anything else is called directly (awaited if it returns
    # a coroutine).
    if isinstance(self.callback, commands.Command):
        await self.callback.invoke(self.context)
    elif isinstance(self.callback, SettingsMenu):
        await self.callback.start(self.context)
    else:
        called_data = self.callback(self, *returned_data)
        if asyncio.iscoroutine(called_data):
            await called_data
async def convert_prompted_information(
        self,
        prompt: str,
        asking_for: str,
        converter: commands.Converter,
        reactions: typing.List[discord.Emoji] = None,
        ) -> typing.Any:
    """
    Ask the user for some information, convert said information, and then return that converted value.

    Args:
        prompt (str): The text that we sent to the user -- something along the lines of "what
            channel do you want to use" etc.
        asking_for (str): Say what we're looking for them to send - doesn't need to be anything important,
            it just goes to the timeout message.
        converter (commands.Converter): The converter used to work out what to change the given user value to.
        reactions (typing.List[discord.Emoji], optional): The reactions that should be added to the prompt
            message. If provided then the content of the added reaction is thrown into the converter instead.

    Returns:
        typing.Any: The converted information.

    Raises:
        InvokedMetaCommand: If converting the information timed out, raise this error to signal to
            the menu that we should exit.
        SettingsMenuError: If the converting failed for some other reason.
    """

    # Send prompt; when reactions are given, render them as buttons so the
    # user answers with a click instead of a message.
    sendable: typing.Dict[str, typing.Any] = {"content": prompt}
    if reactions:
        x = discord.ui.MessageComponents.add_buttons_with_rows(*[
            discord.ui.Button(emoji=i, custom_id=str(i))
            for i in reactions
        ])
        sendable["components"] = x
    bot_message = await self.context.send(**sendable)

    # Wait for a response from the user
    user_message = None
    try:
        if reactions:
            # NOTE(review): 'check' is not defined in this scope — it looks
            # like a predicate (matching the invoking user/message) was
            # removed from this method; restore it before shipping.
            payload = await self.context.bot.wait_for("component_interaction", timeout=120, check=check)
            await payload.response.defer_update()
            content = str(payload.component.custom_id)
        else:
            user_message = await self.context.bot.wait_for("message", timeout=120, check=check)
            content = user_message.content
    except asyncio.TimeoutError:
        await self.context.send(f"Timed out asking for {asking_for}.")
        raise InvokedMetaCommand()

    # Run converter. A converter *class* is instantiated first (TypeError is
    # ignored so already-instantiated converters pass through); plain
    # callables (e.g. int) are called directly.
    conversion_failed = False
    value = None
    if hasattr(converter, 'convert'):
        try:
            converter = converter()
        except TypeError:
            pass
        try:
            value = await converter.convert(self.context, content)
        except commands.CommandError:
            conversion_failed = True
    else:
        try:
            value = converter(content)
        except Exception:
            conversion_failed = True

    # Delete prompt messages; user_message may be None in the reactions path,
    # hence the AttributeError in the except clause.
    try:
        await bot_message.delete()
    except discord.NotFound:
        pass
    try:
        await user_message.delete()
    except (discord.Forbidden, discord.NotFound, AttributeError):
        pass

    # Check conversion didn't fail
    if conversion_failed:
        raise SettingsMenuError()

    # Return converted value
    return value
@classmethod
def get_guild_settings_mention(
        cls,
        ctx: commands.Context,
        attr: str,
        default: str = 'none',
        ) -> str:
    """
    Look up *attr* in the cached guild settings for the invocation guild and
    return its mention string, falling back to *default* when unset.

    Args:
        ctx (commands.Context): The context for the command.
        attr (str): The attribute we want to mention.
        default (str, optional): If not found, what should the default be.

    Returns:
        str: The mention string.
    """

    guild_settings = ctx.bot.guild_settings[ctx.guild.id]
    return cls.get_settings_mention(ctx, guild_settings, attr, default)
@classmethod
def get_user_settings_mention(
        cls,
        ctx: commands.Context,
        attr: str,
        default: str = 'none',
        ) -> str:
    """
    Look up *attr* in the cached user settings for the invoking user and
    return its mention string, falling back to *default* when unset.

    Args:
        ctx (commands.Context): The context for the command.
        attr (str): The attribute we want to mention.
        default (str, optional): If not found, what should the default be.

    Returns:
        str: The mention string.
    """

    user_settings = ctx.bot.user_settings[ctx.author.id]
    return cls.get_settings_mention(ctx, user_settings, attr, default)
@classmethod
def get_settings_mention(
        cls,
        ctx: commands.Context,
        settings: dict,
        attr: str,
        default: str = 'none',
        ) -> str:
    """
    Get an item from the given settings dict, resolving channel/role IDs
    (detected from the attribute name) to their mention strings.

    :meta private:

    Args:
        ctx (commands.Context): The context for the command.
        settings (dict): The dictionary with the settings in it that we want to grab.
        attr (str): The attribute we want to mention.
        default (str, optional): If not found, what should the default be.

    Returns:
        str: The mention string.
    """

    words = attr.lower().split('_')
    value = settings[attr]

    # IDs for attributes named like "*_channel_*" / "*_role_*" resolve to
    # the live object and then its mention.
    if 'channel' in words:
        return cls.get_mention(ctx.bot.get_channel(value), default)
    if 'role' in words:
        return cls.get_mention(ctx.guild.get_role(value), default)

    # Anything else is returned as-is, with booleans lowercased for display.
    if isinstance(value, bool):
        return str(value).lower()
    return value
@staticmethod
def get_mention(
        data: typing.Union[discord.abc.GuildChannel, discord.Role, None],
        default: str,
        ) -> str:
    """
    Return the mention string of *data*, or *default* when *data* is falsy.

    :meta private:

    Args:
        data (typing.Union[discord.abc.GuildChannel, discord.Role, None]): The object we want to mention.
        default (str): The default string that should be output if we can't mention the object.

    Returns:
        str: The mention string.
    """

    if data:
        return data.mention
    return default
@classmethod
def get_set_guild_settings_callback(
        cls,
        table_name: str,
        column_name: str,
        serialize_function: typing.Callable[[typing.Any], typing.Any] = None,
        ) -> typing.Callable[[typing.Any], None]:
    """
    Build an async callback that saves a converted value into the guild
    settings table (and cache) — should be used in the SettingsMenu init.

    :meta private:

    Args:
        table_name (str): The name of the table the data should be inserted into. This should NOT
            be a user supplied value.
        column_name (str): The name of the column that the data should be inserted into. This is
            also the cache key. This should NOT be a user supplied value.
        serialize_function (typing.Callable[[typing.Any], typing.Any], optional): Converts the value
            into a database-friendly form before saving (not applied to the cached value). Defaults
            to a no-op.

    Returns:
        typing.Callable[[typing.Any], None]: A callable function that sets the guild settings when provided with data
    """

    serializer = serialize_function if serialize_function is not None else do_nothing
    return cls.get_set_settings_callback(table_name, "guild_id", column_name, serializer)
@classmethod
def get_set_user_settings_callback(
        cls,
        table_name: str,
        column_name: str,
        serialize_function: typing.Callable[[typing.Any], typing.Any] = None,
        ) -> typing.Callable[[dict], None]:
    """
    Build an async callback that saves a converted value into the user
    settings table (and cache) — should be used in the SettingsMenu init.

    :meta private:

    Args:
        table_name (str): The name of the table the data should be inserted into. This should NOT
            be a user supplied value.
        column_name (str): The name of the column that the data should be inserted into. This is
            also the cache key. This should NOT be a user supplied value.
        serialize_function (typing.Callable[[typing.Any], typing.Any], optional): Converts the value
            into a database-friendly form before saving (not applied to the cached value). Defaults
            to a no-op.

    Returns:
        typing.Callable[[dict], None]: A callable function that sets the user settings when provided with data
    """

    serializer = serialize_function if serialize_function is not None else do_nothing
    return cls.get_set_settings_callback(table_name, "user_id", column_name, serializer)
@staticmethod
def get_set_settings_callback(
        table_name: str,
        primary_key: str,
        column_name: str,
        serialize_function: typing.Callable[[typing.Any], typing.Any] = None
        ) -> typing.Callable[[dict], None]:
    """
    Return an async method that takes the data given by `convert_prompted_information`, then
    saves it into the database - should be used in the SettingsMenu init.

    Args:
        table_name (str): The name of the table the data should be inserted into. This is not used when caching information.
            This should NOT be a user supplied value.
        primary_key (str): The primary key of the table that you want to insert to. This *only* supports single primary keys and not
            compound ones.
        column_name (str): The name of the column that the data should be inserted into. This is the same name that's used for
            caching the value. This should NOT be a user supplied value.
        serialize_function (typing.Callable[[typing.Any], typing.Any], optional): The function that is called to convert the input data
            in the callback into a database-friendly value. This is *not* called for caching the value, only for databasing. The default
            serialize function doesn't do anything, but is provided so you don't have to provide one yourself.

    Returns:
        typing.Callable[[dict], None]: A callable function that sets the settings when provided with data
    """

    # BUGFIX: calling this method directly (not via the guild/user wrappers,
    # which substitute their own default) used to crash with a None
    # serialize_function; default it to a no-op here as well.
    if serialize_function is None:
        serialize_function = lambda value: value

    async def callback(self, data):
        """
        Serialize the converted value, upsert it into the given table, and cache it.
        Input should be a direct converted value from `convert_prompted_information`;
        Discord models are automatically reduced to their IDs.
        """

        # See if we need to get the object's ID
        if isinstance(data, (discord.Role, discord.TextChannel, discord.User, discord.Member, discord.Object, discord.CategoryChannel)):
            data = data.id

        # Serialize via the passed serialize function; the unserialized value
        # is kept for the cache.
        original_data, data = data, serialize_function(data)

        # BUGFIX: the guild ID (and guild settings cache) was previously used
        # unconditionally, so user-settings callbacks stored and cached data
        # under the wrong ID. Pick the record ID and cache from the primary key.
        if primary_key == "user_id":
            record_id = self.context.author.id
            settings_cache = self.context.bot.user_settings[record_id]
        else:
            record_id = self.context.guild.id
            settings_cache = self.context.bot.guild_settings[record_id]

        # Add to the database
        async with self.context.bot.database() as db:
            await db(
                "INSERT INTO {0} ({1}, {2}) VALUES ($1, $2) ON CONFLICT ({1}) DO UPDATE SET {2}=$2".format(table_name, primary_key, column_name),
                record_id, data,
            )

        # Cache the unserialized value
        settings_cache[column_name] = original_data

    # Return the callback
    return callback
@staticmethod
def get_set_iterable_delete_callback(
        table_name: str,
        column_name: str,
        cache_key: str,
        database_key: str,
        ) -> typing.Callable[[SettingsMenu, commands.Context, int], None]:
    """
    Return an async method that takes the data retuend by `convert_prompted_information` and then
    saves it into the database - should be used for the SettingsMenu init.

    Args:
        table_name (str): The name of the database that you want to remove data from.
        column_name (str): The column name that the key is inserted into in the table.
        cache_key (str): The key that's used to access the cached value for the iterable in `bot.guilds_settings`.
        database_key (str): The key that's used to refer to the role ID in the `role_list` table.

    Returns:
        Callable[[SettingsMenu, commands.Context, int], None]: A callable for `SettingsMenuIterable` objects to use.
    """

    def wrapper(menu, ctx, delete_key: int):
        """
        A sync wrapper so that we can return an async callback that deletes from the database.
        """

        async def callback(menu):
            """
            The function that actually deletes the role from the database
            Any input to this function will be silently discarded, since the actual input to this function is defined
            in the callback definition.
            """

            # Delete the row matching this guild, key, and value.
            async with ctx.bot.database() as db:
                await db(
                    "DELETE FROM {0} WHERE guild_id=$1 AND {1}=$2 AND key=$3".format(table_name, column_name),
                    ctx.guild.id, delete_key, database_key
                )

            # Remove the converted value from cache. The cached iterable may be
            # a list (.remove) or a mapping (.pop) — AttributeError on .remove
            # means it's a mapping.
            try:
                ctx.bot.guild_settings[ctx.guild.id][cache_key].remove(delete_key)
            except AttributeError:
                ctx.bot.guild_settings[ctx.guild.id][cache_key].pop(delete_key)
        return callback
    return wrapper
    @staticmethod
    def get_set_iterable_add_callback(
            table_name: str, column_name: str, cache_key: str, database_key: str,
            serialize_function: typing.Callable[[typing.Any], str] = None,
            original_data_type: type = None,
            ) -> typing.Callable[['SettingsMenu', commands.Context], None]:
        """
        Return an async method that takes the data returned by `convert_prompted_information` and then
        saves it into the database - should be used for the SettingsMenu init. This particular iterable
        can only deal with one convertable datapoint (for a list) or two (for a mapping). Any more than that
        and you will need to provide your own callback.
        Args:
            table_name (str): The name of the database that you want to add data to.
            column_name (str): The column name that the key is inserted into in the table.
            cache_key (str): This is the key that's used when caching the value in `bot.guild_settings`.
            database_key (str): This is the key that the value is added to the database table `role_list`.
            serialize_function (Callable[[Any], str], optional): The function run on the value to convert it
                into to make it database-safe. Values are automatically cast to strings after being run through the serialize function.
                The serialize_function is called when caching the value, but the cached value is not cast to a string. The default
                serialize function doesn't do anything, but is provided so you don't have to provide one yourself.
        Returns:
            Callable[[SettingsMenu, commands.Context], Callable[[SettingsMenu, Any, Optional[Any]], None]]:
                A callable for `SettingsMenuIterable` objects to use.
        """
        if serialize_function is None:
            serialize_function = do_nothing
        def wrapper(menu, ctx):
            """
            A sync wrapper so that we can return an async callback that adds to the database.
            """
            async def callback(menu, *data):
                """
                The function that actually adds the role to the table in the database
                Any input to this function will be direct outputs from perform_action's convert_prompted_information
                This is a function that creates a callback, so the expectation of `data` in this instance is that data is either
                a list of one item for a listing, eg [role_id], or a list of two items for a mapping, eg [role_id, value]
                """
                # Unpack the data
                # Two items -> mapping entry (key + value); one item -> plain list entry.
                # The failed 2-tuple unpack raises ValueError, which selects the list branch.
                try:
                    role, original_value = data
                    value = str(serialize_function(original_value))
                except ValueError:
                    role, value = data[0], None
                # Database it
                # NOTE(review): table_name/column_name are format-interpolated - they must be trusted literals.
                async with ctx.bot.database() as db:
                    await db(
                        """INSERT INTO {0} (guild_id, {1}, key, value) VALUES ($1, $2, $3, $4)
                        ON CONFLICT (guild_id, {1}, key) DO UPDATE SET value=excluded.value""".format(table_name, column_name),
                        ctx.guild.id, role.id, database_key, value
                    )
                # Set the original value for the cache
                # Make sure the cached container (list for single converter, dict for mapping) exists.
                if original_data_type is not None:
                    ctx.bot.guild_settings[ctx.guild.id].setdefault(cache_key, original_data_type())
                # Cache the converted value
                if value:
                    ctx.bot.guild_settings[ctx.guild.id][cache_key][role.id] = serialize_function(original_value)
                else:
                    if role.id not in ctx.bot.guild_settings[ctx.guild.id][cache_key]:
                        ctx.bot.guild_settings[ctx.guild.id][cache_key].append(role.id)
            return callback
        return wrapper
class SettingsMenu:
    """
    A settings menu object for setting up sub-menus or bot settings using reactions.
    Each menu object must be added as its own command, with sub-menus being
    referred to by string in the MenuItem's action.
    Examples:
        ::
            # We can pull out the settings menu mention method so that we can more easily refer to it in our lambdas.
            settings_mention = vbu.menus.SettingsMenuOption.get_guild_settings_mention
            # Make an initial menu.
            menu = vbu.menus.SettingsMenu()
            # And now we add some options.
            menu.add_multiple_options(
                # Every option that's added needs to be an instance of SettingsMenuOption.
                vbu.menus.SettingsMenuOption(
                    # The first argument is the context, always.
                    ctx=ctx,
                    # Display is either a string, or a function that takes context as an argument to *return*
                    # a string.
                    display=lambda c: "Set quote channel (currently {0})".format(settings_mention(c, 'quote_channel_id')),
                    # Converter args should be a list of SettingsMenuConverter options if present. These are the questions
                    # asked to the user to get the relevant information out of them.
                    converter_args=(
                        vbu.menus.SettingsMenuConverter(
                            # Prompt is what's asked to the user
                            prompt="What do you want to set the quote channel to?",
                            # Converter is either a converter or a function that's run to convert the given argument.
                            converter=commands.TextChannelConverter,
                        ),
                    ),
                    # Callback is a function that's run with the converted information to store the data in a database
                    # or otherwise.
                    callback=vbu.menus.SettingsMenuOption.get_set_guild_settings_callback('guild_settings', 'quote_channel_id'),
                ),
                # This is an option that calls a subcommand, also running some SettingsMenu code.
                vbu.menus.SettingsMenuOption(
                    ctx=ctx,
                    display="Set up VC max members",
                    callback=self.bot.get_command("setup vcmaxmembers"),
                ),
            )
            # And now we can run the menu
            try:
                await menu.start(ctx)
                await ctx.send("Done setting up!")
            except voxelbotutils.errors.InvokedMetaCommand:
                pass
    """
    # Emoji used for the "done"/exit option on the menu.
    TICK_EMOJI = "\N{HEAVY CHECK MARK}"
    # Emoji used by iterable menus for their "add a new item" option.
    PLUS_EMOJI = "\N{HEAVY PLUS SIGN}"
    def __init__(self):
        # BUG FIX: this __init__ was missing, so self.options/self.emoji_options
        # were read and mutated without ever being created.
        # The ordered list of options shown on the menu.
        self.options: typing.List[SettingsMenuOption] = list()
        # Maps the emoji/custom_id shown on a button to the option it triggers.
        # A value of None marks the "done" option that exits the menu.
        self.emoji_options: typing.Dict[str, SettingsMenuOption] = {}
    def add_option(self, option: SettingsMenuOption):
        """
        Add an option to the settings list.
        Args:
            option (SettingsMenuOption): The option that you want to add to the menu.
        """
        self.options.append(option)
    def add_multiple_options(self, *option: SettingsMenuOption):
        """
        Add multiple options to the settings list at once.
        Args:
            *option (SettingsMenuOption): A list of options that you want to add to the menu.
        """
        self.options.extend(option)
    async def start(self, ctx: commands.Context, *, timeout: float = 120):
        """
        Starts the menu running.
        Args:
            ctx (commands.Context): The context object for the called command.
            timeout (float, optional): How long the bot should wait for a reaction.
        """
        message = None
        while True:
            # Send (or edit in place) the menu message
            self.emoji_options.clear()
            data, emoji_list = self.get_sendable_data(ctx)
            if message is None:
                message = await ctx.send(**data)
            else:
                await message.edit(**data)
            # Wait for the user to press one of the menu's buttons
            try:
                # BUG FIX: the `check` predicate was referenced but not defined in this copy.
                def check(payload):
                    return all([
                        payload.message.id == message.id,
                        payload.user.id == ctx.author.id,
                    ])
                payload = await ctx.bot.wait_for("component_interaction", check=check, timeout=timeout)
                await payload.response.defer_update()
            except asyncio.TimeoutError:
                break
            picked_emoji = str(payload.component.custom_id)
            # Get the picked option
            try:
                picked_option = self.emoji_options[picked_emoji]
            except KeyError:
                continue
            # None marks the tick/done option - exit the loop
            if picked_option is None:
                break
            # Run the option; a failed conversion simply redraws the menu
            try:
                await picked_option.perform_action()
            except SettingsMenuError:
                pass
        # Clean up the menu message; best-effort only
        try:
            await message.delete()
        except (discord.NotFound, discord.Forbidden, discord.HTTPException):
            pass
    def get_sendable_data(
            self,
            ctx: commands.Context
            ) -> typing.Tuple[dict, typing.List[str]]:
        """
        Get a valid set of sendable data for the destination.
        Args:
            ctx (commands.Context): Just so we can set the invoke meta flag.
        Returns:
            Tuple[dict, List[str]]: A tuple of the sendable data for the destination that
                can be unpacked into a `discord.abc.Messageable.send`, and a list of emoji
                to add to the message in question.
        """
        ctx.invoke_meta = True
        # Create embed
        embed = discord.Embed()
        lines = []
        emoji_list = []
        index = 0
        for index, i in enumerate(self.options):
            emoji = i.emoji
            if emoji is None:
                # Options without an explicit emoji fall back to their list index as a custom_id.
                emoji = f"{index}"
                index += 1
            display = i.get_display()
            if display:
                lines.append(f"{emoji}) {i.get_display()}")
            self.emoji_options[emoji] = i
            emoji_list.append(emoji)
        # Finish embed
        text_lines = '\n'.join(lines)
        embed.description = text_lines or "No set data"
        # Add tick - the None option is how `start` detects "done"
        self.emoji_options[self.TICK_EMOJI] = None
        emoji_list.append(self.TICK_EMOJI)
        buttons = [
            discord.ui.Button(emoji=i, custom_id=i)
            for i in emoji_list
        ]
        buttons += [
            discord.ui.Button(label="Done", custom_id="done", style=discord.ui.ButtonStyle.success)
        ]
        components = discord.ui.MessageComponents.add_buttons_with_rows(*buttons)
        # Return data
        return {'embed': embed, "components": components}, emoji_list
class SettingsMenuIterable(SettingsMenu):
    """
    A version of the settings menu for dealing with things like lists and dictionaries.
    """
    def __init__(
            self,
            table_name: str,
            column_name: str,
            cache_key: str,
            database_key: str,
            key_display_function: typing.Callable[[typing.Any], str],
            value_display_function: typing.Callable[[typing.Any], str] = str,
            converters: typing.List[SettingsMenuConverter] = None, *,
            iterable_add_callback: typing.Callable[['SettingsMenu', commands.Context], None] = None,
            iterable_delete_callback: typing.Callable[['SettingsMenu', commands.Context, int], None] = None,
            ):
        """
        Args:
            table_name (str): The name of the table that the data should be inserted into.
            column_name (str): The column name for the table where the key should be inserted to.
            cache_key (str): The key that goes into `bot.guild_settings` to get to the cached iterable.
            database_key (str): The key that would be inserted into the default `role_list` or `channel_list`
                tables. If you're not using this field then this will probably be pretty useless to you.
            key_display_function (typing.Callable[[typing.Any], str]): A function used to take the raw data from the key and
                change it into a display value.
            value_display_function (typing.Callable[[typing.Any], str], optional): The function used to take the saved raw value
                from the database and nicely show it to the user in the embed.
            converters (typing.List[SettingsMenuConverter], optional): A list of the converters that should be used for the user to provide
                their new values for the menu. At least one converter is required.
            iterable_add_callback (typing.Callable[['SettingsMenu', commands.Context], None], optional): A function that's run with the
                params of the database name, the column name, the cache key, the database key, and the value serialize function.
                If left blank then it defaults to making a new callback for you that just adds to the `role_list` or `channel_list`
                table as specified. These methods are only directly compatible with lists and dictionaries - nothing that requires multiple
                arguments to be saved in a database; for those you will need to write your own method.
            iterable_delete_callback (typing.Callable[['SettingsMenu', commands.Context, int], None], optional): A function that's run
                with the params of the database name, the column name, the item to be deleted, the cache key, and the database key.
                If left blank then it defaults to making a new callback for you that just deletes from the `role_list` or `channel_list`
                table as specified. These methods are only directly compatible with lists and dictionaries - nothing that requires multiple
                arguments to be saved in a database; for those you will need to write your own method.
        Raises:
            ValueError: If no converters were provided.
        """
        # FIX: the previous check relied on indexing None/an empty list to raise inside a
        # try/except and re-raise as ValueError; this states the requirement directly.
        if not converters:
            raise ValueError("You need to provide at least one converter.")
        super().__init__()
        # Set up the storage data
        self.table_name = table_name
        self.column_name = column_name
        self.cache_key = cache_key
        self.database_key = database_key
        # Converters
        self.key_display_function = key_display_function
        self.value_display_function = value_display_function
        self.converters = converters
        # Add callback - with one converter the cache is a list of IDs; with two it's a
        # mapping whose values go through the second converter's serializer.
        self.iterable_add_callback = iterable_add_callback or SettingsMenuOption.get_set_iterable_add_callback(
            table_name=table_name,
            column_name=column_name,
            cache_key=cache_key,
            database_key=database_key,
            serialize_function=str if len(self.converters) == 1 else self.converters[1].serialize,
            original_data_type=list if len(self.converters) == 1 else dict,
        )
        # The default add callback has the shape:
        # Callable[[SettingsMenu, commands.Context], Callable[[SettingsMenu, *Any], None]]
        # Delete callback
        self.iterable_delete_callback = iterable_delete_callback or SettingsMenuOption.get_set_iterable_delete_callback(
            table_name=table_name,
            column_name=column_name,
            cache_key=cache_key,
            database_key=database_key,
        )
        # The default delete callback has the shape:
        # Callable[[SettingsMenu, commands.Context, int], Callable[[SettingsMenu], None]]
| 42.033746 | 149 | 0.597838 | from __future__ import annotations
import asyncio
import typing
import discord
from discord.ext import commands
from .errors import InvokedMetaCommand
def do_nothing(value):
    """Identity function: hand back *value* untouched. Used as the default serializer."""
    return value
class SettingsMenuError(commands.CommandError):
    """
    Raised internally by the settings menu when user-supplied input fails to convert;
    `SettingsMenu.start` catches it and simply redraws the menu.
    """
    pass
class SettingsMenuConverter(object):
    """
    Describes one question asked to the user by a settings menu option: the prompt text,
    the converter used on their answer, and (optionally) reaction emoji and a serializer
    that makes the converted value database-safe.
    """
    __slots__ = ('prompt', 'asking_for', 'converter', 'emojis', 'serialize')
    def __init__(
            self, prompt: str, converter: typing.Union[typing.Callable, commands.Converter], asking_for: str = 'item',
            emojis: typing.Optional[typing.List[discord.Emoji]] = None,
            serialize_function: typing.Callable[[typing.Any], typing.Any] = lambda x: x,
            ):
        # What we send to the user when asking for this piece of information.
        self.prompt = prompt
        # The converter (discord.py Converter or plain callable) run on their reply.
        self.converter = converter
        # Short human name for the thing being asked for; used in the timeout message.
        self.asking_for = asking_for
        # Serializer applied before the value is stored; defaults to the identity.
        self.serialize = serialize_function
        # Reaction emoji to offer instead of free text; an empty list means "ask via message".
        self.emojis = emojis if emojis else list()
class SettingsMenuOption(object):
    """
    An option that can be chosen for a settings menu's selectable item,
    eg an option that refers to a sub-menu, or a setting that refers to grabbing
    a role list, etc.
    """
    __slots__ = ('context', '_display', 'converter_args', 'callback', 'emoji', 'allow_nullable',)
    def __init__(
            self,
            ctx: commands.Context,
            display: typing.Union[str, typing.Callable[[commands.Context], str]],
            converter_args: typing.List[SettingsMenuConverter] = None,
            callback: typing.Callable[['SettingsMenuOption', typing.List[typing.Any]], None] = lambda x: None,
            emoji: str = None,
            allow_nullable: bool = True,
            ):
        """
        Args:
            ctx (commands.Context): The context for which the menu is being invoked.
            display (Union[str, Callable[[commands.Context], str]]): A string (or callable that returns string) that gives the
                display prompt for the option.
            converter_args (List[SettingsMenuConverter], optional): A list of converter arguments that should be used to
                convert the user-provided arguments. Tuples are passed directly into `convert_prompted_information`.
            callback (Callable[['SettingsMenuOption', List[Any]], None], optional): A callable that's passed the
                information from the converter for you do to whatever with.
            emoji (str, optional): The emoji that this option refers to.
            allow_nullable (bool, optional): Whether or not this option is allowed to return None.
        """
        self.context: commands.Context = ctx
        self._display: typing.Union[str, typing.Callable[[commands.Context], str]] = display
        self.converter_args: typing.List[SettingsMenuConverter] = converter_args or list()
        self.callback: typing.Callable[['SettingsMenuOption', typing.List[typing.Any]], None] = callback
        # BUG FIX: `emoji` is declared in __slots__ and accepted as a parameter but was never
        # assigned, so any read of `option.emoji` (eg in SettingsMenu.get_sendable_data)
        # raised AttributeError under __slots__.
        self.emoji: typing.Optional[str] = emoji
        self.allow_nullable: bool = allow_nullable
    def get_display(self) -> str:
        """
        Get the display prompt for this option.
        Returns:
            str: The string to be displayed
        """
        if isinstance(self._display, str):
            return self._display
        # Callable displays are invoked with the stored context so they can show live settings.
        return self._display(self.context)
    async def perform_action(self) -> None:
        """
        Runs through the converters before calling the instance's callback method with the converted data.
        """
        # Get data
        returned_data = []
        for arg in self.converter_args:
            try:
                data = await self.convert_prompted_information(arg.prompt, arg.asking_for, arg.converter, arg.emojis)
            except SettingsMenuError as e:
                if not self.allow_nullable:
                    raise e
                data = None
            returned_data.append(data)
            # A failed/None conversion stops asking any remaining questions.
            if data is None:
                break
        # Do callback - commands and sub-menus are invoked directly; anything else is
        # treated as a (possibly async) callable taking this option plus the converted data.
        if isinstance(self.callback, commands.Command):
            await self.callback.invoke(self.context)
        elif isinstance(self.callback, SettingsMenu):
            await self.callback.start(self.context)
        else:
            called_data = self.callback(self, *returned_data)
            if asyncio.iscoroutine(called_data):
                await called_data
    async def convert_prompted_information(
            self,
            prompt: str,
            asking_for: str,
            converter: commands.Converter,
            reactions: typing.List[discord.Emoji] = None,
            ) -> typing.Any:
        """
        Ask the user for some information, convert said information, and then return that converted value.
        Args:
            prompt (str): The text that we sent to the user -- something along the lines of "what
                channel do you want to use" etc.
            asking_for (str): Say what we're looking for them to send - doesn't need to be anything important,
                it just goes to the timeout message.
            converter (commands.Converter): The converter used to work out what to change the given user value to.
            reactions (typing.List[discord.Emoji], optional): The reactions that should be added to the prompt
                message. If provided then the content of the added reaction is thrown into the converter instead.
        Returns:
            typing.Any: The converted information.
        Raises:
            InvokedMetaCommand: If converting the information timed out, raise this error to signal to
                the menu that we should exit.
            SettingsMenuError: If the converting failed for some other reason.
        """
        # Send prompt - with buttons if emoji were given, otherwise as a plain question
        sendable: typing.Dict[str, typing.Any] = {"content": prompt}
        if reactions:
            x = discord.ui.MessageComponents.add_buttons_with_rows(*[
                discord.ui.Button(emoji=i, custom_id=str(i))
                for i in reactions
            ])
            sendable["components"] = x
        bot_message = await self.context.send(**sendable)
        # Wait for a response from the user
        user_message = None
        try:
            if reactions:
                # Button flow: the pressed button's custom_id becomes the "content" to convert.
                def check(payload: discord.Interaction):
                    return all([
                        payload.message.id == bot_message.id,
                        payload.user.id == self.context.author.id,
                    ])
                payload = await self.context.bot.wait_for("component_interaction", timeout=120, check=check)
                await payload.response.defer_update()
                content = str(payload.component.custom_id)
            else:
                # Message flow: take the next message from the same author in the same channel.
                def check(message: discord.Message):
                    return all([
                        message.channel.id == self.context.channel.id,
                        message.author.id == self.context.author.id,
                    ])
                user_message = await self.context.bot.wait_for("message", timeout=120, check=check)
                content = user_message.content
        except asyncio.TimeoutError:
            await self.context.send(f"Timed out asking for {asking_for}.")
            raise InvokedMetaCommand()
        # Run converter
        conversion_failed = False
        value = None
        if hasattr(converter, 'convert'):
            # discord.py-style converter; it may be passed as a class, so instantiate if needed.
            try:
                converter = converter()
            except TypeError:
                pass
            try:
                value = await converter.convert(self.context, content)
            except commands.CommandError:
                conversion_failed = True
        else:
            # Plain callable (eg int, str) - any exception counts as a failed conversion.
            try:
                value = converter(content)
            except Exception:
                conversion_failed = True
        # Delete prompt messages; best-effort cleanup only
        try:
            await bot_message.delete()
        except discord.NotFound:
            pass
        # AttributeError covers the button flow, where user_message stays None.
        try:
            await user_message.delete()
        except (discord.Forbidden, discord.NotFound, AttributeError):
            pass
        # Check conversion didn't fail
        if conversion_failed:
            raise SettingsMenuError()
        # Return converted value
        return value
    @classmethod
    def get_guild_settings_mention(
            cls,
            ctx: commands.Context,
            attr: str,
            default: str = 'none',
            ) -> str:
        """
        Get an item from the cached `Bot.guild_settings` object for the running guild and return
        either its mention string, or the `default` arg.
        Args:
            ctx (commands.Context): The context for the command.
            attr (str): The attribute we want to mention.
            default (str, optional): If not found, what should the default be.
        Returns:
            str: The mention string.
        """
        settings = ctx.bot.guild_settings[ctx.guild.id]
        return cls.get_settings_mention(ctx, settings, attr, default)
    @classmethod
    def get_user_settings_mention(
            cls,
            ctx: commands.Context,
            attr: str,
            default: str = 'none',
            ) -> str:
        """
        Get an item from the cached `Bot.user_settings` object for the running user and return
        either its mention string, or the `default` arg.
        Args:
            ctx (commands.Context): The context for the command.
            attr (str): The attribute we want to mention.
            default (str, optional): If not found, what should the default be.
        Returns:
            str: The mention string.
        """
        settings = ctx.bot.user_settings[ctx.author.id]
        return cls.get_settings_mention(ctx, settings, attr, default)
    @classmethod
    def get_settings_mention(
            cls,
            ctx: commands.Context,
            settings: dict,
            attr: str,
            default: str = 'none',
            ) -> str:
        """
        Get an item from the bot's settings.
        :meta private:
        Args:
            ctx (commands.Context): The context for the command.
            settings (dict): The dictionary with the settings in it that we want to grab.
            attr (str): The attribute we want to mention.
            default (str, optional): If not found, what should the default be.
        Returns:
            str: The mention string, or the raw value if the attribute is not a channel/role.
        """
        # Run converters - the attribute NAME decides how to resolve the stored ID
        if 'channel' in attr.lower().split('_'):
            data = ctx.bot.get_channel(settings[attr])
        elif 'role' in attr.lower().split('_'):
            data = ctx.guild.get_role(settings[attr])
        else:
            # Not a mentionable object: return the raw value (bools lowercased for display).
            data = settings[attr]
            if isinstance(data, bool):
                return str(data).lower()
            return data
        # Get mention
        return cls.get_mention(data, default)
@staticmethod
def get_mention(
data: typing.Union[discord.abc.GuildChannel, discord.Role, None],
default: str,
) -> str:
"""
Get the mention of an object.
:meta private:
Args:
data (typing.Union[discord.abc.GuildChannel, discord.Role, None]): The object we want to mention.
default (str): The default string that should be output if we can't mention the object.
Returns:
str: The mention string.
"""
mention = data.mention if data else default
return mention
    @classmethod
    def get_set_guild_settings_callback(
            cls,
            table_name: str,
            column_name: str,
            serialize_function: typing.Callable[[typing.Any], typing.Any] = None,
            ) -> typing.Callable[[typing.Any], None]:
        """
        Return an async method that takes the data given by `convert_prompted_information`, then
        saves it into the database - should be used in the SettingsMenu init.
        :meta private:
        Args:
            table_name (str): The name of the table the data should be inserted into. This is
                not used when caching information. This should NOT be a user supplied value.
            column_name (str): The name of the column that the data should be inserted into.
                This is the same name that's used for caching. This should NOT be a user supplied value.
            serialize_function (typing.Callable[[typing.Any], typing.Any], optional): The function that is called to convert the
                input data in the callback into a database-friendly value. This is *not* called for caching the value, only
                for databasing. The default serialize function doesn't do anything, but is provided so you don't have to provide
                one yourself.
        Returns:
            typing.Callable[[typing.Any], None]: A callable function that sets the guild settings when provided with data
        """
        if serialize_function is None:
            serialize_function = do_nothing
        # Guild rows are keyed on the invoking guild's ID.
        return cls.get_set_settings_callback(table_name, "guild_id", column_name, serialize_function)
    @classmethod
    def get_set_user_settings_callback(
            cls,
            table_name: str,
            column_name: str,
            serialize_function: typing.Callable[[typing.Any], typing.Any] = None,
            ) -> typing.Callable[[dict], None]:
        """
        Return an async method that takes the data given by `convert_prompted_information`, then
        saves it into the database - should be used in the SettingsMenu init.
        :meta private:
        Args:
            table_name (str): The name of the table the data should be inserted into. This is not used when caching information.
                This should NOT be a user supplied value.
            column_name (str): The name of the column that the data should be inserted into. This is the same name that's used for
                caching the value. This should NOT be a user supplied value.
            serialize_function (typing.Callable[[typing.Any], typing.Any], optional): The function that is called to convert the input data
                in the callback into a database-friendly value. This is *not* called for caching the value, only for databasing. The default
                serialize function doesn't do anything, but is provided so you don't have to provide one yourself.
        Returns:
            typing.Callable[[dict], None]: A callable function that sets the user settings when provided with data
        """
        if serialize_function is None:
            serialize_function = do_nothing
        # User rows are keyed on the invoking user's ID.
        return cls.get_set_settings_callback(table_name, "user_id", column_name, serialize_function)
@staticmethod
def get_set_settings_callback(
table_name: str,
primary_key: str,
column_name: str,
serialize_function: typing.Callable[[typing.Any], typing.Any] = None
) -> typing.Callable[[dict], None]:
"""
Return an async method that takes the data given by `convert_prompted_information`, then
saves it into the database - should be used in the SettingsMenu init.
Args:
table_name (str): The name of the table the data should be inserted into. This is not used when caching information.
This should NOT be a user supplied value.
primary_key (str): The primary key of the table that you want to insert to. This *only* supports single primary keys and not
compound ones.
column_name (str): The name of the column that the data should be inserted into. This is the same name that's used for
caching the value. This should NOT be a user supplied value.
serialize_function (typing.Callable[[typing.Any], typing.Any], optional): The function that is called to convert the input data
in the callback into a database-friendly value. This is *not* called for caching the value, only for databasing. The default
serialize function doesn't do anything, but is provided so you don't have to provide one yourself.
Returns:
typing.Callable[[dict], None]: A callable function that sets the user settings when provided with data
"""
async def callback(self, data):
"""
The function that actually sets the data in the specified table in the database.
Any input to this function should be a direct converted value from `convert_prompted_information`.
If the input is a discord.Role or discord.TextChannel, it is automatcally converted to that value's ID,
which is then put into the datbase and cache.
"""
# See if we need to get the object's ID
if isinstance(data, (discord.Role, discord.TextChannel, discord.User, discord.Member, discord.Object, discord.CategoryChannel)):
data = data.id
# Serialize via the passed serialize function
original_data, data = data, serialize_function(data)
# Add to the database
async with self.context.bot.database() as db:
await db(
"INSERT INTO {0} ({1}, {2}) VALUES ($1, $2) ON CONFLICT ({1}) DO UPDATE SET {2}=$2".format(table_name, primary_key, column_name),
self.context.guild.id, data,
)
# Cache
self.context.bot.guild_settings[self.context.guild.id][column_name] = original_data
# Return the callback
return callback
    @staticmethod
    def get_set_iterable_delete_callback(
            table_name: str,
            column_name: str,
            cache_key: str,
            database_key: str,
            ) -> typing.Callable[[SettingsMenu, commands.Context, int], None]:
        """
        Return an async method that takes the data returned by `convert_prompted_information` and then
        saves it into the database - should be used for the SettingsMenu init.
        Args:
            table_name (str): The name of the database that you want to remove data from.
            column_name (str): The column name that the key is inserted into in the table.
            cache_key (str): The key that's used to access the cached value for the iterable in `bot.guilds_settings`.
            database_key (str): The key that's used to refer to the role ID in the `role_list` table.
        Returns:
            Callable[[SettingsMenu, commands.Context, int], None]: A callable for `SettingsMenuIterable` objects to use.
        """
        def wrapper(menu, ctx, delete_key: int):
            """
            A sync wrapper so that we can return an async callback that deletes from the database.
            """
            async def callback(menu):
                """
                The function that actually deletes the role from the database
                Any input to this function will be silently discarded, since the actual input to this function is defined
                in the callback definition.
                """
                # Database it
                # NOTE(review): table_name/column_name are interpolated into the SQL - trusted literals only.
                async with ctx.bot.database() as db:
                    await db(
                        "DELETE FROM {0} WHERE guild_id=$1 AND {1}=$2 AND key=$3".format(table_name, column_name),
                        ctx.guild.id, delete_key, database_key
                    )
                # Remove the converted value from cache
                # Lists are removed by value; dicts (no .remove, hence AttributeError) are popped by key.
                try:
                    ctx.bot.guild_settings[ctx.guild.id][cache_key].remove(delete_key)
                except AttributeError:
                    ctx.bot.guild_settings[ctx.guild.id][cache_key].pop(delete_key)
            return callback
        return wrapper
    @staticmethod
    def get_set_iterable_add_callback(
            table_name: str, column_name: str, cache_key: str, database_key: str,
            serialize_function: typing.Callable[[typing.Any], str] = None,
            original_data_type: type = None,
            ) -> typing.Callable[['SettingsMenu', commands.Context], None]:
        """
        Return an async method that takes the data returned by `convert_prompted_information` and then
        saves it into the database - should be used for the SettingsMenu init. This particular iterable
        can only deal with one convertable datapoint (for a list) or two (for a mapping). Any more than that
        and you will need to provide your own callback.
        Args:
            table_name (str): The name of the database that you want to add data to.
            column_name (str): The column name that the key is inserted into in the table.
            cache_key (str): This is the key that's used when caching the value in `bot.guild_settings`.
            database_key (str): This is the key that the value is added to the database table `role_list`.
            serialize_function (Callable[[Any], str], optional): The function run on the value to convert it
                into to make it database-safe. Values are automatically cast to strings after being run through the serialize function.
                The serialize_function is called when caching the value, but the cached value is not cast to a string. The default
                serialize function doesn't do anything, but is provided so you don't have to provide one yourself.
        Returns:
            Callable[[SettingsMenu, commands.Context], Callable[[SettingsMenu, Any, Optional[Any]], None]]:
                A callable for `SettingsMenuIterable` objects to use.
        """
        if serialize_function is None:
            serialize_function = do_nothing
        def wrapper(menu, ctx):
            """
            A sync wrapper so that we can return an async callback that adds to the database.
            """
            async def callback(menu, *data):
                """
                The function that actually adds the role to the table in the database
                Any input to this function will be direct outputs from perform_action's convert_prompted_information
                This is a function that creates a callback, so the expectation of `data` in this instance is that data is either
                a list of one item for a listing, eg [role_id], or a list of two items for a mapping, eg [role_id, value]
                """
                # Unpack the data
                # Two items -> mapping entry (key + serialized value); a failed 2-tuple unpack
                # raises ValueError, selecting the single-item (plain list) branch.
                try:
                    role, original_value = data
                    value = str(serialize_function(original_value))
                except ValueError:
                    role, value = data[0], None
                # Database it (upsert on guild + key column + database_key)
                async with ctx.bot.database() as db:
                    await db(
                        """INSERT INTO {0} (guild_id, {1}, key, value) VALUES ($1, $2, $3, $4)
                        ON CONFLICT (guild_id, {1}, key) DO UPDATE SET value=excluded.value""".format(table_name, column_name),
                        ctx.guild.id, role.id, database_key, value
                    )
                # Set the original value for the cache
                # Ensure the cached container (list or dict per original_data_type) exists first.
                if original_data_type is not None:
                    ctx.bot.guild_settings[ctx.guild.id].setdefault(cache_key, original_data_type())
                # Cache the converted value
                if value:
                    ctx.bot.guild_settings[ctx.guild.id][cache_key][role.id] = serialize_function(original_value)
                else:
                    if role.id not in ctx.bot.guild_settings[ctx.guild.id][cache_key]:
                        ctx.bot.guild_settings[ctx.guild.id][cache_key].append(role.id)
            return callback
        return wrapper
class SettingsMenu:
    """
    A settings menu object for setting up sub-menus or bot settings using reactions.
    Each menu object must be added as its own command, with sub-menus being
    referred to by string in the MenuItem's action.
    Examples:
        ::
            # We can pull out the settings menu mention method so that we can more easily refer to it in our lambdas.
            settings_mention = vbu.menus.SettingsMenuOption.get_guild_settings_mention
            # Make an initial menu.
            menu = vbu.menus.SettingsMenu()
            # And now we add some options.
            menu.add_multiple_options(
                # Every option that's added needs to be an instance of SettingsMenuOption.
                vbu.menus.SettingsMenuOption(
                    # The first argument is the context, always.
                    ctx=ctx,
                    # Display is either a string, or a function that takes context as an argument to *return*
                    # a string.
                    display=lambda c: "Set quote channel (currently {0})".format(settings_mention(c, 'quote_channel_id')),
                    # Converter args should be a list of SettingsMenuConverter options if present. These are the questions
                    # asked to the user to get the relevant information out of them.
                    converter_args=(
                        vbu.menus.SettingsMenuConverter(
                            # Prompt is what's asked to the user
                            prompt="What do you want to set the quote channel to?",
                            # Converter is either a converter or a function that's run to convert the given argument.
                            converter=commands.TextChannelConverter,
                        ),
                    ),
                    # Callback is a function that's run with the converted information to store the data in a database
                    # or otherwise.
                    callback=vbu.menus.SettingsMenuOption.get_set_guild_settings_callback('guild_settings', 'quote_channel_id'),
                ),
                # This is an option that calls a subcommand, also running some SettingsMenu code.
                vbu.menus.SettingsMenuOption(
                    ctx=ctx,
                    display="Set up VC max members",
                    callback=self.bot.get_command("setup vcmaxmembers"),
                ),
            )
            # And now we can run the menu
            try:
                await menu.start(ctx)
                await ctx.send("Done setting up!")
            except voxelbotutils.errors.InvokedMetaCommand:
                pass
    """
    # Emoji used for the "done"/exit option on the rendered menu.
    TICK_EMOJI = "\N{HEAVY CHECK MARK}"
    # Emoji used by iterable menus for the "add a new item" option.
    PLUS_EMOJI = "\N{HEAVY PLUS SIGN}"
    def __init__(self):
        # The ordered list of options shown on the menu.
        self.options: typing.List[SettingsMenuOption] = list()
        # Maps a button's emoji/custom_id to its option; None marks the exit option.
        self.emoji_options: typing.Dict[str, SettingsMenuOption] = {}
def add_option(self, option: SettingsMenuOption):
"""
Add an option to the settings list.
Args:
option (SettingsMenuOption): The option that you want to add to the menu.
"""
self.options.append(option)
def add_multiple_options(self, *option: SettingsMenuOption):
"""
Add multiple options to the settings list at once.
Args:
*option (SettingsMenuOption): A list of options that you want to add to the menu.
"""
self.options.extend(option)
async def start(self, ctx: commands.Context, *, timeout: float = 120):
"""
Starts the menu running.
Args:
ctx (commands.Context): The context object for the called command.
timeout (float, optional): How long the bot should wait for a reaction.
"""
message = None
while True:
# Send message
self.emoji_options.clear()
data, emoji_list = self.get_sendable_data(ctx)
if message is None:
message = await ctx.send(**data)
else:
await message.edit(**data)
# Get the reaction
try:
def check(payload):
return all([
payload.message.id == message.id,
payload.user.id == ctx.author.id,
])
payload = await ctx.bot.wait_for("component_interaction", check=check, timeout=timeout)
await payload.response.defer_update()
except asyncio.TimeoutError:
break
picked_emoji = str(payload.component.custom_id)
# Get the picked option
try:
picked_option = self.emoji_options[picked_emoji]
except KeyError:
continue
# Process the picked option
if picked_option is None:
break
try:
await picked_option.perform_action()
except SettingsMenuError:
pass
# Delete all the processing stuff
try:
await message.delete()
except (discord.NotFound, discord.Forbidden, discord.HTTPException):
pass
def get_sendable_data(
self,
ctx: commands.Context
) -> typing.Tuple[dict, typing.List[str]]:
"""
Get a valid set of sendable data for the destination.
Args:
ctx (commands.Context): Just so we can set the invoke meta flag.
Returns:
Tuple[dict, List[str]]: A tuple of the sendable data for the destination that
can be unpacked into a `discord.abc.Messageable.send`, and a list of emoji
to add to the message in question.
"""
ctx.invoke_meta = True
# Create embed
embed = discord.Embed()
lines = []
emoji_list = []
index = 0
for index, i in enumerate(self.options):
emoji = i.emoji
if emoji is None:
emoji = f"{index}"
index += 1
display = i.get_display()
if display:
lines.append(f"{emoji}) {i.get_display()}")
self.emoji_options[emoji] = i
emoji_list.append(emoji)
# Finish embed
text_lines = '\n'.join(lines)
embed.description = text_lines or "No set data"
# Add tick
self.emoji_options[self.TICK_EMOJI] = None
emoji_list.append(self.TICK_EMOJI)
buttons = [
discord.ui.Button(emoji=i, custom_id=i)
for i in emoji_list
]
buttons += [
discord.ui.Button(label="Done", custom_id="done", style=discord.ui.ButtonStyle.success)
]
components = discord.ui.MessageComponents.add_buttons_with_rows(*buttons)
# Return data
return {'embed': embed, "components": components}, emoji_list
class SettingsMenuIterable(SettingsMenu):
    """
    A version of the settings menu for dealing with things like lists and dictionaries.
    """
    def __init__(
            self,
            table_name: str,
            column_name: str,
            cache_key: str,
            database_key: str,
            key_display_function: typing.Callable[[typing.Any], str],
            value_display_function: typing.Callable[[typing.Any], str] = str,
            converters: typing.Optional[typing.List[SettingsMenuConverter]] = None, *,
            iterable_add_callback: typing.Callable[['SettingsMenu', commands.Context], None] = None,
            iterable_delete_callback: typing.Callable[['SettingsMenu', commands.Context, int], None] = None,
            ):
        """
        Args:
            table_name (str): The name of the table that the data should be inserted into.
            column_name (str): The column name for the table where the key should be inserted to.
            cache_key (str): The key that goes into `bot.guild_settings` to get to the cached iterable.
            database_key (str): The key that would be inserted into the default `role_list` or `channel_list`
                tables. If you're not using this field then this will probably be pretty useless to you.
            key_display_function (typing.Callable[[typing.Any], str]): A function used to take the raw data from the key and
                change it into a display value.
            value_display_function (typing.Callable[[typing.Any], str], optional): The function used to take the saved raw value
                from the database and nicely show it to the user in the embed.
            converters (typing.List[SettingsMenuConverter], optional): A list of the converters that should be used for the user to provide
                their new values for the menu. At least one converter is required.
            iterable_add_callback (typing.Callable[['SettingsMenu', commands.Context], None], optional): A function that's run with the
                params of the database name, the column name, the cache key, the database key, and the value serialize function.
                If left blank then it defaults to making a new callback for you that just adds to the `role_list` or `channel_list`
                table as specified. These methods are only directly compatible with lists and dictionaries - nothing that requires multiple
                arguments to be saved in a database; for those you will need to write your own method.
            iterable_delete_callback (typing.Callable[['SettingsMenu', commands.Context, int], None], optional): A function that's run
                with the params of the database name, the column name, the item to be deleted, the cache key, and the database key.
                If left blank then it defaults to making a new callback for you that just deletes from the `role_list` or `channel_list`
                table as specified. These methods are only directly compatible with lists and dictionaries - nothing that requires multiple
                arguments to be saved in a database; for those you will need to write your own method.
        Raises:
            ValueError: If no converters were provided.
        """
        # Validate directly instead of probing converters[0] inside a broad
        # try/except, which swallowed unrelated errors. Callers still see a
        # ValueError for a missing/empty converter list.
        if not converters:
            raise ValueError("You need to provide at least one converter.")
        super().__init__()
        # Set up the storage data
        self.table_name = table_name
        self.column_name = column_name
        self.cache_key = cache_key
        self.database_key = database_key
        # Converters
        self.key_display_function = key_display_function
        self.value_display_function = value_display_function
        self.converters = converters
        # Add callback. With a single converter the data is treated as a list of
        # stringified keys; with two or more it is a key -> serialized-value dict.
        self.iterable_add_callback = iterable_add_callback or SettingsMenuOption.get_set_iterable_add_callback(
            table_name=table_name,
            column_name=column_name,
            cache_key=cache_key,
            database_key=database_key,
            serialize_function=str if len(self.converters) == 1 else self.converters[1].serialize,
            original_data_type=list if len(self.converters) == 1 else dict,
        )
        # The default above returns an async function (taking the converted values) that adds to the db:
        # Callable[[SettingsMenu, commands.Context], Callable[[SettingsMenu, *typing.Any], None]]
        # Delete callback
        self.iterable_delete_callback = iterable_delete_callback or SettingsMenuOption.get_set_iterable_delete_callback(
            table_name=table_name,
            column_name=column_name,
            cache_key=cache_key,
            database_key=database_key,
        )
        # The default above returns an async function (taking the converted values) that removes from the db:
        # Callable[[SettingsMenu, commands.Context, int], Callable[[SettingsMenu], None]]
    def get_sendable_data(self, ctx: commands.Context):
        """
        Build the menu options from the cached iterable (one delete option per
        current entry, plus an "add" option while under the 10-option cap), then
        defer to :meth:`SettingsMenu.get_sendable_data` for rendering.
        Args:
            ctx (commands.Context): The context object for the called command.
        Raises:
            ValueError: If the cached value is neither a dict nor a list.
        """
        # Get the current data
        data_points = ctx.bot.guild_settings[ctx.guild.id][self.cache_key]
        # See what our display function should be. For dicts we show "key - value";
        # for lists only the item itself is shown.
        if isinstance(data_points, dict):
            def display_function(key, value):
                return f"{self.key_display_function(key)} - {self.value_display_function(value)!s}"
            corrected_data_points = data_points.items()
        elif isinstance(data_points, list):
            def display_function(key, _):
                return self.key_display_function(key)
            corrected_data_points = [(item, index) for index, item in enumerate(data_points)]
        else:
            raise ValueError("Invalid cache type from database to use in an iterable.")
        # Delete buttons - one option per existing entry; picking it removes the entry.
        self.options = [
            SettingsMenuOption(
                ctx=ctx,
                display=display_function(i, o),
                converter_args=(),
                callback=self.iterable_delete_callback(self, ctx, i),
                allow_nullable=False,
            )
            for i, o in corrected_data_points
        ]
        # Add more buttons
        # TODO add pagination so that users can add more than 10 options
        if len(self.options) < 10:
            self.options.append(
                SettingsMenuOption(
                    ctx=ctx,
                    display="",
                    converter_args=self.converters,
                    callback=self.iterable_add_callback(self, ctx),
                    emoji=self.PLUS_EMOJI,
                    allow_nullable=False,
                )
            )
        # Generate the data as normal
        return super().get_sendable_data(ctx)
| 2,862 | 155 | 237 |
2a73e47208d7cfb4b2be916d6afe55646fd23c20 | 24,128 | py | Python | src/main/resources/ispw/ISPWClient.py | xebialabs-external/compuware-xlr-ispw-plugin | a4d64d6d4b3c64f8974cff4706d860f6cc33bb8f | [
"MIT"
] | null | null | null | src/main/resources/ispw/ISPWClient.py | xebialabs-external/compuware-xlr-ispw-plugin | a4d64d6d4b3c64f8974cff4706d860f6cc33bb8f | [
"MIT"
] | 7 | 2017-06-13T15:16:41.000Z | 2018-08-13T05:29:46.000Z | src/main/resources/ispw/ISPWClient.py | xebialabs-external/compuware-xlr-ispw-plugin | a4d64d6d4b3c64f8974cff4706d860f6cc33bb8f | [
"MIT"
] | 2 | 2018-04-12T18:40:32.000Z | 2018-11-01T15:13:24.000Z | #
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
from ispw.AssignmentClient import AssignmentClient
from ispw.ReleaseClient import ReleaseClient
from ispw.SetClient import SetClient
from ispw.TestConnectionClient import TestConnectionClient
| 70.756598 | 462 | 0.505305 | #
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
from ispw.AssignmentClient import AssignmentClient
from ispw.ReleaseClient import ReleaseClient
from ispw.SetClient import SetClient
from ispw.TestConnectionClient import TestConnectionClient
class ISPWClient(object):
    """Facade over the ISPW CES REST clients (assignment, release, set, test-connection)
    used by XL Release tasks.
    Each ``ispwservices_*`` method follows the same XL Release scripting convention:
    it reads its inputs from the ``variables`` dict, delegates to the matching
    sub-client, and writes the interesting response fields back into ``variables``
    as output properties.
    NOTE: this file runs under Jython 2.x inside XL Release, which is why
    ``dict.iteritems()`` is used throughout.
    """
    def __init__(self, http_connection, ces_token=None):
        # One sub-client per ISPW API area, all sharing the HTTP connection/token.
        self.set_client = SetClient(http_connection, ces_token)
        self.release_client = ReleaseClient(http_connection, ces_token)
        self.assignment_client = AssignmentClient(http_connection, ces_token)
        self.test_connection_client = TestConnectionClient(http_connection)
    @staticmethod
    def create_client(http_connection, ces_token=None):
        """Factory used by the XLR task wrappers to build a configured client."""
        return ISPWClient(http_connection, ces_token)
    def ispwservices_createassignment(self, variables):
        """Create an ISPW assignment; outputs: assignmentId, url."""
        result = self.assignment_client.create_assignment(srid=variables['srid'], stream=variables['stream'],
                                                          application=variables['application'],
                                                          default_path=variables['defaultPath'],
                                                          description=variables['description'],
                                                          owner=variables['owner'],
                                                          assignment_prefix=variables['assignmentPrefix'],
                                                          reference_number=variables['referenceNumber'],
                                                          release_id=variables['relId'], user_tag=variables['userTag'],
                                                          retryInterval=variables['retryInterval'],
                                                          retryLimit=variables['retryLimit'])
        variables['assignmentId'] = result["assignmentId"]
        variables['url'] = result["url"]
    def ispwservices_loadtask(self, variables):
        """Load a task into an assignment; copies every response field into variables."""
        result = self.assignment_client.load_task(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                  stream=variables['stream'],
                                                  application=variables['application'],
                                                  module_name=variables['moduleName'],
                                                  module_type=variables['moduleType'],
                                                  current_level=variables['currentLevel'],
                                                  starting_level=variables['startingLevel'],
                                                  generate_sequence=variables['generateSequence'],
                                                  sql=variables['sql'], ims=variables['ims'],
                                                  cics=variables['cics'], program=variables['program'],
                                                  retryInterval=variables['retryInterval'],
                                                  retryLimit=variables['retryLimit'])
        for key, value in result.iteritems():
            variables[key] = value
    def ispwservices_getassignmentinformation(self, variables):
        """Fetch assignment details; copies every response field into variables."""
        result = self.assignment_client.get_assignment_information(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                                   retryInterval=variables['retryInterval'],
                                                                   retryLimit=variables['retryLimit'])
        for key, value in result.iteritems():
            variables[key] = value
    def ispwservices_getassignmenttasklist(self, variables):
        """List assignment tasks at a level; outputs: tasks (taskId -> JSON string)."""
        result = self.assignment_client.get_assignment_task_list(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                                 level=variables['level'],
                                                                 retryInterval=variables['retryInterval'],
                                                                 retryLimit=variables['retryLimit'])
        processed_result = {}
        for item in result["tasks"]:
            task_id = item['taskId']
            processed_result[task_id] = json.dumps(item)
        variables['tasks'] = processed_result
    def ispwservices_getassignmenttaskinformation(self, variables):
        """Fetch one assignment task; renames taskId->taskOutputId and type->taskType
        so the response fields don't clash with the task's input properties."""
        result = self.assignment_client.get_assignment_task_information(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                                        task_id=variables['taskId'],
                                                                        retryInterval=variables['retryInterval'],
                                                                        retryLimit=variables['retryLimit'])
        for key, value in result.iteritems():
            if key == "taskId":
                variables["taskOutputId"] = value
            elif key == "type":
                variables["taskType"] = value
            else:
                variables[key] = value
    def ispwservices_generatetasksinassignment(self, variables):
        """Generate the tasks of an assignment; outputs: setId, url."""
        result = self.assignment_client.generate_tasks_in_assignment(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                                     level=variables['level'],
                                                                     runtime_configuration=variables['runtimeConfiguration'],
                                                                     auto_deploy=variables['autoDeploy'],
                                                                     callback_task_id=variables['callbackTaskId'],
                                                                     callback_url=variables['callbackUrl'],
                                                                     callback_username=variables['callbackUsername'],
                                                                     callback_password=variables['callbackPassword'],
                                                                     retryInterval=variables['retryInterval'],
                                                                     retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_promoteassignment(self, variables):
        """Promote an assignment to the next level; outputs: setId, url."""
        result = self.assignment_client.promote_assignment(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                           level=variables['level'],
                                                           change_type=variables['changeType'],
                                                           execution_status=variables['executionStatus'],
                                                           runtime_configuration=variables['runtimeConfiguration'],
                                                           override=variables['override'],
                                                           auto_deploy=variables['autoDeploy'],
                                                           callback_task_id=variables['callbackTaskId'],
                                                           callback_url=variables['callbackUrl'],
                                                           callback_username=variables['callbackUsername'],
                                                           callback_password=variables['callbackPassword'],
                                                           retryInterval=variables['retryInterval'],
                                                           retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_deployassignment(self, variables):
        """Deploy an assignment; outputs: setId, url."""
        result = self.assignment_client.deploy_assignment(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                          level=variables['level'],
                                                          change_type=variables['changeType'],
                                                          execution_status=variables['executionStatus'],
                                                          runtime_configuration=variables['runtimeConfiguration'],
                                                          dpenvlst=variables['dpenvlst'],
                                                          system=variables['system'],
                                                          callback_task_id=variables['callbackTaskId'],
                                                          callback_url=variables['callbackUrl'],
                                                          callback_username=variables['callbackUsername'],
                                                          callback_password=variables['callbackPassword'],
                                                          retryInterval=variables['retryInterval'],
                                                          retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_regressassignment(self, variables):
        """Regress an assignment to a previous level; outputs: setId, url."""
        result = self.assignment_client.regress_assignment(srid=variables['srid'], assignment_id=variables['assignmentId'],
                                                           level=variables['level'],
                                                           change_type=variables['changeType'],
                                                           execution_status=variables['executionStatus'],
                                                           runtime_configuration=variables['runtimeConfiguration'],
                                                           callback_task_id=variables['callbackTaskId'],
                                                           callback_url=variables['callbackUrl'],
                                                           callback_username=variables['callbackUsername'],
                                                           callback_password=variables['callbackPassword'],
                                                           retryInterval=variables['retryInterval'],
                                                           retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_createrelease(self, variables):
        """Create an ISPW release; outputs: relOutputId, url."""
        result = self.release_client.create_release(srid=variables['srid'], application=variables['application'],
                                                    stream=variables['stream'],
                                                    description=variables['description'], release_id=variables['relId'],
                                                    release_prefix=variables['relPrefix'],
                                                    owner=variables['owner'],
                                                    reference_number=variables['referenceNumber'],
                                                    retryInterval=variables['retryInterval'],
                                                    retryLimit=variables['retryLimit'])
        variables['relOutputId'] = result["releaseId"]
        variables['url'] = result["url"]
    def ispwservices_getreleaseinformation(self, variables):
        """Fetch release details; outputs: relOutputId, application, stream, description, owner, referenceNumber."""
        result = self.release_client.get_release_information(srid=variables['srid'], release_id=variables['relId'],
                                                             retryInterval=variables['retryInterval'],
                                                             retryLimit=variables['retryLimit'])
        variables['relOutputId'] = result["releaseId"]
        variables['application'] = result["application"]
        variables['stream'] = result["stream"]
        variables['description'] = result["description"]
        variables['owner'] = result["owner"]
        variables['referenceNumber'] = result['referenceNumber']
    def ispwservices_getreleasetasklist(self, variables):
        """List release tasks at a level; outputs: tasks (taskId -> JSON string)."""
        result = self.release_client.get_release_task_list(srid=variables['srid'], release_id=variables['relId'],
                                                           level=variables['level'],
                                                           retryInterval=variables['retryInterval'],
                                                           retryLimit=variables['retryLimit'])
        processed_result = {}
        for item in result["tasks"]:
            task_id = item['taskId']
            processed_result[task_id] = json.dumps(item)
        variables['tasks'] = processed_result
    def ispwservices_getreleasetaskinformation(self, variables):
        """Fetch one release task; renames taskId->taskOutputId and type->taskType
        so the response fields don't clash with the task's input properties."""
        result = self.release_client.get_release_task_information(srid=variables['srid'], release_id=variables['relId'],
                                                                  task_id=variables['taskId'],
                                                                  retryInterval=variables['retryInterval'],
                                                                  retryLimit=variables['retryLimit'])
        for key, value in result.iteritems():
            if key == "taskId":
                variables["taskOutputId"] = value
            elif key == "type":
                variables["taskType"] = value
            else:
                variables[key] = value
    def ispwservices_generatetasksinrelease(self, variables):
        """Generate the tasks of a release; outputs: setId, url."""
        result = self.release_client.generate_tasks_in_release(srid=variables['srid'], release_id=variables['relId'],
                                                               level=variables['level'],
                                                               runtime_configuration=variables['runtimeConfiguration'],
                                                               auto_deploy=variables['autoDeploy'],
                                                               callback_task_id=variables['callbackTaskId'],
                                                               callback_url=variables['callbackUrl'],
                                                               callback_username=variables['callbackUsername'],
                                                               callback_password=variables['callbackPassword'],
                                                               retryInterval=variables['retryInterval'],
                                                               retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_getreleasetaskgeneratelisting(self, variables):
        """Fetch the generate listing of a release task; outputs: listing."""
        result = self.release_client.get_release_task_generate_listing(srid=variables['srid'],
                                                                       release_id=variables['relId'],
                                                                       task_id=variables['taskId'],
                                                                       retryInterval=variables['retryInterval'],
                                                                       retryLimit=variables['retryLimit'])
        variables['listing'] = result["listing"]
    def ispwservices_promote(self, variables):
        """Promote a release to the next level; outputs: setId, url."""
        result = self.release_client.promote(srid=variables['srid'], release_id=variables['relId'],
                                             level=variables['level'],
                                             change_type=variables['changeType'],
                                             execution_status=variables['executionStatus'],
                                             runtime_configuration=variables['runtimeConfiguration'],
                                             override=variables['override'],
                                             auto_deploy=variables['autoDeploy'],
                                             callback_task_id=variables['callbackTaskId'],
                                             callback_url=variables['callbackUrl'],
                                             callback_username=variables['callbackUsername'],
                                             callback_password=variables['callbackPassword'],
                                             retryInterval=variables['retryInterval'],
                                             retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_deploy(self, variables):
        """Deploy a release; outputs: setId, url."""
        result = self.release_client.deploy(srid=variables['srid'], release_id=variables['relId'],
                                            level=variables['level'],
                                            change_type=variables['changeType'],
                                            execution_status=variables['executionStatus'],
                                            runtime_configuration=variables['runtimeConfiguration'],
                                            dpenvlst=variables['dpenvlst'],
                                            system=variables['system'],
                                            callback_task_id=variables['callbackTaskId'],
                                            callback_url=variables['callbackUrl'],
                                            callback_username=variables['callbackUsername'],
                                            callback_password=variables['callbackPassword'],
                                            retryInterval=variables['retryInterval'],
                                            retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_regress(self, variables):
        """Regress a release to a previous level; outputs: setId, url."""
        result = self.release_client.regress(srid=variables['srid'], release_id=variables['relId'],
                                             level=variables['level'],
                                             change_type=variables['changeType'],
                                             execution_status=variables['executionStatus'],
                                             runtime_configuration=variables['runtimeConfiguration'],
                                             callback_task_id=variables['callbackTaskId'],
                                             callback_url=variables['callbackUrl'],
                                             callback_username=variables['callbackUsername'],
                                             callback_password=variables['callbackPassword'],
                                             retryInterval=variables['retryInterval'],
                                             retryLimit=variables['retryLimit'])
        variables['setId'] = result["setId"]
        variables['url'] = result["url"]
    def ispwservices_getsetinformation(self, variables):
        """Fetch set details; outputs: setOutputId plus scheduling/state fields."""
        result = self.set_client.get_set_information(srid=variables['srid'], set_id=variables['setId'],
                                                     retryInterval=variables['retryInterval'],
                                                     retryLimit=variables['retryLimit'])
        variables['setOutputId'] = result["setid"]
        variables['application'] = result["applicationId"]
        variables['stream'] = result["streamName"]
        variables['description'] = result["description"]
        variables['owner'] = result["owner"]
        variables['startDate'] = result["startDate"]
        variables['startTime'] = result["startTime"]
        variables['deployActivationDate'] = result["deployActiveDate"]
        variables['deployActivationTime'] = result["deployActiveTime"]
        variables['deployImplementationDate'] = result["deployImplementationDate"]
        variables['deployImplementationTime'] = result["deployImplementationTime"]
        variables['state'] = result["state"]
    def ispwservices_pollgetsetinformation(self, variables):
        """Poll set information until one of the expected statuses is seen; outputs: statusResult."""
        result = self.set_client.poll_get_set_information(srid=variables['srid'], set_id=variables['setId'],
                                                          poll_interval=variables['pollInterval'],
                                                          poll_timeout_count=variables['pollTimeoutCount'],
                                                          status_field_name=variables['statusFieldName'],
                                                          expected_status_list=variables['expectedStatusList'])
        variables['statusResult'] = result["status"]
    def ispwservices_getsettasklist(self, variables):
        """List the tasks of a set; outputs: tasks (taskId -> JSON string)."""
        result = self.set_client.get_set_task_list(srid=variables['srid'], set_id=variables['setId'],
                                                   retryInterval=variables['retryInterval'],
                                                   retryLimit=variables['retryLimit'])
        processed_result = {}
        for item in result["tasks"]:
            task_id = item['taskId']
            processed_result[task_id] = json.dumps(item)
        variables['tasks'] = processed_result
    def ispwservices_getsetdeploymentinformation(self, variables):
        """Fetch set deployment details; outputs: createDate, description, environment, packages, requestId, setOutputId, state."""
        result = self.set_client.get_set_deployment_information(srid=variables['srid'], set_id=variables['setId'],
                                                                retryInterval=variables['retryInterval'],
                                                                retryLimit=variables['retryLimit'])
        variables["createDate"] = result["createDate"]
        variables['description'] = result["description"]
        variables['environment'] = result["environment"]
        variables['packages'] = result["packages"]
        variables['requestId'] = result["requestId"]
        variables['setOutputId'] = result["setId"]
        variables['state'] = result["status"]
    def ispwservices_pollgetsetdeploymentinformation(self, variables):
        """Poll set deployment info until one of the expected statuses is seen; outputs: statusResult."""
        result = self.set_client.poll_get_set_deployment_information(srid=variables['srid'], set_id=variables['setId'],
                                                                     poll_interval=variables['pollInterval'],
                                                                     poll_timeout_count=variables['pollTimeoutCount'],
                                                                     status_field_name=variables['statusFieldName'],
                                                                     expected_status_list=variables['expectedStatusList'])
        variables['statusResult'] = result["status"]
    def ispwservices_fallbackset(self, variables):
        """Fall back a deployed set; outputs: setOutputId, url."""
        result = self.set_client.fallback_set(srid=variables['srid'], set_id=variables['setId'],
                                              change_type=variables['changeType'],
                                              execution_status=variables['executionStatus'],
                                              runtime_configuration=variables['runtimeConfiguration'],
                                              callback_task_id=variables['callbackTaskId'],
                                              callback_url=variables['callbackUrl'],
                                              callback_username=variables['callbackUsername'],
                                              callback_password=variables['callbackPassword'],
                                              retryInterval=variables['retryInterval'],
                                              retryLimit=variables['retryLimit'])
        variables['setOutputId'] = result["setId"]
        variables['url'] = result["url"]
| 22,112 | 724 | 23 |
ee7adf29839ea1bca710f2462461dd777666093e | 289 | py | Python | SpaMetric/__init__.py | zccqq/SpaMetric | ab8bbcd2407d0d1cdcdc2dd2cbd06888b8302181 | [
"BSD-3-Clause"
] | null | null | null | SpaMetric/__init__.py | zccqq/SpaMetric | ab8bbcd2407d0d1cdcdc2dd2cbd06888b8302181 | [
"BSD-3-Clause"
] | null | null | null | SpaMetric/__init__.py | zccqq/SpaMetric | ab8bbcd2407d0d1cdcdc2dd2cbd06888b8302181 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from ._metric_learning import metric_learning
from ._metric_learning_minibatch import metric_learning_minibatch
from ._reference_centers import reference_centers
from ._spatial_reconstruction import spatial_reconstruction
__version__ = '0.1.0'
| 10.321429 | 65 | 0.782007 | # -*- coding: utf-8 -*-
from ._metric_learning import metric_learning
from ._metric_learning_minibatch import metric_learning_minibatch
from ._reference_centers import reference_centers
from ._spatial_reconstruction import spatial_reconstruction
__version__ = '0.1.0'
| 0 | 0 | 0 |
8305a26b2f3030527e0cec5779f42393b3ce03b3 | 54 | py | Python | skypack/hello.py | gksmfzz1/JavaProject | 6b22badf8052a66928ab3ef2f60e83bfa54466c1 | [
"Apache-2.0"
] | null | null | null | skypack/hello.py | gksmfzz1/JavaProject | 6b22badf8052a66928ab3ef2f60e83bfa54466c1 | [
"Apache-2.0"
] | null | null | null | skypack/hello.py | gksmfzz1/JavaProject | 6b22badf8052a66928ab3ef2f60e83bfa54466c1 | [
"Apache-2.0"
] | null | null | null | #간단한 인사말
| 10.8 | 27 | 0.592593 | #간단한 인사말
def sayhello():
    """Print a simple greeting to standard output."""
    greeting = 'Hello, World!!'
    print(greeting)
| 22 | 0 | 22 |
4610e4657ede2f28c827f6329028e6c910940645 | 4,046 | py | Python | diffabs/conc.py | ZikangXiong/DiffAbs | 6b3d50688cc2f3ceef5a23a8a162fc612c0b745b | [
"MIT"
] | 4 | 2020-09-30T22:04:41.000Z | 2021-03-21T18:32:44.000Z | diffabs/conc.py | ZikangXiong/DiffAbs | 6b3d50688cc2f3ceef5a23a8a162fc612c0b745b | [
"MIT"
] | null | null | null | diffabs/conc.py | ZikangXiong/DiffAbs | 6b3d50688cc2f3ceef5a23a8a162fc612c0b745b | [
"MIT"
] | 3 | 2020-09-06T22:55:33.000Z | 2021-03-21T18:32:34.000Z | """ Define some functions used in concrete domain. """
import torch
from torch import Tensor
from torch.nn import functional as F
from diffabs.abs import MetaFunc
from diffabs.utils import reduce_dim_dists
class ConcDist(MetaFunc):
""" Similar to AbsEle in abs.py, it needs the distance for concrete data points as well. Implementation similar to
the non-relational interval domain. Note that no eps is set for ConcDist, it could, but it is fine since
ConcDist is mainly used for validation but not training.
"""
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
pass
| 34.581197 | 118 | 0.57217 | """ Define some functions used in concrete domain. """
import torch
from torch import Tensor
from torch.nn import functional as F
from diffabs.abs import MetaFunc
from diffabs.utils import reduce_dim_dists
class ConcDist(MetaFunc):
    """ Similar to AbsEle in abs.py, it needs the distance for concrete data points as well. Implementation similar to
        the non-relational interval domain. Note that no eps is set for ConcDist, it could, but it is fine since
        ConcDist is mainly used for validation but not training.
        Every method returns a non-negative "violation distance": 0 when the property
        holds for a concrete point, and a positive value growing with the violation,
        so the results are usable as (sub-)differentiable safety losses.
    """
    @classmethod
    def col_le_val(cls, outs: Tensor, idx: int, threshold: float, mean: float = 0., range: float = 1.) -> Tensor:
        """ Distance for the property outs[..., idx] <= threshold, where the raw
            threshold is first normalised by (threshold - mean) / range to match
            the (presumably normalised) scale of outs -- TODO confirm with callers.
        """
        t = outs[..., idx]
        threshold = (threshold - mean) / range
        d = t - threshold
        # relu clips satisfied cases to distance 0.
        return F.relu(d)
    @classmethod
    def col_ge_val(cls, outs: Tensor, idx: int, threshold: float, mean: float = 0., range: float = 1.) -> Tensor:
        """ Distance for the property outs[..., idx] >= threshold (mirror of col_le_val). """
        t = outs[..., idx]
        threshold = (threshold - mean) / range
        d = threshold - t
        return F.relu(d)
    @classmethod
    def bound_by(cls, outs: Tensor, lb: Tensor, ub: Tensor, reduce_by: str):
        """ Distance for lb <= outs <= ub element-wise; per-column violations are
            concatenated and folded by reduce_dim_dists (semantics of reduce_by
            are defined in diffabs.utils -- see that helper).
        """
        dist_lb = F.relu(lb - outs) # like col_ge_val
        dist_ub = F.relu(outs - ub) # like col_le_val
        dists = torch.cat((dist_lb, dist_ub), dim=-1)
        return reduce_dim_dists(dists, reduce_by)
    @classmethod
    def not_bound_by(cls, outs: Tensor, lb: Tensor, ub: Tensor, reduce_by: str):
        """ Distance for outs lying outside the box [lb, ub] (negation of bound_by). """
        dist_lb = F.relu(outs - lb) # like col_le_val
        dist_ub = F.relu(ub - outs) # like col_ge_val
        dists = torch.cat((dist_lb, dist_ub), dim=-1)
        return reduce_dim_dists(dists, reduce_by)
    @classmethod
    def cols_not_max(cls, outs: Tensor, *idxs: int) -> Tensor:
        """ Distance for "none of the idxs columns is the maximum": each target only
            needs to be below *some* other column, hence the min over diffs; the
            per-target distances are summed.
        """
        others = cls._idxs_not(outs, *idxs)
        others = outs[..., others]
        res = []
        for i in idxs:
            target = outs[..., [i]]
            diff = target - others # will broadcast
            diff = F.relu(diff)
            mins, _ = torch.min(diff, dim=-1)
            res.append(mins)
        return sum(res)
    @classmethod
    def cols_is_max(cls, outs: Tensor, *idxs: int) -> Tensor:
        """ Distance for "one of the idxs columns is the maximum": min across idxs
            (any one may win), then max across the other columns (the winner must
            exceed all of them).
        """
        others = cls._idxs_not(outs, *idxs)
        others = outs[..., others]
        res = []
        for i in idxs:
            target = outs[..., [i]]
            diffs = others - target # will broadcast
            diffs = F.relu(diffs)
            res.append(diffs)
        if len(idxs) == 1:
            all_diffs = res[0]
        else:
            all_diffs = torch.stack(res, dim=-1)
            all_diffs, _ = torch.min(all_diffs, dim=-1) # it's OK to have either one to be max, thus use torch.min()
        # then it needs to surpass everybody else, thus use torch.max() for maximum distance
        diffs, _ = torch.max(all_diffs, dim=-1)
        return diffs
    @classmethod
    def cols_not_min(cls, outs: Tensor, *idxs: int) -> Tensor:
        """ Distance for "none of the idxs columns is the minimum" (mirror of cols_not_max). """
        others = cls._idxs_not(outs, *idxs)
        others = outs[..., others]
        res = []
        for i in idxs:
            target = outs[..., [i]]
            diffs = others - target # will broadcast
            diffs = F.relu(diffs)
            mins, _ = torch.min(diffs, dim=-1)
            res.append(mins)
        return sum(res)
    @classmethod
    def cols_is_min(cls, outs: Tensor, *idxs: int) -> Tensor:
        """ Distance for "one of the idxs columns is the minimum" (mirror of cols_is_max). """
        others = cls._idxs_not(outs, *idxs)
        others = outs[..., others]
        res = []
        for i in idxs:
            target = outs[..., [i]]
            diffs = target - others # will broadcast
            diffs = F.relu(diffs)
            res.append(diffs)
        if len(idxs) == 1:
            all_diffs = res[0]
        else:
            all_diffs = torch.stack(res, dim=-1)
            all_diffs, _ = torch.min(all_diffs, dim=-1) # it's OK to have either one to be min, thus use torch.min()
        # then it needs to surpass everybody else, thus use torch.max() for maximum distance
        diffs, _ = torch.max(all_diffs, dim=-1)
        return diffs
    pass
| 3,144 | 0 | 208 |
5b1363485151128caf183c9f6b705444acca65c5 | 136 | py | Python | src/localsrv/urls.py | vladiibine/localsrv | 7bb8fd2e08f43a1b5adef9ad17ab534a317e0a57 | [
"MIT"
] | null | null | null | src/localsrv/urls.py | vladiibine/localsrv | 7bb8fd2e08f43a1b5adef9ad17ab534a317e0a57 | [
"MIT"
] | 4 | 2015-04-28T08:20:26.000Z | 2015-06-13T06:32:31.000Z | src/localsrv/urls.py | vladiibine/localsrv | 7bb8fd2e08f43a1b5adef9ad17ab534a317e0a57 | [
"MIT"
] | 1 | 2018-03-04T20:29:27.000Z | 2018-03-04T20:29:27.000Z | from django.conf.urls import url
from .views import serve_all
# Catch-all URLconf: every request path is routed to the single serve_all view.
urlpatterns = (
    # NOTE(review): the route name embeds a colon ("localsrv:serve_all").
    # Django's reverse() treats "namespace:name" specially, so this literal
    # name is unusual outside an app namespace -- confirm callers reverse it
    # exactly as written before renaming.
    url(r'^.*$', serve_all, name="localsrv:serve_all"),
)
from .views import serve_all
urlpatterns = (
url(r'^.*$', serve_all, name="localsrv:serve_all"),
) | 0 | 0 | 0 |
9451399f112c8e97b880e6c3db93a0ca8732dbf0 | 12,441 | py | Python | proj/tests.py | bbrighttaer/irelease | 632e204436027fe30c2a3a22a1040fc0cd996a94 | [
"MIT"
] | 6 | 2020-08-17T21:59:25.000Z | 2021-11-15T05:02:23.000Z | proj/tests.py | bbrighttaer/irelease | 632e204436027fe30c2a3a22a1040fc0cd996a94 | [
"MIT"
] | 1 | 2021-05-10T07:50:28.000Z | 2021-05-11T11:28:57.000Z | proj/tests.py | bbrighttaer/irelease | 632e204436027fe30c2a3a22a1040fc0cd996a94 | [
"MIT"
] | 1 | 2020-12-21T12:07:49.000Z | 2020-12-21T12:07:49.000Z | import unittest
from collections import namedtuple, defaultdict
import numpy as np
import torch
from ptan.experience import ExperienceSourceFirstLast
from tqdm import tqdm
from irelease.data import GeneratorData
from irelease.env import MoleculeEnv
from irelease.model import Encoder, PositionalEncoding, StackDecoderLayer, LinearOut, StackRNN, RNNLinearOut, RewardNetRNN
from irelease.reward import RewardFunction
from irelease.rl import PolicyAgent, MolEnvProbabilityActionSelector, REINFORCE, GuidedRewardLearningIRL, \
StateActionProbRegistry
from irelease.stackrnn import StackRNNCell
from irelease.utils import init_hidden, init_stack, get_default_tokens, init_hidden_2d, init_stack_2d, init_cell, seq2tensor
# Module-level fixtures shared by the tests: a small SMILES corpus loaded once
# at import time (requires the relative data file to exist on disk).
gen_data_path = '../data/chembl_xsmall.smi'
tokens = get_default_tokens()
# print(f'Number of tokens = {len(tokens)}')
gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t',
                         cols_to_read=[0], keep_header=True, tokens=tokens, tokens_reload=True)
bz = 32  # batch size used throughout this module
if __name__ == '__main__':
    unittest.main()
| 45.24 | 125 | 0.597138 | import unittest
from collections import namedtuple, defaultdict
import numpy as np
import torch
from ptan.experience import ExperienceSourceFirstLast
from tqdm import tqdm
from irelease.data import GeneratorData
from irelease.env import MoleculeEnv
from irelease.model import Encoder, PositionalEncoding, StackDecoderLayer, LinearOut, StackRNN, RNNLinearOut, RewardNetRNN
from irelease.reward import RewardFunction
from irelease.rl import PolicyAgent, MolEnvProbabilityActionSelector, REINFORCE, GuidedRewardLearningIRL, \
StateActionProbRegistry
from irelease.stackrnn import StackRNNCell
from irelease.utils import init_hidden, init_stack, get_default_tokens, init_hidden_2d, init_stack_2d, init_cell, seq2tensor
gen_data_path = '../data/chembl_xsmall.smi'
tokens = get_default_tokens()
# print(f'Number of tokens = {len(tokens)}')
gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t',
cols_to_read=[0], keep_header=True, tokens=tokens, tokens_reload=True)
bz = 32
class MyTestCase(unittest.TestCase):
    """Smoke tests for the irelease model/RL building blocks.

    These are integration-style checks (shape/length assertions plus prints)
    that exercise real components against the module-level ``gen_data``
    fixture; they require torch, ptan and the chembl sample data to run.
    """

    def test_batch(self):
        """A random training batch has `bz` inputs and `bz` targets."""
        batch = gen_data.random_training_set(batch_size=bz)
        assert (len(batch[0]) == bz and len(batch[1]) == bz)

    def test_embeddings(self):
        """Encoder maps token indices to a rank-3 embedding tensor."""
        x, y = gen_data.random_training_set(batch_size=bz)
        encoder = Encoder(gen_data.n_characters, 128, gen_data.char2idx[gen_data.pad_symbol])
        x = encoder((x,))
        assert (x.ndim == 3)
        print(f'x.shape = {x.shape}')

    def test_positional_encodings(self):
        """Positional encoding preserves the embedding tensor's shape."""
        x, y = gen_data.random_training_set(batch_size=bz)
        encoder = Encoder(gen_data.n_characters, 128, gen_data.char2idx[gen_data.pad_symbol])
        x = encoder(x)
        enc_shape = x.shape
        pe = PositionalEncoding(128, dropout=.2, max_len=500)
        x = pe(x)
        assert (x.shape == enc_shape)
        print(f'x.shape = {x.shape}')

    def test_stack_decoder_layer(self):
        """StackDecoderLayer consumes (embeddings, stack) and yields a 3-tuple."""
        x, y = gen_data.random_training_set(batch_size=bz)
        d_model = 128
        d_hidden = 10
        s_width = 16
        s_depth = 20
        encoder = Encoder(gen_data.n_characters, 128, gen_data.char2idx[gen_data.pad_symbol])
        x = encoder(x)
        pe = PositionalEncoding(d_model, dropout=.2, max_len=500)
        x = pe(x)
        h0 = init_hidden_2d(x.shape[1], x.shape[0], d_hidden)
        s0 = init_stack_2d(x.shape[1], x.shape[0], s_depth, s_width)
        stack_decoder = StackDecoderLayer(d_model=d_model, num_heads=1, stack_depth=s_depth,
                                          stack_width=s_width, dropout=.1)
        out = stack_decoder((x, s0))
        assert (len(out) == 3)

    def test_input_equals_output_embeddings(self):
        """LinearOut tied to the encoder weights keeps the tensor shape."""
        x, y = gen_data.random_training_set(batch_size=bz)
        encoder = Encoder(gen_data.n_characters, 128, gen_data.char2idx[gen_data.pad_symbol])
        lin_out = LinearOut(encoder.embeddings_weight)
        x = encoder(x)
        x_out = lin_out(x)
        assert x.shape == x_out.shape

    def test_stack_rnn_cell(self):
        """Manually unrolls StackRNNCell over time and both directions."""
        x, y = gen_data.random_training_set(batch_size=bz)
        d_model = 128
        hidden_size = 16
        stack_width = 10
        stack_depth = 20
        num_layers = 1
        num_dir = 2
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol])
        x = encoder(x)
        rnn_cells = []
        in_dim = d_model
        cell_type = 'gru'
        for _ in range(num_layers):
            rnn_cells.append(StackRNNCell(in_dim, hidden_size, has_stack=True,
                                          unit_type=cell_type, stack_depth=stack_depth,
                                          stack_width=stack_width))
            in_dim = hidden_size * num_dir
        rnn_cells = torch.nn.ModuleList(rnn_cells)
        h0 = init_hidden(num_layers=num_layers, batch_size=bz, hidden_size=hidden_size,
                         num_dir=num_dir)
        c0 = init_hidden(num_layers=num_layers, batch_size=bz, hidden_size=hidden_size, num_dir=num_dir)
        s0 = init_stack(bz, stack_width, stack_depth)
        seq_length = x.shape[0]
        hidden_outs = torch.zeros(num_layers, num_dir, seq_length, bz, hidden_size)
        if cell_type == 'lstm':
            cell_outs = torch.zeros(num_layers, num_dir, seq_length, bz, hidden_size)
        assert 0 <= num_dir <= 2
        for l in range(num_layers):
            for d in range(num_dir):
                h, c, stack = h0[l, d, :], c0[l, d, :], s0
                # Direction 0 walks the sequence forward, direction 1 backward.
                if d == 0:
                    indices = range(x.shape[0])
                else:
                    indices = reversed(range(x.shape[0]))
                for i in indices:
                    x_t = x[i, :, :]
                    hx, stack = rnn_cells[l](x_t, h, c, stack)
                    # LSTM cells return (h, c); GRU returns h only.
                    if cell_type == 'lstm':
                        hidden_outs[l, d, i, :, :] = hx[0]
                        cell_outs[l, d, i, :, :] = hx[1]
                    else:
                        hidden_outs[l, d, i, :, :] = hx

    def test_stack_rnn(self):
        """Chains two StackRNN layers and a linear head over one batch."""
        x, y = gen_data.random_training_set(batch_size=bz)
        d_model = 12
        hidden_size = 16
        stack_width = 10
        stack_depth = 20
        unit_type = 'lstm'
        num_layers = 2
        # get_initial_states is the module-level helper defined below the class.
        hidden_states = [get_initial_states(bz, hidden_size, 1, stack_depth, stack_width, unit_type)
                         for _ in range(num_layers)]
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol])
        x = encoder(x)
        stack_rnn_1 = StackRNN(1, d_model, hidden_size, True, 'gru', stack_width, stack_depth,
                               k_mask_func=encoder.k_padding_mask)
        stack_rnn_2 = StackRNN(2, hidden_size, hidden_size, True, 'gru', stack_width, stack_depth,
                               k_mask_func=encoder.k_padding_mask)
        outputs = stack_rnn_1([x] + hidden_states)
        outputs = stack_rnn_2(outputs)
        assert len(outputs) > 1
        linear = RNNLinearOut(4, hidden_size, bidirectional=False, )
        x = linear(outputs)
        print(x[0].shape)

    def test_mol_env(self):
        """Steps the molecule environment with random actions until done."""
        d_model = 8
        hidden_size = 16
        num_layers = 1
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol], return_tuple=True)
        rnn = RewardNetRNN(d_model, hidden_size, num_layers, bidirectional=True, unit_type='gru')
        reward_net = torch.nn.Sequential(encoder, rnn)
        env = MoleculeEnv(gen_data, RewardFunction(reward_net=reward_net,
                                                   policy=lambda x: gen_data.all_characters[
                                                       np.random.randint(gen_data.n_characters)],
                                                   actions=gen_data.all_characters))
        print(f'sample action: {env.action_space.sample()}')
        print(f'sample observation: {env.observation_space.sample()}')
        s = env.reset()
        for i in range(5):
            env.render()
            action = env.action_space.sample()
            print(f'action = {action}')
            s_prime, reward, done, info = env.step(action)
            if done:
                env.reset()
                break

    def test_molecule_mcts(self):
        """Collects rewards over a few random environment steps."""
        d_model = 8
        hidden_size = 16
        num_layers = 2
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol],
                          return_tuple=False)
        rnn = RewardNetRNN(d_model, hidden_size, num_layers, bidirectional=True, unit_type='gru')
        env = MoleculeEnv(gen_data, RewardFunction(reward_net=torch.nn.Sequential(encoder, rnn),
                                                   policy=lambda x: gen_data.all_characters[
                                                       np.random.randint(gen_data.n_characters)],
                                                   actions=gen_data.all_characters))
        rewards = []
        for i in range(5):
            env.render()
            action = env.action_space.sample()
            s_prime, reward, done, info = env.step(action)
            rewards.append(reward)
            if done:
                env.reset()
                break
        print(f'rewards: {rewards}')

    def test_reward_rnn(self):
        """RewardNetRNN produces a reward from encoded token indices."""
        x, y = gen_data.random_training_set(batch_size=bz)
        d_model = 8
        hidden_size = 16
        num_layers = 2
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol],
                          return_tuple=False)
        x = encoder([x])
        rnn = RewardNetRNN(d_model, hidden_size, num_layers, bidirectional=True, unit_type='lstm')
        r = rnn(x)
        print(f'reward: {r}')

    def test_policy_net(self):
        """End-to-end wiring: agent, reward function, env and IRL/REINFORCE."""
        d_model = 8
        hidden_size = 16
        num_layers = 1
        stack_width = 10
        stack_depth = 20
        unit_type = 'lstm'

        # Create a function to provide initial hidden states
        def hidden_states_func(batch_size=1):
            return [get_initial_states(batch_size, hidden_size, 1, stack_depth, stack_width, unit_type) for _ in
                    range(num_layers)]

        # Encoder to map character indices to embeddings
        encoder = Encoder(gen_data.n_characters, d_model, gen_data.char2idx[gen_data.pad_symbol], return_tuple=True)
        # Create agent network
        stack_rnn = StackRNN(1, d_model, hidden_size, True, 'lstm', stack_width, stack_depth,
                             k_mask_func=encoder.k_padding_mask)
        stack_linear = RNNLinearOut(gen_data.n_characters, hidden_size, bidirectional=False)
        agent_net = torch.nn.Sequential(encoder, stack_rnn, stack_linear)
        # Create agent
        selector = MolEnvProbabilityActionSelector(actions=gen_data.all_characters)
        probs_reg = StateActionProbRegistry()
        agent = PolicyAgent(model=agent_net,
                            action_selector=selector,
                            states_preprocessor=seq2tensor,
                            initial_state=hidden_states_func,
                            apply_softmax=True,
                            probs_registry=probs_reg,
                            device='cpu')
        # Reward function model
        rnn = RewardNetRNN(d_model, hidden_size, num_layers, bidirectional=True, unit_type='gru')
        reward_net = torch.nn.Sequential(encoder, rnn)
        reward_function = RewardFunction(reward_net=reward_net, mc_policy=agent, actions=gen_data.all_characters)
        # Create molecule generation environment
        env = MoleculeEnv(gen_data.all_characters, reward_function)
        # Ptan ops for aggregating experiences
        exp_source = ExperienceSourceFirstLast(env, agent, gamma=0.97)
        rl_alg = REINFORCE(agent_net, torch.optim.Adam(agent_net.parameters()), hidden_states_func)
        gen_data.set_batch_size(1)
        irl_alg = GuidedRewardLearningIRL(reward_net, torch.optim.Adam(reward_net.parameters()),
                                          demo_gen_data=gen_data)
        # Begin simulation and training
        batch_states, batch_actions, batch_qvals = [], [], []
        traj_prob = 1.
        for step_idx, exp in enumerate(exp_source):
            batch_states.append(exp.state)
            batch_actions.append(exp.action)
            batch_qvals.append(exp.reward)
            traj_prob *= probs_reg.get(list(exp.state), exp.action)
            print(f'state = {exp.state}, action = {exp.action}, reward = {exp.reward}, next_state = {exp.last_state}')
            if step_idx == 5:
                break
def get_initial_states(batch_size, hidden_size, num_layers, stack_depth, stack_width, unit_type):
    """Build the (hidden, cell, stack) start-state triple for a stack RNN.

    The cell state is only materialised for 'lstm' units; every other unit
    type gets None in its place.  All tensors are created on the CPU.
    """
    h0 = init_hidden(num_layers=num_layers, batch_size=batch_size,
                     hidden_size=hidden_size, num_dir=1, dvc='cpu')
    c0 = (init_cell(num_layers=num_layers, batch_size=batch_size,
                    hidden_size=hidden_size, num_dir=1, dvc='cpu')
          if unit_type == 'lstm' else None)
    stack0 = init_stack(batch_size, stack_width, stack_depth, dvc='cpu')
    return h0, c0, stack0
if __name__ == '__main__':
unittest.main()
| 10,957 | 15 | 369 |
4637f258b1a9d35f5bdaf175b42c38b8b87f63c1 | 8,376 | py | Python | datasets/nvisii.py | TontonTremblay/nerf_pl | 686e09b7c642778e226cdf83b70f47a6abeda73a | [
"MIT"
] | null | null | null | datasets/nvisii.py | TontonTremblay/nerf_pl | 686e09b7c642778e226cdf83b70f47a6abeda73a | [
"MIT"
] | null | null | null | datasets/nvisii.py | TontonTremblay/nerf_pl | 686e09b7c642778e226cdf83b70f47a6abeda73a | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset
import json
import numpy as np
import os
from PIL import Image
from torchvision import transforms as T
try:
from .ray_utils import *
except :
from ray_utils import *
import glob
import cv2
if __name__ == '__main__':
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
train_ds = NvisiiDataset(
# root_dir='../falling_google_1/',
# split='val',
# root_dir='/home/titans/code/nerf_pytorch/data_tmp/falling_google_1/',
root_dir='/home/jtremblay/code/conditional-gan-inverse-rendering/S-GAN/falling_1/falling_google_1/',
split='train',
img_wh=(400, 400)
)
cam_pos = []
ray_end = []
rgbs = []
for ii in range(100):
i = random.randint(0,len(train_ds)-1)
data = train_ds[i]['rays']
rgbs.append(train_ds[i]['rgbs'])
cam_pos.append([data[0],data[1],data[2],])
ray_end.append([data[3],data[4],data[5],])
# print(train_ds[0]['rays']
visualize_ray(np.array(cam_pos),np.array(ray_end),np.array(rgbs))
train_ds[0]['rays']
# for i in range(len(train_ds)):
# item = train_ds[i]
# c2w = item["c2w"]
# c2w = torch.cat((c2w, torch.FloatTensor([[0, 0, 0, 1]])), dim=0)
# #np.save("nvisii_c2ws/c2w{}.npy".format(i), c2w.numpy())
| 36.736842 | 109 | 0.521132 | import torch
from torch.utils.data import Dataset
import json
import numpy as np
import os
from PIL import Image
from torchvision import transforms as T
try:
from .ray_utils import *
except :
from ray_utils import *
import glob
import cv2
class NvisiiDataset(Dataset):
    """NeRF-style ray dataset for NVISII renders.

    Expects ``root_dir`` to contain per-frame ``*.json`` camera files with
    matching ``*.png`` images and ``*.seg.exr`` segmentation masks.  For the
    'train' split all rays/RGB values are precomputed into flat buffers and
    indexed per-ray; for 'val'/'test' each item is one whole image.
    """

    def __init__(self, root_dir, split='train', img_wh=(800, 800)):
        # split: one of 'train' (frames 0-99), 'val' (100-104), 'test' (105+).
        self.root_dir = root_dir
        self.split = split
        assert img_wh[0] == img_wh[1], 'image width must equal image height!'
        self.img_wh = img_wh
        self.define_transforms()
        self.read_meta()
        # Alpha-blended pixels are composited onto a white background.
        self.white_back = True

    def read_meta(self):
        """Load camera poses and precompute rays/RGB buffers for all frames."""
        # with open(os.path.join(self.root_dir,
        #                        f"transforms_{self.split}.json"), 'r') as f:
        #     self.meta = json.load(f)
        json_files = sorted(glob.glob(os.path.join(self.root_dir, f'*.json')))
        self.json_files = json_files
        # Fixed frame-index split boundaries (100/5/rest).
        if self.split == 'train':
            # json_files = json_files[0:2]
            json_files = json_files[0:100]
        elif self.split == 'val':
            # json_files = json_files[100:101]
            json_files = json_files[100:105]
        elif self.split == 'test':
            json_files = json_files[105:]
        transforms = []
        for i_index, json_file in enumerate(json_files):
            with open(json_file, 'r') as f:
                meta = json.load(f)
            # cam2world is stored column-major in the JSON, hence the .T
            # (NOTE(review): presumed from the transpose -- confirm).
            transforms.append(np.array(meta['camera_data']['cam2world']).T)
            # pass
        w, h = self.img_wh
        self.focal = 0.5*800/np.tan(0.5*0.785398)  # original focal length
        # when W=800
        self.focal *= self.img_wh[0]/800  # modify focal length to match size self.img_wh
        # bounds, common for all scenes
        self.near = 0.03
        self.far = 4.5
        self.bounds = np.array([self.near, self.far])
        # ray directions for all pixels, same for all images (same H, W, focal)
        self.directions = \
            get_ray_directions(h, w, self.focal)  # (h, w, 3)
        # if self.split == 'train':  # create buffer of all rays and rgb data
        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.all_rgbs = []
        for i_trans, trans in enumerate(transforms):
            pose = trans[:3, :4]
            # print(trans)
            # print(trans[:3,:4])
            self.poses += [torch.FloatTensor(pose)]
            c2w = self.poses[-1]
            # print(c2w)
            image_path = json_files[i_trans].replace("json", 'png')
            self.image_paths += [image_path]
            img = Image.open(image_path).convert('RGB')
            img = img.resize(self.img_wh, Image.LANCZOS)
            # load the mask
            mask_path = json_files[i_trans].replace("json", 'seg.exr')
            mask = cv2.imread(mask_path,
                              cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
            mask = cv2.resize(mask, self.img_wh, cv2.INTER_NEAREST)
            # Clamp float-max sentinel values and binarise to a 0/255 mask.
            mask[mask > 3.4028235e+37] = 0
            mask[mask < -3.4028235e+37] = 0
            mask[mask <= 0] = 0
            mask[mask > 0] = 255
            mask = mask.astype(np.uint8)
            # Attach the mask as an alpha channel.
            img = Image.fromarray(np.concatenate([np.array(img), mask[:, :, 0:1]], -1), 'RGBA')
            img = self.transform(img)  # (4, h, w)
            # print(img.shape)
            # raise()
            img = img.view(4, -1).permute(1, 0)  # (h*w, 4) RGBA
            img = img[:, :3]*img[:, -1:] + (1-img[:, -1:])  # blend A to RGB
            self.all_rgbs.append(img)
            rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
            # print(rays_o)
            # print(pose)
            # raise()
            self.all_rays += [torch.cat([rays_o, rays_d,
                                         self.near*torch.ones_like(rays_o[:, :1]),
                                         self.far*torch.ones_like(rays_o[:, :1])],
                                        1)]  # (h*w, 8)
        self.all_rays = torch.cat(self.all_rays, 0)  # (len(self.meta['frames])*h*w, 3)
        self.all_rgbs = torch.cat(self.all_rgbs, 0)  # (len(self.meta['frames])*h*w, 3)

    def define_transforms(self):
        # PIL image -> float tensor in [0, 1], channels first.
        self.transform = T.ToTensor()

    def __len__(self):
        # Train length is per-ray; other splits are per-image.
        if self.split == 'train':
            return len(self.all_rays)
        else:
            return len(self.poses)
        # return 8 # only validate 8 images (to support <=8 gpus)
        # return len(self.meta['frames'])

    def __getitem__(self, idx):
        """Return one ray (train) or one full image with rays (val/test)."""
        if self.split == 'train':  # use data in the buffers
            sample = {'rays': self.all_rays[idx],
                      'rgbs': self.all_rgbs[idx]}
        else:  # create data for each image separately
            c2w = self.poses[idx]
            image_path = self.image_paths[idx]
            img = Image.open(image_path).convert('RGB')
            img = img.resize(self.img_wh, Image.LANCZOS)
            # load the mask
            # NOTE(review): image_path already ends in ".png" here, so this
            # replace is a no-op and mask_path == image_path; read_meta derives
            # the mask from the .json path instead -- confirm which is intended.
            mask_path = image_path.replace("json", 'seg.exr')
            mask = cv2.imread(mask_path,
                              cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
            mask = cv2.resize(mask, self.img_wh, cv2.INTER_NEAREST)
            # NOTE(review): different binarisation than read_meta (max-value
            # pixels zeroed rather than float-max clamping) -- confirm.
            mask[mask == mask.max()] = 0
            mask[mask > 0] = 255
            mask = mask.astype(np.uint8)
            img = Image.fromarray(np.concatenate([np.array(img), mask[:, :, 0:1]], -1), 'RGBA')
            img = self.transform(img)  # (4, h, w)
            img = img.view(4, -1).permute(1, 0)  # (h*w, 4) RGBA
            img = img[:, :3]*img[:, -1:] + (1-img[:, -1:])  # blend A to RGB
            valid_mask = (torch.tensor(mask)[:, :, 0] > 0).flatten()
            # print(valid_mask.min(),valid_mask.max(),valid_mask.shape)
            rays_o, rays_d = get_rays(self.directions, c2w)
            rays = torch.cat([rays_o, rays_d,
                              self.near*torch.ones_like(rays_o[:, :1]),
                              self.far*torch.ones_like(rays_o[:, :1])],
                             1)  # (H*W, 8)
            # print(rays.min(),rays.max(),rays.shape)
            sample = {'rays': rays,
                      'rgbs': img,
                      'c2w': c2w,
                      'valid_mask': valid_mask}
        return sample
if __name__ == '__main__':
    # Ad-hoc visual sanity check: sample random training rays and plot them.
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    import random

    def visualize_ray(camera_position, ray_dirs, rgbs):
        """Plot the first 50 ray origins and directions in 3D, coloured by RGB."""
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_subplot(111, projection='3d')
        # print(np.unique(camera_position))
        for i in range(50):
            # ii_pick = random.randint(0,len(camera_position)-1)
            # i_sample = np.random.randint(0,camera_position.shape[0]-1)
            end_point = camera_position[i] + 2 * ray_dirs[i]
            # end_point = ray_dirs[i]
            ax.plot([camera_position[i][0], end_point[0]],
                    [camera_position[i][1], end_point[1]],
                    zs=[camera_position[i][2], end_point[2]])
            ax.scatter([camera_position[i][0]],
                       [camera_position[i][1]],
                       zs=[camera_position[i][2]], c=[rgbs[i].tolist()])
        plt.show()

    train_ds = NvisiiDataset(
        # root_dir='../falling_google_1/',
        # split='val',
        # root_dir='/home/titans/code/nerf_pytorch/data_tmp/falling_google_1/',
        root_dir='/home/jtremblay/code/conditional-gan-inverse-rendering/S-GAN/falling_1/falling_google_1/',
        split='train',
        img_wh=(400, 400)
    )
    cam_pos = []
    ray_end = []
    rgbs = []
    # In the 'train' split each dataset item is a single pixel ray of 8 floats:
    # origin (0:3), direction (3:6), near/far (6:8).
    for ii in range(100):
        i = random.randint(0, len(train_ds)-1)
        data = train_ds[i]['rays']
        rgbs.append(train_ds[i]['rgbs'])
        cam_pos.append([data[0], data[1], data[2], ])
        ray_end.append([data[3], data[4], data[5], ])
    # print(train_ds[0]['rays']
    visualize_ray(np.array(cam_pos), np.array(ray_end), np.array(rgbs))
    train_ds[0]['rays']
    # for i in range(len(train_ds)):
    #     item = train_ds[i]
    #     c2w = item["c2w"]
    #     c2w = torch.cat((c2w, torch.FloatTensor([[0, 0, 0, 1]])), dim=0)
    #     #np.save("nvisii_c2ws/c2w{}.npy".format(i), c2w.numpy())
| 6,778 | 8 | 183 |
9f03fba40dd4b0c265394628fc0d17731c9c4574 | 243 | py | Python | src/spidery/ua/family.py | A2Media-id/spidery | 48cf0f30fb85c176db952b111e329c8bf644f6b4 | [
"MIT"
] | null | null | null | src/spidery/ua/family.py | A2Media-id/spidery | 48cf0f30fb85c176db952b111e329c8bf644f6b4 | [
"MIT"
] | null | null | null | src/spidery/ua/family.py | A2Media-id/spidery | 48cf0f30fb85c176db952b111e329c8bf644f6b4 | [
"MIT"
] | null | null | null | from enum import Enum
| 17.357143 | 24 | 0.522634 | from enum import Enum
class Family(Enum):
IOS = 'iOS'
ANDROID = 'Android'
MACOS = 'macOS'
FIRE_OS = 'Fire OS'
LINUX = 'Linux'
OS_X = 'OS X'
SOLARIS = 'Solaris'
WINDOWS = 'Windows'
BSD = 'BSD'
| 0 | 193 | 25 |
e5c19d090124ddb1d0f176369135901abab03958 | 1,188 | py | Python | decimals.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 21 | 2016-11-16T20:08:56.000Z | 2021-12-11T23:13:05.000Z | decimals.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 1 | 2020-10-05T08:35:31.000Z | 2020-10-05T08:35:31.000Z | decimals.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 8 | 2016-11-12T22:54:55.000Z | 2021-02-10T10:46:23.000Z | '''Example of class decorator to make sure some attributes are always Decimal'''
from decimal import Decimal
from operator import attrgetter
def decimals(cls):
    '''A class decorator that ensures all attributes specified in the class
    __decimals__ will be Decimal.
    Make sure your class is a new style class (inherits from object), otherwise
    this won't work.
    Example:
    >>> @decimals
    ... class Sale(object):
    ...     __decimals__ = ['price']
    ...     def __init__(self, item, price):
    ...         self.item = item
    ...         self.price = price
    ...
    >>> s1 = Sale('socks', 11.2)
    >>> type(s1.price)
    <class 'decimal.Decimal'>
    >>> s1.price = 70
    >>> type(s1.price)
    <class 'decimal.Decimal'>
    >>>
    '''
    # BUG FIX: make_setter was referenced below but never defined, which made
    # the decorator raise NameError as soon as it was applied. The factory
    # binds `name` per attribute so each property writes to its own backing
    # slot, and the Decimal() call is what enforces the invariant.
    def make_setter(name):
        def setter(self, value):
            setattr(self, name, Decimal(value))
        return setter
    for attr in cls.__decimals__:
        name = '_{}'.format(attr)
        getter = attrgetter(name)
        setter = make_setter(name)
        # Replace the public attribute with a property backed by '_<attr>'.
        setattr(cls, attr, property(getter, setter, None, attr))
    return cls
if __name__ == '__main__':
import doctest
doctest.testmod()
| 25.826087 | 80 | 0.601852 | '''Example of class decorator to make sure some attributes are always Decimal'''
from decimal import Decimal
from operator import attrgetter
def decimals(cls):
    '''A class decorator that coerces the attributes named in the class-level
    __decimals__ list to Decimal on every assignment.
    Only works on new style classes (those inheriting from object), since it
    installs properties.
    Example:
    >>> @decimals
    ... class Sale(object):
    ...     __decimals__ = ['price']
    ...     def __init__(self, item, price):
    ...         self.item = item
    ...         self.price = price
    ...
    >>> s1 = Sale('socks', 11.2)
    >>> type(s1.price)
    <class 'decimal.Decimal'>
    >>> s1.price = 70
    >>> type(s1.price)
    <class 'decimal.Decimal'>
    >>>
    '''
    for public_name in cls.__decimals__:
        storage_name = '_{}'.format(public_name)

        # Bind storage_name via a default argument so every property writes
        # to its own backing slot (avoids the late-binding closure pitfall).
        def write(self, value, _slot=storage_name):
            setattr(self, _slot, Decimal(value))

        read = attrgetter(storage_name)
        setattr(cls, public_name, property(read, write, None, public_name))
    return cls
if __name__ == '__main__':
import doctest
doctest.testmod()
| 105 | 0 | 26 |
6f68ddb99dadaf13525373857bfa8fbb6c3c4282 | 916 | py | Python | ROMS-bathymetry/checkBathyRoughness.py | jvmcgovern/romstools_MI | 57c70c2f630a190fd23ac15ed2ce1b2fae949660 | [
"MIT"
] | 18 | 2015-03-02T14:27:00.000Z | 2021-12-16T05:53:08.000Z | ROMS-bathymetry/checkBathyRoughness.py | jvmcgovern/romstools_MI | 57c70c2f630a190fd23ac15ed2ce1b2fae949660 | [
"MIT"
] | 2 | 2016-08-08T12:21:45.000Z | 2021-02-03T22:40:52.000Z | ROMS-bathymetry/checkBathyRoughness.py | jvmcgovern/romstools_MI | 57c70c2f630a190fd23ac15ed2ce1b2fae949660 | [
"MIT"
] | 15 | 2015-10-05T15:36:28.000Z | 2022-01-28T16:46:54.000Z | import os
from numpy import *
from matplotlib.pyplot import *
from netCDF4 import Dataset
from pylab import *
from mpl_util import LevelColormap
import pyroms
import pyroms_toolbox
from mpl_toolkits.basemap import Basemap, shiftgrid
import mpl_toolkits.basemap as mp
from bathy_smoother import *
import mpl_util
import laplace_filter
from bathy_smoother import *
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime.datetime(2015, 7, 30)
__modified__ = datetime.datetime(2015, 7, 30)
__version__ = "1.0"
__status__ = "Development, 30.7.2015"
"""Get the grid file defined in /Users/trondkr/Projects/KINO/map/gridid.txt"""
grd = pyroms.grid.get_ROMS_grid('KINO1600M')
""" Check bathymetry roughness """
print "Checking rougness"
RoughMat = bathy_tools.RoughnessMatrix(grd.vgrid.h, grd.hgrid.mask_rho)
print '1a: Max Roughness value in file is: ', RoughMat.max()
| 28.625 | 78 | 0.779476 | import os
from numpy import *
from matplotlib.pyplot import *
from netCDF4 import Dataset
from pylab import *
from mpl_util import LevelColormap
import pyroms
import pyroms_toolbox
from mpl_toolkits.basemap import Basemap, shiftgrid
import mpl_toolkits.basemap as mp
from bathy_smoother import *
import mpl_util
import laplace_filter
from bathy_smoother import *
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime.datetime(2015, 7, 30)
__modified__ = datetime.datetime(2015, 7, 30)
__version__ = "1.0"
__status__ = "Development, 30.7.2015"
"""Get the grid file defined in /Users/trondkr/Projects/KINO/map/gridid.txt"""
grd = pyroms.grid.get_ROMS_grid('KINO1600M')
""" Check bathymetry roughness """
print "Checking rougness"
RoughMat = bathy_tools.RoughnessMatrix(grd.vgrid.h, grd.hgrid.mask_rho)
print '1a: Max Roughness value in file is: ', RoughMat.max()
| 0 | 0 | 0 |
3b8265f4fa3873c26ce1ad12e1478cea5dc98e3a | 985 | py | Python | py/datacentric/schema/declaration/value_decl.py | datacentricorg/datacentric-py | 40113ddfb68e62d98b880b3c7427db5cc9fbd8cd | [
"Apache-2.0"
] | 1 | 2020-02-03T18:32:42.000Z | 2020-02-03T18:32:42.000Z | py/datacentric/schema/declaration/value_decl.py | datacentricorg/datacentric-py | 40113ddfb68e62d98b880b3c7427db5cc9fbd8cd | [
"Apache-2.0"
] | null | null | null | py/datacentric/schema/declaration/value_decl.py | datacentricorg/datacentric-py | 40113ddfb68e62d98b880b3c7427db5cc9fbd8cd | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2013-present The DataCentric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import attr
from datacentric.storage.data import Data
from datacentric.schema.declaration.value_param_type import ValueParamType
@attr.s(slots=True, auto_attribs=True)
class ValueDecl(Data):
"""Value or atomic element declaration."""
type: ValueParamType = attr.ib(default=None, kw_only=True, metadata={'optional': True})
"""Value or atomic element type enumeration."""
| 37.884615 | 91 | 0.763452 | # Copyright (C) 2013-present The DataCentric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import attr
from datacentric.storage.data import Data
from datacentric.schema.declaration.value_param_type import ValueParamType
@attr.s(slots=True, auto_attribs=True)
class ValueDecl(Data):
    """Value or atomic element declaration."""
    # Keyword-only, defaults to None; metadata marks the field as optional
    # (NOTE(review): presumably consumed by the serialization layer -- confirm).
    type: ValueParamType = attr.ib(default=None, kw_only=True, metadata={'optional': True})
    """Value or atomic element type enumeration."""
| 0 | 0 | 0 |
c72e7a7d5d73709ff82a2d449b0cb6fa31402a94 | 3,476 | py | Python | templates/header.py | nosmokingbandit/Autolycus | 37b376ba2fc27aa7e98f0071b457ebfbc605d4ab | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2017-03-19T05:51:53.000Z | 2021-01-25T20:59:13.000Z | templates/header.py | nosmokingbandit/Autolycus | 37b376ba2fc27aa7e98f0071b457ebfbc605d4ab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | templates/header.py | nosmokingbandit/Autolycus | 37b376ba2fc27aa7e98f0071b457ebfbc605d4ab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import core
from dominate.tags import *
class Header():
''' Header for pages with NavBar.
'''
@staticmethod
# pylama:ignore=W0401
| 46.972973 | 89 | 0.322209 | import core
from dominate.tags import *
class Header():
    ''' Header for pages with NavBar.
    '''
    @staticmethod
    def insert_header(current):
        # Emits the site header via the dominate DSL into the enclosing page
        # context. `current` names the active nav item ('settings',
        # 'add_movie' or 'artists') and tags it with the 'current' CSS class.
        with div(id='header'):
            with div(id='header_container'):
                img(src=core.URL_BASE + '/static/images/logo.png', alt='')
                with ul(id='nav'):
                    if current == 'settings':
                        cls = 'settings current'
                    else:
                        cls = 'settings'
                    # Settings entry carries a dropdown of sub-pages.
                    with li(u'Settings', cls=cls):
                        with ul(cls='settings_menu'):
                            with a(href=core.URL_BASE + '/settings/server/'):
                                with li():
                                    i(cls='fa fa-server')
                                    span(u'Server')
                            with a(href=core.URL_BASE + '/settings/search/'):
                                with li():
                                    i(cls='fa fa-search')
                                    span(u'Search')
                            with a(href=core.URL_BASE + '/settings/quality/'):
                                with li():
                                    i(cls='fa fa-filter')
                                    span(u'Quality')
                            with a(href=core.URL_BASE + '/settings/providers/'):
                                with li():
                                    i(cls='fa fa-plug')
                                    span(u'Providers')
                            with a(href=core.URL_BASE + '/settings/downloader/'):
                                with li():
                                    i(cls='fa fa-download')
                                    span(u'Downloader')
                            with a(href=core.URL_BASE + '/settings/postprocessing/'):
                                with li():
                                    i(cls='fa fa-film')
                                    span(u'Post Processing')
                            with a(href=core.URL_BASE + '/settings/plugins/'):
                                with li():
                                    i(cls='fa fa-puzzle-piece')
                                    span(u'Plugins')
                            with a(href=core.URL_BASE + '/settings/logs/'):
                                with li():
                                    i(cls='fa fa-file-text')
                                    span(u'Logs')
                            with a(href=core.URL_BASE + '/settings/about/'):
                                with li():
                                    i(cls='fa fa-info-circle')
                                    span(u'About')
                    with a(href=core.URL_BASE + '/add_movie/'):
                        if current == 'add_movie':
                            cls = 'add_movie current'
                        else:
                            cls = 'add_movie'
                        li(u'Add', cls=cls)
                    with a(href=core.URL_BASE + '/artists/'):
                        if current == 'artists':
                            cls = 'artists current'
                        else:
                            cls = 'artists'
                        li(u'Artists', cls=cls)
# pylama:ignore=W0401
| 3,304 | 0 | 26 |
a31032bdb20c827e80d029f1290939a29e66f5e4 | 2,047 | py | Python | plugins/mcafee_atd/icon_mcafee_atd/actions/get_report/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/mcafee_atd/icon_mcafee_atd/actions/get_report/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/mcafee_atd/icon_mcafee_atd/actions/get_report/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
| 20.068627 | 123 | 0.527113 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    # Short description of the action, surfaced in the InsightConnect UI.
    DESCRIPTION = "Download the report of an analysis"


class Input:
    # Key names of the action's input schema (see GetReportInput.schema).
    ID = "id"
    REPORT_TYPE = "report_type"
    TYPE_ID = "type_id"


class Output:
    # Key names of the action's output schema (see GetReportOutput.schema).
    FILE = "file"
    REPORT = "report"
class GetReportInput(insightconnect_plugin_runtime.Input):
    """JSON schema for the get_report action's input (generated code).

    This file is generated by the Komand SDK ("DO NOT EDIT" header above);
    prefer changing the action spec and regenerating over editing by hand.
    """
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "id": {
      "type": "string",
      "title": "ID",
      "description": "The Task ID, job ID, or MD5 value for the prepared analysis report",
      "order": 1
    },
    "report_type": {
      "type": "string",
      "title": "Report Type",
      "description": "The file type of the report to return in the file output",
      "default": "HTML",
      "enum": [
        "HTML",
        "TXT",
        "ZIP",
        "XML",
        "IOC",
        "STIX",
        "PDF",
        "SAMPLE"
      ],
      "order": 3
    },
    "type_id": {
      "type": "string",
      "title": "Type ID",
      "description": "Type of given ID parameter, the type must match the value of the ID field. The default value is MD5",
      "default": "MD5",
      "enum": [
        "MD5",
        "TASK ID",
        "JOB ID"
      ],
      "order": 2
    }
  },
  "required": [
    "id"
  ]
}
""")

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses if this class is
        # ever subclassed; harmless for generated leaf classes, fix belongs
        # in the generator.
        super(self.__class__, self).__init__(self.schema)
class GetReportOutput(insightconnect_plugin_runtime.Output):
    """Schema-validated output for the get_report action (generated -- do not edit)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "file": {
      "type": "string",
      "title": "File",
      "displayType": "bytes",
      "description": "Prepared analysis report",
      "format": "bytes",
      "order": 1
    },
    "report": {
      "type": "object",
      "title": "Report",
      "description": "Return report in JSON",
      "order": 2
    }
  },
  "required": [
    "file"
  ]
}
    """)
    def __init__(self):
        # NOTE(review): same generated super(self.__class__, ...) pattern as the
        # input class above; intentionally unmodified.
        super(self.__class__, self).__init__(self.schema)
| 112 | 1,718 | 115 |
997c77916a99af9809c1b3005c521f58c47d0f70 | 243 | py | Python | dnnv/properties/expressions/terms/__init__.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 5 | 2022-01-28T20:30:34.000Z | 2022-03-17T09:26:52.000Z | dnnv/properties/expressions/terms/__init__.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 9 | 2022-01-27T03:50:28.000Z | 2022-02-08T18:42:17.000Z | dnnv/properties/expressions/terms/__init__.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 2 | 2022-02-03T17:32:43.000Z | 2022-03-24T16:38:49.000Z | from .base import Term
from .constant import Constant
from .image import Image
from .network import Network
from .parameter import Parameter
from .symbol import Symbol
__all__ = ["Constant", "Image", "Network", "Parameter", "Symbol", "Term"]
| 27 | 73 | 0.757202 | from .base import Term
from .constant import Constant
from .image import Image
from .network import Network
from .parameter import Parameter
from .symbol import Symbol
__all__ = ["Constant", "Image", "Network", "Parameter", "Symbol", "Term"]
| 0 | 0 | 0 |
91db883bb958820850b0586f3e218178cc92cc3b | 679 | py | Python | Tareas/Primeros_Problemas/PrimerProblema.py | monotera/Analisis-numerico_-1057-_2130 | f0acf6856028be8a20e33efd11f70d0817fdeeb0 | [
"MIT"
] | null | null | null | Tareas/Primeros_Problemas/PrimerProblema.py | monotera/Analisis-numerico_-1057-_2130 | f0acf6856028be8a20e33efd11f70d0817fdeeb0 | [
"MIT"
] | null | null | null | Tareas/Primeros_Problemas/PrimerProblema.py | monotera/Analisis-numerico_-1057-_2130 | f0acf6856028be8a20e33efd11f70d0817fdeeb0 | [
"MIT"
] | null | null | null | """
Created on Fri Jul 23 18:02:49 2021
@author: Nelson Mosquera (Monotera)
"""
# Problema 1
import math
import numpy as np
print("Raiz cuadrarda de 7 con una tolerancia de 10^-8 ", sqrt(7, 10**-124, 1))
print("Raiz cuadrarda de 7 con una tolerancia de 10^-16 ", sqrt(7, 10**-16, 1))
print("Raiz cuadrarda de 7 utilizando funcion de python sqrt ", math.sqrt(7))
| 24.25 | 79 | 0.586156 | """
Created on Fri Jul 23 18:02:49 2021
@author: Nelson Mosquera (Monotera)
"""
# Problema 1
import math
import numpy as np
def sqrt(n, E, x):
    """Approximate sqrt(n) with Newton's (Heron's) iteration.

    Args:
        n: Non-negative number whose square root is wanted.
        E: Absolute tolerance; iteration stops once successive
           estimates differ by at most E.
        x: Positive initial guess.

    Returns:
        np.longdouble approximation of sqrt(n) (exactly 0 for n == 0).

    Raises:
        ValueError: If x <= 0 or n < 0.
    """
    if x <= 0 or n < 0:
        raise ValueError("El valor de n y/o x no puede ser negativo")
    elif n == 0:
        return 0
    # Heron's iteration y = (x + n/x) / 2 converges quadratically to sqrt(n).
    # BUGFIX: use np.longdouble (extended-precision REAL). The original
    # np.clongdouble is a *complex* type, so every "%f" print silently
    # discarded the imaginary part and raised a ComplexWarning.
    y = np.longdouble(1/2*(x + (n/x)))
    while abs(x-y) > E:
        x = y
        y = np.longdouble(1/2*(x + (n/x)))
        print("%.64f" % y)  # debug trace of each iterate (kept from original)
    return y
print("Raiz cuadrarda de 7 con una tolerancia de 10^-8 ", sqrt(7, 10**-124, 1))
print("Raiz cuadrarda de 7 con una tolerancia de 10^-16 ", sqrt(7, 10**-16, 1))
print("Raiz cuadrarda de 7 utilizando funcion de python sqrt ", math.sqrt(7))
| 288 | 0 | 23 |
ae634af624f9852dfe25586c8bbf00c012a782f6 | 1,053 | py | Python | redirect/views_show.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | redirect/views_show.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | redirect/views_show.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/dms/redirect/views_show.py
.. zeigt den Inhalt einer Weiterleitung an
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 23.01.2007 Beginn der Arbeit
"""
import string
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def redirect_show(request, item_container):
    """ zeigt den Inhalt der Weiterleitung """
    # BUGFIX: ``string.find`` was removed in Python 3; the ``in`` operator
    # performs the identical containment test on both Python 2 and 3.
    # NOTE(review): 'https://' targets still take the relative branch,
    # exactly as in the original code.
    if 'http://' not in item_container.item.url_more:
        # Relative target: resolve against the container path and map the
        # site's base folder onto the site URL.
        site = item_container.container.site
        path = item_container.container.path + item_container.item.url_more
        length = len(site.base_folder)
        if length < len(path):
            url = site.url + path[length:]
        else:
            url = site.url + '/'
    else:
        # Absolute target: redirect unchanged.
        url = item_container.item.url_more
    return HttpResponseRedirect(url)
| 27 | 71 | 0.691358 | # -*- coding: utf-8 -*-
"""
/dms/redirect/views_show.py
.. zeigt den Inhalt einer Weiterleitung an
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 23.01.2007 Beginn der Arbeit
"""
import string
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def redirect_show(request, item_container):
    """ zeigt den Inhalt der Weiterleitung """
    # BUGFIX: ``string.find`` was removed in Python 3; the ``in`` operator
    # performs the identical containment test on both Python 2 and 3.
    # NOTE(review): 'https://' targets still take the relative branch,
    # exactly as in the original code.
    if 'http://' not in item_container.item.url_more:
        # Relative target: resolve against the container path and map the
        # site's base folder onto the site URL.
        site = item_container.container.site
        path = item_container.container.path + item_container.item.url_more
        length = len(site.base_folder)
        if length < len(path):
            url = site.url + path[length:]
        else:
            url = site.url + '/'
    else:
        # Absolute target: redirect unchanged.
        url = item_container.item.url_more
    return HttpResponseRedirect(url)
| 0 | 0 | 0 |
b4211d52290de151098e9f206a261a7c961085dc | 2,215 | py | Python | pyasn1_alt_modules/rfc6482.py | CBonnell/pyasn1-alt-modules | cd3773ceaa6ab31b80b0b4013818ac47ee6215b8 | [
"BSD-2-Clause"
] | 2 | 2021-06-15T16:24:39.000Z | 2022-03-28T04:41:59.000Z | pyasn1_alt_modules/rfc6482.py | CBonnell/pyasn1-alt-modules | cd3773ceaa6ab31b80b0b4013818ac47ee6215b8 | [
"BSD-2-Clause"
] | null | null | null | pyasn1_alt_modules/rfc6482.py | CBonnell/pyasn1-alt-modules | cd3773ceaa6ab31b80b0b4013818ac47ee6215b8 | [
"BSD-2-Clause"
] | 1 | 2022-01-25T16:00:09.000Z | 2022-01-25T16:00:09.000Z | #
# This file is part of pyasn1-alt-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to include the opentypemap manager.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# RPKI Route Origin Authorizations (ROAs)
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc6482.txt
# https://www.rfc-editor.org/errata/eid5881
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import opentypemap
cmsContentTypesMap = opentypemap.get('cmsContentTypesMap')
MAX = float('inf')
id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24')
# Update the CMS Content Types Map
_cmsContentTypesMapUpdate = {
id_ct_routeOriginAuthz: RouteOriginAttestation(),
}
cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
| 28.397436 | 80 | 0.721445 | #
# This file is part of pyasn1-alt-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to include the opentypemap manager.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# RPKI Route Origin Authorizations (ROAs)
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc6482.txt
# https://www.rfc-editor.org/errata/eid5881
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import opentypemap
cmsContentTypesMap = opentypemap.get('cmsContentTypesMap')
MAX = float('inf')
id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24')
class ASID(univ.Integer):
    """ASID ::= INTEGER -- Autonomous System number asserted by the ROA."""
    pass
class IPAddress(univ.BitString):
    """IPAddress ::= BIT STRING -- an IP address prefix (RFC 6482)."""
    pass
class ROAIPAddress(univ.Sequence):
    """ROAIPAddress ::= SEQUENCE { address IPAddress, maxLength INTEGER OPTIONAL }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('address', IPAddress()),
        namedtype.OptionalNamedType('maxLength', univ.Integer())
    )
class ROAIPAddressFamily(univ.Sequence):
    """ROAIPAddressFamily ::= SEQUENCE of an address family plus its prefixes.

    addressFamily is constrained to 2..3 octets; addresses holds 1..MAX
    ROAIPAddress entries.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('addressFamily',
            univ.OctetString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
        namedtype.NamedType('addresses',
            univ.SequenceOf(componentType=ROAIPAddress()).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
class RouteOriginAttestation(univ.Sequence):
    """RouteOriginAttestation eContent (RFC 6482, with errata 5881).

    version is [0] EXPLICIT and DEFAULTs to 0; ipAddrBlocks carries 1..MAX
    ROAIPAddressFamily entries.
    """
    componentType = namedtype.NamedTypes(
        namedtype.DefaultedNamedType('version',
            univ.Integer().subtype(explicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
        namedtype.NamedType('asID', ASID()),
        namedtype.NamedType('ipAddrBlocks',
            univ.SequenceOf(componentType=ROAIPAddressFamily()).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
# Update the CMS Content Types Map
_cmsContentTypesMapUpdate = {
id_ct_routeOriginAuthz: RouteOriginAttestation(),
}
cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
| 0 | 1,096 | 115 |
a910bac7a136a1bb81e8bd41868f02c0c9637afa | 19,057 | py | Python | mypyc/build.py | r3m0t/mypyc | 118d08c2fa8235f24724880b66b991b79462ff76 | [
"PSF-2.0"
] | null | null | null | mypyc/build.py | r3m0t/mypyc | 118d08c2fa8235f24724880b66b991b79462ff76 | [
"PSF-2.0"
] | null | null | null | mypyc/build.py | r3m0t/mypyc | 118d08c2fa8235f24724880b66b991b79462ff76 | [
"PSF-2.0"
] | null | null | null | """Support for building extensions using mypyc with distutils or setuptools
The main entry points are mypycify, which produces a list of extension
modules to be passed to setup, and MypycifyBuildExt, which must be
registered as a BuildExt command. A trivial setup.py for a mypyc built
project, then, looks like:
from distutils.core import setup
from mypyc.build import mypycify, MypycifyBuildExt
setup(name='test_module',
ext_modules=mypycify(['foo.py']),
cmdclass={{'build_ext': MypycifyBuildExt}},
See the mypycify docs for additional arguments.
Because MypycifyBuildExt needs to inherit from the
distutils/setuputils build_ext, we need to know at import-time whether
we are using distutils or setuputils. We hackily decide based on
whether setuptools has been imported already.
"""
import glob
import sys
import os.path
import subprocess
import hashlib
import time
import shutil
from typing import List, Tuple, Any, Optional, Union, Dict, cast
MYPY = False
if MYPY:
from typing import NoReturn
from mypy.main import process_options
from mypy.errors import CompileError
from mypy.options import Options
from mypy.build import BuildSource
from mypyc.namegen import exported_name
from mypyc import emitmodule
# We can work with either setuptools or distutils, and pick setuptools
# if it has been imported.
assert 'setuptools' in sys.modules or 'distutils' in sys.modules, (
"'setuptools' or 'distutils' must be imported before mypyc.build")
USE_SETUPTOOLS = 'setuptools' in sys.modules
if USE_SETUPTOOLS:
from setuptools import setup, Extension # type: ignore
from setuptools.command.build_ext import build_ext # type: ignore
else:
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext # type: ignore
from distutils import sysconfig, ccompiler
def setup_mypycify_vars() -> None:
    """Patch distutils config vars in place (admittedly hacky)."""
    # The returned mapping may contain ints, but we only touch str entries.
    config = cast(Dict[str, str], sysconfig.get_config_vars())
    if sys.platform != 'darwin':
        return
    # On OS X, emit dynamic libraries rather than bundles so that we can link
    # against multi-module shared libraries (https://stackoverflow.com/a/32765319).
    config['LDSHARED'] = config['LDSHARED'].replace('-bundle', '-dynamiclib')
    # Also drop 32-bit builds: the generated code overflows a 32-bit Mach-O
    # object. There has to be a better way to do this.
    for key in ('LDFLAGS', 'CFLAGS'):
        config[key] = config[key].replace('-arch i386', '')
class MypycifyExtension(Extension):
    """Represents an Extension generated by mypyc.
    Stores a little bit of extra metadata to support that.
    Arguments:
    * is_mypyc_shared: True if this is a shared library generated to implement
      multiple modules
    * mypyc_shared_target: If this is a shim library, a reference to the shared library
      that actually contains the implementation of the module
    """
    # NOTE(review): the __init__ that accepts these keyword arguments is not
    # visible in this chunk of the file -- confirm it exists before relying on it.
def get_mypy_config(paths: List[str],
                    mypy_options: Optional[List[str]]) -> Tuple[List[BuildSource], Options]:
    """Build mypy BuildSources and Options from the files and flag list.

    Note: a caller-supplied non-empty options list is extended in place,
    matching the original behaviour.
    """
    # Funnel everything through mypy's own CLI parser; '--' separates flags
    # from file arguments.
    if not mypy_options:
        mypy_options = []
    mypy_options += ['--']
    mypy_options += paths
    sources, options = process_options(mypy_options)
    # mypyc only supports strict-optional Python 3.
    if options.python_version[0] == 2:
        fail('Python 2 not supported')
    if not options.strict_optional:
        fail('Disabling strict optional checking not supported')
    options.show_traceback = True
    # Needed to get types for all AST nodes
    options.export_types = True
    # TODO: Support incremental checking
    options.incremental = False
    # Flag every module so the mypyc plugin processes it.
    for src in sources:
        per_module = options.per_module_options.setdefault(src.module, {})
        per_module['mypyc'] = True
    return sources, options
shim_template_unix = """\
#include <Python.h>
PyObject *CPyInit_{full_modname}(void);
PyMODINIT_FUNC
PyInit_{modname}(void)
{{
return CPyInit_{full_modname}();
}}
"""
# As far as I could tell, Windows lacks the rpath style features we
# would need in automatically load the shared library (located
# relative to the module library) when a module library is loaded,
# which means that instead we get to do it dynamically.
#
# We do this by, at module initialization time, finding the location
# of the module dll and using it to compute the location of the shared
# library. We then load the shared library with LoadLibrary, find the
# appropriate CPyInit_ routine using GetProcAddress, and call it.
#
# The relative path of the shared library (from the shim library) is provided
# as the preprocessor define MYPYC_LIBRARY.
shim_template_windows = r"""\
#include <Python.h>
#include <windows.h>
#include <stdlib.h>
#include <stdio.h>
EXTERN_C IMAGE_DOS_HEADER __ImageBase;
typedef PyObject *(__cdecl *INITPROC)();
PyMODINIT_FUNC
PyInit_{modname}(void)
{{
char path[MAX_PATH];
char drive[MAX_PATH];
char directory[MAX_PATH];
HINSTANCE hinstLib;
INITPROC proc;
// get the file name of this dll
DWORD res = GetModuleFileName((HINSTANCE)&__ImageBase, path, sizeof(path));
if (res == 0 || res == sizeof(path)) {{
PyErr_SetString(PyExc_RuntimeError, "GetModuleFileName failed");
return NULL;
}}
// find the directory this dll is in
_splitpath(path, drive, directory, NULL, NULL);
// and use it to construct a path to the shared library
snprintf(path, sizeof(path), "%s%s%s", drive, directory, MYPYC_LIBRARY);
hinstLib = LoadLibrary(path);
if (!hinstLib) {{
PyErr_SetString(PyExc_RuntimeError, "LoadLibrary failed");
return NULL;
}}
proc = (INITPROC)GetProcAddress(hinstLib, "CPyInit_{full_modname}");
if (!proc) {{
PyErr_SetString(PyExc_RuntimeError, "GetProcAddress failed");
return NULL;
}}
return proc();
}}
// distutils sometimes spuriously tells cl to export CPyInit___init__,
// so provide that so it chills out
PyMODINIT_FUNC PyInit___init__(void) {{ return PyInit_{modname}(); }}
"""
def generate_c_extension_shim(full_module_name: str, module_name: str, dirname: str) -> str:
    """Write a C shim whose PyInit just forwards to the shared library."""
    # Flatten the dotted module path into a single file name. XXX
    shim_name = '%s.c' % full_module_name.replace('.', '___')
    shim_path = os.path.join(dirname, shim_name)
    # Windows needs the LoadLibrary-based shim; elsewhere a direct call works.
    template = shim_template_windows if sys.platform == 'win32' else shim_template_unix
    with open(shim_path, 'w') as f:
        f.write(template.format(modname=module_name,
                                full_modname=exported_name(full_module_name)))
    return shim_path
def shared_lib_name(modules: List[str]) -> str:
    """Derive a (probably unique) library name from the module list."""
    digest = hashlib.sha1(','.join(modules).encode()).hexdigest()
    return 'mypyc_{}'.format(digest[:20])
def include_dir() -> str:
    """Return the absolute path of the bundled lib-rt include directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'lib-rt')
def generate_c(sources: List[BuildSource], options: Options,
               multi_file: bool,
               shared_lib_name: Optional[str],
               verbose: bool = False) -> Tuple[List[Tuple[str, str]], str]:
    """Type-check the sources and lower them to C.

    Returns the generated C sources plus (for debugging) the pretty-printed IR.
    """
    module_names = [src.module for src in sources]
    # Type-check first; print mypy's messages and abort on failure.
    start = time.time()
    try:
        result = emitmodule.parse_and_typecheck(sources, options)
    except CompileError as e:
        for line in e.messages:
            print(line)
        fail('Typechecking failure')
    checked = time.time()
    if verbose:
        print("Parsed and typechecked in {:.3f}s".format(checked - start))
    # Lower to C, collecting the pretty-printed IR as a side channel.
    ops = []  # type: List[str]
    ctext = emitmodule.compile_modules_to_c(result, module_names, shared_lib_name,
                                            multi_file, ops=ops)
    done = time.time()
    if verbose:
        print("Compiled to C in {:.3f}s".format(done - checked))
    return ctext, '\n'.join(ops)
def build_using_shared_lib(sources: List[BuildSource],
                           lib_name: str,
                           cfiles: List[str],
                           build_dir: str,
                           extra_compile_args: List[str],
                           ) -> List[MypycifyExtension]:
    """Build the Extension list for a multi-module (shared-library) build.

    The first entry is the shared library holding all generated code; each
    compiled module then gets a tiny shim extension whose init function
    simply forwards into the shared library.  The shared library is
    modelled as a Python extension only so setuptools processes it
    cleanly -- it is not an importable module on its own.
    """
    shared = MypycifyExtension(
        'lib' + lib_name,
        is_mypyc_shared=True,
        sources=cfiles,
        include_dirs=[include_dir()],
        extra_compile_args=extra_compile_args,
    )
    result = [shared]
    for src in sources:
        short_name = src.module.rpartition('.')[2]
        shim_file = generate_c_extension_shim(src.module, short_name, build_dir)
        assert src.path
        # Package modules need the explicit '.__init__' suffix so the shim
        # lands in the right spot in the build tree.
        target_name = src.module
        if os.path.split(src.path)[1] == '__init__.py':
            target_name += '.__init__'
        result.append(MypycifyExtension(
            target_name,
            mypyc_shared_target=shared,
            sources=[shim_file],
            extra_compile_args=extra_compile_args,
        ))
    return result
def build_single_module(sources: List[BuildSource],
                        cfiles: List[str],
                        extra_compile_args: List[str],
                        ) -> List[MypycifyExtension]:
    """Build the Extension list for a standalone single-module build.

    No shared library is involved, so the lone module owns all C sources.
    """
    module = sources[0].module
    return [
        MypycifyExtension(
            module,
            sources=cfiles,
            include_dirs=[include_dir()],
            extra_compile_args=extra_compile_args,
        )
    ]
def mypycify(paths: List[str],
             mypy_options: Optional[List[str]] = None,
             opt_level: str = '3',
             multi_file: bool = False,
             skip_cgen: bool = False,
             verbose: bool = False) -> List[MypycifyExtension]:
    """Main entry point to building using mypyc.
    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.
    Arguments:
    * paths: A list of file paths to build. It may contain globs.
    * mypy_options: Optionally, a list of command line flags to pass to mypy.
      (This can also contain additional files, for compatibility reasons.)
    * opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
    * multi_file: Passed through to the C generator; also disables MSVC
      whole-program optimization (see the /GL- handling below).
    * skip_cgen: Reuse *.c files already present in the build directory
      instead of regenerating them (used by the test harness).
    * verbose: Print timing information during C generation.
    """
    setup_mypycify_vars()
    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attribues on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler() # type: Any
    sysconfig.customize_compiler(compiler)
    expanded_paths = []
    for path in paths:
        expanded_paths.extend(glob.glob(path))
    build_dir = 'build' # TODO: can this be overridden??
    try:
        os.mkdir(build_dir)
    except FileExistsError:
        pass
    sources, options = get_mypy_config(expanded_paths, mypy_options)
    # We generate a shared lib if there are multiple modules or if any
    # of the modules are in package. (Because I didn't want to fuss
    # around with making the single module code handle packages.)
    use_shared_lib = len(sources) > 1 or any('.' in x.module for x in sources)
    lib_name = shared_lib_name([source.module for source in sources]) if use_shared_lib else None
    # We let the test harness make us skip doing the full compilation
    # so that it can do a corner-cutting version without full stubs.
    # TODO: Be able to do this based on file mtimes?
    if not skip_cgen:
        cfiles, ops_text = generate_c(sources, options, multi_file, lib_name, verbose)
        # TODO: unique names?
        with open(os.path.join(build_dir, 'ops.txt'), 'w') as f:
            f.write(ops_text)
        cfilenames = []
        # Write out the generated C and keep only real .c files (headers and
        # other outputs are written but not compiled directly).
        for cfile, ctext in cfiles:
            cfile = os.path.join(build_dir, cfile)
            with open(cfile, 'w', encoding='utf-8') as f:
                f.write(ctext)
            if os.path.splitext(cfile)[1] == '.c':
                cfilenames.append(cfile)
    else:
        cfilenames = glob.glob(os.path.join(build_dir, '*.c'))
    # Compiler-family-specific flags; warnings the generated code triggers
    # are deliberately silenced.
    cflags = [] # type: List[str]
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level), '-Werror', '-Wno-unused-function', '-Wno-unused-label',
            '-Wno-unreachable-code', '-Wno-unused-variable', '-Wno-trigraphs',
            '-Wno-unused-command-line-argument'
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102', # unreferenced label
            '/wd4101', # unreferenced local variable
            '/wd4146', # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025', # warning about overriding /GL
            ]
    # Copy the runtime library in
    rt_file = os.path.join(build_dir, 'CPy.c')
    shutil.copyfile(os.path.join(include_dir(), 'CPy.c'), rt_file)
    cfilenames.append(rt_file)
    if use_shared_lib:
        assert lib_name
        extensions = build_using_shared_lib(sources, lib_name, cfilenames, build_dir, cflags)
    else:
        extensions = build_single_module(sources, cfilenames, cflags)
    return extensions
class MypycifyBuildExt(build_ext):
    """Custom setuptools/distutils build_ext command.
    This overrides the build_extension method so that we can hook in
    before and after the actual compilation.
    The key thing here is that we need to hook in after compilation on
    OS X, because we need to use `install_name_tool` to fix up the
    libraries to use relative paths.
    We hook in before compilation to update library paths to include
    where the built shared library is placed. (We probably could have
    hacked this together without hooking in here, but we were hooking
    in already and build_ext makes it easy to get that information)
    """
    # NOTE(review): the overriding method bodies are not visible in this
    # chunk of the file.
| 38.114 | 97 | 0.651624 | """Support for building extensions using mypyc with distutils or setuptools
The main entry points are mypycify, which produces a list of extension
modules to be passed to setup, and MypycifyBuildExt, which must be
registered as a BuildExt command. A trivial setup.py for a mypyc built
project, then, looks like:
from distutils.core import setup
from mypyc.build import mypycify, MypycifyBuildExt
setup(name='test_module',
ext_modules=mypycify(['foo.py']),
cmdclass={{'build_ext': MypycifyBuildExt}},
See the mypycify docs for additional arguments.
Because MypycifyBuildExt needs to inherit from the
distutils/setuputils build_ext, we need to know at import-time whether
we are using distutils or setuputils. We hackily decide based on
whether setuptools has been imported already.
"""
import glob
import sys
import os.path
import subprocess
import hashlib
import time
import shutil
from typing import List, Tuple, Any, Optional, Union, Dict, cast
MYPY = False
if MYPY:
from typing import NoReturn
from mypy.main import process_options
from mypy.errors import CompileError
from mypy.options import Options
from mypy.build import BuildSource
from mypyc.namegen import exported_name
from mypyc import emitmodule
# We can work with either setuptools or distutils, and pick setuptools
# if it has been imported.
assert 'setuptools' in sys.modules or 'distutils' in sys.modules, (
"'setuptools' or 'distutils' must be imported before mypyc.build")
USE_SETUPTOOLS = 'setuptools' in sys.modules
if USE_SETUPTOOLS:
from setuptools import setup, Extension # type: ignore
from setuptools.command.build_ext import build_ext # type: ignore
else:
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext # type: ignore
from distutils import sysconfig, ccompiler
def setup_mypycify_vars() -> None:
    """Patch distutils config vars in place (admittedly hacky)."""
    # The returned mapping may contain ints, but we only touch str entries.
    config = cast(Dict[str, str], sysconfig.get_config_vars())
    if sys.platform != 'darwin':
        return
    # On OS X, emit dynamic libraries rather than bundles so that we can link
    # against multi-module shared libraries (https://stackoverflow.com/a/32765319).
    config['LDSHARED'] = config['LDSHARED'].replace('-bundle', '-dynamiclib')
    # Also drop 32-bit builds: the generated code overflows a 32-bit Mach-O
    # object. There has to be a better way to do this.
    for key in ('LDFLAGS', 'CFLAGS'):
        config[key] = config[key].replace('-arch i386', '')
class MypycifyExtension(Extension):
    """A distutils/setuptools Extension produced by mypyc.

    Carries two extra pieces of metadata:
    * is_mypyc_shared: whether this extension is the shared library that
      implements several compiled modules
    * mypyc_shared_target: for a shim module, the shared-library extension
      holding its actual implementation
    """
    def __init__(self, *args: Any,
                 is_mypyc_shared: bool = False,
                 mypyc_shared_target: Optional['MypycifyExtension'] = None,
                 **kwargs: Any) -> None:
        # Pass everything else straight through to Extension, then record
        # our mypyc-specific metadata.
        super().__init__(*args, **kwargs)
        self.is_mypyc_shared = is_mypyc_shared
        self.mypyc_shared_target = mypyc_shared_target
def fail(message: str) -> 'NoReturn':
    """Abort the build, surfacing *message* as the exit reason."""
    # TODO: Is there something else we should do to fail?
    # Equivalent to sys.exit(message): raises SystemExit carrying the message.
    raise SystemExit(message)
def get_mypy_config(paths: List[str],
                    mypy_options: Optional[List[str]]) -> Tuple[List[BuildSource], Options]:
    """Build mypy BuildSources and Options from the files and flag list.

    Note: a caller-supplied non-empty options list is extended in place,
    matching the original behaviour.
    """
    # Funnel everything through mypy's own CLI parser; '--' separates flags
    # from file arguments.
    if not mypy_options:
        mypy_options = []
    mypy_options += ['--']
    mypy_options += paths
    sources, options = process_options(mypy_options)
    # mypyc only supports strict-optional Python 3.
    if options.python_version[0] == 2:
        fail('Python 2 not supported')
    if not options.strict_optional:
        fail('Disabling strict optional checking not supported')
    options.show_traceback = True
    # Needed to get types for all AST nodes
    options.export_types = True
    # TODO: Support incremental checking
    options.incremental = False
    # Flag every module so the mypyc plugin processes it.
    for src in sources:
        per_module = options.per_module_options.setdefault(src.module, {})
        per_module['mypyc'] = True
    return sources, options
shim_template_unix = """\
#include <Python.h>
PyObject *CPyInit_{full_modname}(void);
PyMODINIT_FUNC
PyInit_{modname}(void)
{{
return CPyInit_{full_modname}();
}}
"""
# As far as I could tell, Windows lacks the rpath style features we
# would need in automatically load the shared library (located
# relative to the module library) when a module library is loaded,
# which means that instead we get to do it dynamically.
#
# We do this by, at module initialization time, finding the location
# of the module dll and using it to compute the location of the shared
# library. We then load the shared library with LoadLibrary, find the
# appropriate CPyInit_ routine using GetProcAddress, and call it.
#
# The relative path of the shared library (from the shim library) is provided
# as the preprocessor define MYPYC_LIBRARY.
shim_template_windows = r"""\
#include <Python.h>
#include <windows.h>
#include <stdlib.h>
#include <stdio.h>
EXTERN_C IMAGE_DOS_HEADER __ImageBase;
typedef PyObject *(__cdecl *INITPROC)();
PyMODINIT_FUNC
PyInit_{modname}(void)
{{
char path[MAX_PATH];
char drive[MAX_PATH];
char directory[MAX_PATH];
HINSTANCE hinstLib;
INITPROC proc;
// get the file name of this dll
DWORD res = GetModuleFileName((HINSTANCE)&__ImageBase, path, sizeof(path));
if (res == 0 || res == sizeof(path)) {{
PyErr_SetString(PyExc_RuntimeError, "GetModuleFileName failed");
return NULL;
}}
// find the directory this dll is in
_splitpath(path, drive, directory, NULL, NULL);
// and use it to construct a path to the shared library
snprintf(path, sizeof(path), "%s%s%s", drive, directory, MYPYC_LIBRARY);
hinstLib = LoadLibrary(path);
if (!hinstLib) {{
PyErr_SetString(PyExc_RuntimeError, "LoadLibrary failed");
return NULL;
}}
proc = (INITPROC)GetProcAddress(hinstLib, "CPyInit_{full_modname}");
if (!proc) {{
PyErr_SetString(PyExc_RuntimeError, "GetProcAddress failed");
return NULL;
}}
return proc();
}}
// distutils sometimes spuriously tells cl to export CPyInit___init__,
// so provide that so it chills out
PyMODINIT_FUNC PyInit___init__(void) {{ return PyInit_{modname}(); }}
"""
def generate_c_extension_shim(full_module_name: str, module_name: str, dirname: str) -> str:
    """Write a C shim whose PyInit just forwards to the shared library."""
    # Flatten the dotted module path into a single file name. XXX
    shim_name = '%s.c' % full_module_name.replace('.', '___')
    shim_path = os.path.join(dirname, shim_name)
    # Windows needs the LoadLibrary-based shim; elsewhere a direct call works.
    template = shim_template_windows if sys.platform == 'win32' else shim_template_unix
    with open(shim_path, 'w') as f:
        f.write(template.format(modname=module_name,
                                full_modname=exported_name(full_module_name)))
    return shim_path
def shared_lib_name(modules: List[str]) -> str:
    """Derive a (probably unique) library name from the module list."""
    digest = hashlib.sha1(','.join(modules).encode()).hexdigest()
    return 'mypyc_{}'.format(digest[:20])
def include_dir() -> str:
    """Return the absolute path of the bundled lib-rt include directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'lib-rt')
def generate_c(sources: List[BuildSource], options: Options,
               multi_file: bool,
               shared_lib_name: Optional[str],
               verbose: bool = False) -> Tuple[List[Tuple[str, str]], str]:
    """Type-check the sources and lower them to C.

    Returns the generated C sources plus (for debugging) the pretty-printed IR.
    """
    module_names = [src.module for src in sources]
    # Type-check first; print mypy's messages and abort on failure.
    start = time.time()
    try:
        result = emitmodule.parse_and_typecheck(sources, options)
    except CompileError as e:
        for line in e.messages:
            print(line)
        fail('Typechecking failure')
    checked = time.time()
    if verbose:
        print("Parsed and typechecked in {:.3f}s".format(checked - start))
    # Lower to C, collecting the pretty-printed IR as a side channel.
    ops = []  # type: List[str]
    ctext = emitmodule.compile_modules_to_c(result, module_names, shared_lib_name,
                                            multi_file, ops=ops)
    done = time.time()
    if verbose:
        print("Compiled to C in {:.3f}s".format(done - checked))
    return ctext, '\n'.join(ops)
def build_using_shared_lib(sources: List[BuildSource],
                           lib_name: str,
                           cfiles: List[str],
                           build_dir: str,
                           extra_compile_args: List[str],
                           ) -> List[MypycifyExtension]:
    """Build the Extension list for a multi-module (shared-library) build.

    The first entry is the shared library holding all generated code; each
    compiled module then gets a tiny shim extension whose init function
    simply forwards into the shared library.  The shared library is
    modelled as a Python extension only so setuptools processes it
    cleanly -- it is not an importable module on its own.
    """
    shared = MypycifyExtension(
        'lib' + lib_name,
        is_mypyc_shared=True,
        sources=cfiles,
        include_dirs=[include_dir()],
        extra_compile_args=extra_compile_args,
    )
    result = [shared]
    for src in sources:
        short_name = src.module.rpartition('.')[2]
        shim_file = generate_c_extension_shim(src.module, short_name, build_dir)
        assert src.path
        # Package modules need the explicit '.__init__' suffix so the shim
        # lands in the right spot in the build tree.
        target_name = src.module
        if os.path.split(src.path)[1] == '__init__.py':
            target_name += '.__init__'
        result.append(MypycifyExtension(
            target_name,
            mypyc_shared_target=shared,
            sources=[shim_file],
            extra_compile_args=extra_compile_args,
        ))
    return result
def build_single_module(sources: List[BuildSource],
                        cfiles: List[str],
                        extra_compile_args: List[str],
                        ) -> List[MypycifyExtension]:
    """Build the extension list for a standalone single-module build.

    No shared library is needed, so the result is exactly one extension
    containing all generated C files.
    """
    ext = MypycifyExtension(
        sources[0].module,
        sources=cfiles,
        include_dirs=[include_dir()],
        extra_compile_args=extra_compile_args,
    )
    return [ext]
def mypycify(paths: List[str],
             mypy_options: Optional[List[str]] = None,
             opt_level: str = '3',
             multi_file: bool = False,
             skip_cgen: bool = False,
             verbose: bool = False) -> List[MypycifyExtension]:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
    * paths: A list of file paths to build. It may contain globs.
    * mypy_options: Optionally, a list of command line flags to pass to mypy.
      (This can also contain additional files, for compatibility reasons.)
    * opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
    * multi_file: Whether to emit multiple C files instead of one per group
      (trades optimization for compile speed/memory).
    * skip_cgen: Skip running mypy/codegen and reuse the C files already in
      the build directory (used by the test harness).
    * verbose: Print timing information for the compilation phases.
    """
    setup_mypycify_vars()
    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler()  # type: Any
    sysconfig.customize_compiler(compiler)
    # Expand any globs in the input paths to real file paths.
    expanded_paths = []
    for path in paths:
        expanded_paths.extend(glob.glob(path))
    build_dir = 'build'  # TODO: can this be overridden??
    try:
        os.mkdir(build_dir)
    except FileExistsError:
        pass
    sources, options = get_mypy_config(expanded_paths, mypy_options)
    # We generate a shared lib if there are multiple modules or if any
    # of the modules are in a package. (Because I didn't want to fuss
    # around with making the single module code handle packages.)
    use_shared_lib = len(sources) > 1 or any('.' in x.module for x in sources)
    lib_name = shared_lib_name([source.module for source in sources]) if use_shared_lib else None
    # We let the test harness make us skip doing the full compilation
    # so that it can do a corner-cutting version without full stubs.
    # TODO: Be able to do this based on file mtimes?
    if not skip_cgen:
        cfiles, ops_text = generate_c(sources, options, multi_file, lib_name, verbose)
        # TODO: unique names?
        # Dump the pretty-printed IR for debugging.
        with open(os.path.join(build_dir, 'ops.txt'), 'w') as f:
            f.write(ops_text)
        # Write out the generated C; only .c files (not headers) get compiled.
        cfilenames = []
        for cfile, ctext in cfiles:
            cfile = os.path.join(build_dir, cfile)
            with open(cfile, 'w', encoding='utf-8') as f:
                f.write(ctext)
            if os.path.splitext(cfile)[1] == '.c':
                cfilenames.append(cfile)
    else:
        # Reuse whatever C files a previous run left in the build directory.
        cfilenames = glob.glob(os.path.join(build_dir, '*.c'))
    # Platform-specific compiler flags; warnings that the generated C is
    # known to trigger are suppressed.
    cflags = []  # type: List[str]
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level), '-Werror', '-Wno-unused-function', '-Wno-unused-label',
            '-Wno-unreachable-code', '-Wno-unused-variable', '-Wno-trigraphs',
            '-Wno-unused-command-line-argument'
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        if opt_level == '3':
            # MSVC's highest optimization level is /O2.
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]
    # Copy the runtime library in
    rt_file = os.path.join(build_dir, 'CPy.c')
    shutil.copyfile(os.path.join(include_dir(), 'CPy.c'), rt_file)
    cfilenames.append(rt_file)
    if use_shared_lib:
        assert lib_name
        extensions = build_using_shared_lib(sources, lib_name, cfilenames, build_dir, cflags)
    else:
        extensions = build_single_module(sources, cfilenames, cflags)
    return extensions
class MypycifyBuildExt(build_ext):
    """Custom setuptools/distutils build_ext command.

    This overrides the build_extension method so that we can hook in
    before and after the actual compilation.

    The key thing here is that we need to hook in after compilation on
    OS X, because we need to use `install_name_tool` to fix up the
    libraries to use relative paths.

    We hook in before compilation to update library paths to include
    where the built shared library is placed. (We probably could have
    hacked this together without hooking in here, but we were hooking
    in already and build_ext makes it easy to get that information)
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # parallel is busted because the shared library needs to go first.
        # We could override build_extensions to arrange for that to happen
        # and to parallelize the rest but it wouldn't help much.
        self.parallel = 0
    def _get_rt_lib_path(self, ext: MypycifyExtension) -> str:
        # Relative path from the extension's package directory back up to
        # the build root, where the shared runtime library is placed.
        module_parts = ext.name.split('.')
        if len(module_parts) > 1:
            relative_lib_path = os.path.join(*(['..'] * (len(module_parts) - 1)))
        else:
            relative_lib_path = '.'
        return relative_lib_path
    def build_extension(self, ext: MypycifyExtension) -> None:
        """Build one extension, wiring up shared-library lookup before the
        C build and fixing up macOS install names afterwards."""
        # First we need to figure out what the real library names are
        # so that we can set them up properly.
        if isinstance(ext, MypycifyExtension) and ext.mypyc_shared_target:
            relative_lib_path = self._get_rt_lib_path(ext)
            shared_dir, shared_file = os.path.split(
                self.get_ext_fullpath(ext.mypyc_shared_target.name))
            # Drop the "lib" prefix to get the name the linker expects (-l<name>).
            shared_name = os.path.splitext(shared_file)[0][3:]
            if sys.platform == 'win32':
                # On windows, instead of linking against the shared library,
                # we dynamically load it at runtime. We generate our C shims
                # before we have found out what the library filename is, so
                # pass it in as a preprocessor define.
                path = os.path.join(relative_lib_path, shared_file)
                ext.extra_compile_args = ext.extra_compile_args + [
                    '/DMYPYC_LIBRARY=\\"{}\\"'.format(path.replace('\\', '\\\\'))]
            else:
                # On other platforms we link against the library normally
                ext.libraries.append(shared_name)
                ext.library_dirs.append(shared_dir)
                if sys.platform == 'linux':
                    # Let the dynamic loader find the library relative to
                    # the extension itself.
                    ext.runtime_library_dirs.append('$ORIGIN/{}'.format(
                        relative_lib_path))
        # Run the actual C build
        super().build_extension(ext)
        # On OS X, we need to patch up these paths post-hoc, tragically
        if sys.platform == 'darwin':
            out_path = self.get_ext_fullpath(ext.name)
            # After compiling the shared library, drop the path part from its name.
            if isinstance(ext, MypycifyExtension) and ext.is_mypyc_shared:
                subprocess.check_call(['install_name_tool', '-id',
                                       os.path.basename(out_path),
                                       out_path])
            # For libraries that link against a shared one, update the path to
            # the shared library to be relative to @loader_path.
            if isinstance(ext, MypycifyExtension) and ext.mypyc_shared_target:
                new_path = os.path.join('@loader_path', relative_lib_path,
                                        shared_file)
                subprocess.check_call(['install_name_tool', '-change',
                                       shared_file, new_path, out_path])
| 3,443 | 0 | 130 |
c73bfbe38daeb6fa3fb557b96e8776ca3d1348e7 | 163 | py | Python | test_mysql.py | huobingli/WoodETF | f93d0938f295d1dac5ac36af289ac8bf78949d59 | [
"Apache-2.0"
] | null | null | null | test_mysql.py | huobingli/WoodETF | f93d0938f295d1dac5ac36af289ac8bf78949d59 | [
"Apache-2.0"
] | null | null | null | test_mysql.py | huobingli/WoodETF | f93d0938f295d1dac5ac36af289ac8bf78949d59 | [
"Apache-2.0"
] | null | null | null | from Middleware import *
if __name__ == '__main__':
fecth_ark_data() | 23.285714 | 66 | 0.705521 | from Middleware import *
def fecth_ark_data():
    # Query the ARK ETF table for the TSLA row and show it.
    # (Note: "fecth" is a typo for "fetch", but the names come from the
    # Middleware module / public entry point, so they are kept as-is.)
    row = fecth_data("ARKK_ETF", "where ark_stock_name = 'TSLA'")
    print(row)
if __name__ == '__main__':
    fecth_ark_data()
ba1b3575e31c6558549fbf69788c7153a676daf8 | 2,435 | py | Python | mainbackend.py | jemsbhai/sahaybackend | 80de93b64406e68245418a61a12f1de7c5ee77d6 | [
"MIT"
] | null | null | null | mainbackend.py | jemsbhai/sahaybackend | 80de93b64406e68245418a61a12f1de7c5ee77d6 | [
"MIT"
] | null | null | null | mainbackend.py | jemsbhai/sahaybackend | 80de93b64406e68245418a61a12f1de7c5ee77d6 | [
"MIT"
] | 1 | 2021-08-30T09:51:53.000Z | 2021-08-30T09:51:53.000Z | import os
import pymongo
import json
import random
import hashlib
import time
from flask import jsonify
from hashlib import sha256
import redis
def _inventory_response(db, key):
    """Fetch the cached inventory list stored under *key* and wrap it as JSON.

    redis returns ``bytes`` (or ``None`` when the key is missing); decode
    first, otherwise ``json.dumps`` raises TypeError on the bytes payload.
    The response key is "meds" for every inventory type, matching the
    original API shape that clients already consume.
    """
    data = db.get(key)
    data = data.decode() if data is not None else ''
    return json.dumps({"length": len(data), "meds": data})


def dummy(request):
    """Responds to any HTTP request.
    Args:
        request (flask.Request): HTTP request object.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
    """
    if request.method == 'OPTIONS':
        # CORS preflight: allow POST requests from any origin.
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST',
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Max-Age': '3600',
            'Access-Control-Allow-Credentials': 'true'
        }
        return ('', 204, headers)
    # Set CORS headers for main requests
    headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Credentials': 'true'
    }
    request_json = request.get_json()
    db = initsystem()
    if request.method == 'GET':
        msg = db.get('sahaywelcome')
        msg = msg.decode()
        return msg
    action = request_json['action']
    # BUG FIX: these lookups previously used an undefined name `r`
    # (NameError at runtime) instead of the redis client `db`, and tried
    # to json.dumps raw bytes.
    if action == "getmeds":
        return _inventory_response(db, 'meds')
    if action == "getplasma":
        return _inventory_response(db, 'plasma')
    if action == "getblood":
        return _inventory_response(db, 'blood')
    retstr = "action not done"
    if request.args and 'message' in request.args:
        return request.args.get('message')
    elif request_json and 'message' in request_json:
        return request_json['message']
    else:
        return retstr
| 20.991379 | 89 | 0.57577 | import os
import pymongo
import json
import random
import hashlib
import time
from flask import jsonify
from hashlib import sha256
import redis
def hashcalc(st):
    """Return the hex MD5 digest of *st* (UTF-8 encoded) as a string.

    ``hexdigest()`` already returns ``str``, so no intermediate variables
    or ``str()`` conversion are needed.  MD5 is fine for fingerprints /
    cache keys, but must not be used for anything security-sensitive.
    """
    return hashlib.md5(st.encode()).hexdigest()
def initsystem():
    """Build a redis client from the REDISURL / REDISPORT / REDISPW env vars."""
    host = os.environ.get('REDISURL')
    port = os.environ.get('REDISPORT')
    password = os.environ.get('REDISPW')
    return redis.Redis(host=host, port=port, password=password)
def _inventory_response(db, key):
    """Fetch the cached inventory list stored under *key* and wrap it as JSON.

    redis returns ``bytes`` (or ``None`` when the key is missing); decode
    first, otherwise ``json.dumps`` raises TypeError on the bytes payload.
    The response key is "meds" for every inventory type, matching the
    original API shape that clients already consume.
    """
    data = db.get(key)
    data = data.decode() if data is not None else ''
    return json.dumps({"length": len(data), "meds": data})


def dummy(request):
    """Responds to any HTTP request.
    Args:
        request (flask.Request): HTTP request object.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
    """
    if request.method == 'OPTIONS':
        # CORS preflight: allow POST requests from any origin.
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST',
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Max-Age': '3600',
            'Access-Control-Allow-Credentials': 'true'
        }
        return ('', 204, headers)
    # Set CORS headers for main requests
    headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Credentials': 'true'
    }
    request_json = request.get_json()
    db = initsystem()
    if request.method == 'GET':
        msg = db.get('sahaywelcome')
        msg = msg.decode()
        return msg
    action = request_json['action']
    # BUG FIX: these lookups previously used an undefined name `r`
    # (NameError at runtime) instead of the redis client `db`, and tried
    # to json.dumps raw bytes.
    if action == "getmeds":
        return _inventory_response(db, 'meds')
    if action == "getplasma":
        return _inventory_response(db, 'plasma')
    if action == "getblood":
        return _inventory_response(db, 'blood')
    retstr = "action not done"
    if request.args and 'message' in request.args:
        return request.args.get('message')
    elif request_json and 'message' in request_json:
        return request_json['message']
    else:
        return retstr
| 306 | 0 | 46 |
c89ebe61758e4f1c528898ee16d2415a6a909808 | 652 | py | Python | src/fitfuncdescriptor.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | src/fitfuncdescriptor.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | src/fitfuncdescriptor.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtGui import QImage
from PyQt5.QtWidgets import QLabel
from commonwidgets import *
| 24.148148 | 71 | 0.703988 | import sys
from PyQt5.QtGui import QImage
from PyQt5.QtWidgets import QLabel
from commonwidgets import *
class FitFuncDescriptor(DescriptionWidget):
  """Widget documenting a fit function: its title, a pre-rendered LaTeX
  image of the definition, and a grid of parameter names/descriptions."""
  def __init__(self, funcClass):
    super().__init__()
    self.funcClass = funcClass
    self.addTitle(self.funcClass.label)
    # Imported lazily: lateximgs is a generated module that stores the
    # pre-rendered LaTeX formula images as raw bytes attributes.
    import lateximgs
    img = QImage()
    img.loadFromData(getattr(lateximgs, 'fitfunc_%s' % funcClass.name))
    self.addLabel('Definition:')
    self.addImage(img)
    self.addLabel('Parameters:')
    # Two-column grid: parameter name | description, one row each.
    grid = self.addGrid()
    for r, (name, desc) in enumerate(funcClass.parameters):
      grid.addWidget(QLabel(name), r, 0)
      grid.addWidget(QLabel(desc), r, 1)
| 475 | 22 | 47 |
225903a55acc1f6f1c4d59b6159c28cd65cfbbf0 | 5,475 | py | Python | gqcnn/utils/utils.py | v1viswan/dexnet_rrt_planner_surreal_robosuite | d66753963776ee4dd95b0b2b58a8618644e346f4 | [
"MIT"
] | 11 | 2020-03-11T07:53:25.000Z | 2022-02-23T05:28:48.000Z | gqcnn/utils/utils.py | iosmichael/dexnet_robosuite | d643d3fe9655d26b85b8e82c30bc320a1b1bf56c | [
"MIT"
] | null | null | null | gqcnn/utils/utils.py | iosmichael/dexnet_robosuite | d643d3fe9655d26b85b8e82c30bc320a1b1bf56c | [
"MIT"
] | 1 | 2021-01-05T16:55:09.000Z | 2021-01-05T16:55:09.000Z | # -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Simple utility functions.
Authors
-------
Jeff Mahler, Vishal Satish, Lucas Manuelli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import os
import sys
import numpy as np
from autolab_core import Logger
from .enums import GripperMode
# Set up logger.
logger = Logger.get_logger("gqcnn/utils/utils.py")
def set_cuda_visible_devices(gpu_list):
    """Restrict CUDA to the given GPUs via the CUDA_VISIBLE_DEVICES env var.

    Note
    ----
    If gpu_list is empty does nothing.

    Parameters
    ----------
    gpu_list : list
        List of gpus to set as visible.
    """
    if not gpu_list:
        return
    # Preserve the historical format: each id followed by a comma
    # (including a trailing one), which CUDA tolerates.
    cuda_visible_devices = "".join("{},".format(gpu) for gpu in gpu_list)
    logger.info(
        "Setting CUDA_VISIBLE_DEVICES = {}".format(cuda_visible_devices))
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
def pose_dim(gripper_mode):
    """Returns the dimensions of the pose vector for the given
    gripper mode.

    Parameters
    ----------
    gripper_mode: :obj:`GripperMode`
        Enum for gripper mode, see optimizer_constants.py for all possible
        gripper modes.

    Returns
    -------
    int
        Number of pose components used by this gripper mode.
    """
    # Suction modes carry depth + approach angle (2); the rest use depth only.
    dims = {
        GripperMode.PARALLEL_JAW: 1,
        GripperMode.SUCTION: 2,
        GripperMode.MULTI_SUCTION: 1,
        GripperMode.LEGACY_PARALLEL_JAW: 1,
        GripperMode.LEGACY_SUCTION: 2,
    }
    try:
        return dims[gripper_mode]
    except KeyError:
        raise ValueError(
            "Gripper mode '{}' not supported.".format(gripper_mode))
def read_pose_data(pose_arr, gripper_mode):
    """Read the pose data and slice it according to the specified gripper mode.

    Parameters
    ----------
    pose_arr: :obj:`numpy.ndarray`
        Full pose data array read in from file (1-D single pose or 2-D batch).
    gripper_mode: :obj:`GripperMode`
        Enum for gripper mode, see optimizer_constants.py for all possible
        gripper modes.

    Returns
    -------
    :obj:`numpy.ndarray`
        Sliced pose_data corresponding to input data mode.
    """
    single = pose_arr.ndim == 1
    # Parallel-jaw style modes only use the depth column (index 2).
    if gripper_mode in (GripperMode.PARALLEL_JAW,
                        GripperMode.MULTI_SUCTION,
                        GripperMode.LEGACY_PARALLEL_JAW):
        return pose_arr[2:3] if single else pose_arr[:, 2:3]
    # Suction uses the non-adjacent depth (2) and angle (4) columns.
    if gripper_mode == GripperMode.SUCTION:
        if single:
            return np.r_[pose_arr[2], pose_arr[4]]
        return np.c_[pose_arr[:, 2], pose_arr[:, 4]]
    # Legacy suction stored its two components contiguously.
    if gripper_mode == GripperMode.LEGACY_SUCTION:
        return pose_arr[2:4] if single else pose_arr[:, 2:4]
    raise ValueError(
        "Gripper mode '{}' not supported.".format(gripper_mode))
def reduce_shape(shape):
    """Get shape of a layer for flattening.

    Skips the leading (batch) dimension and multiplies the remaining
    dimension sizes.  Unknown dimensions (``.value is None``) count as 1.

    BUG FIX: the old ``reduce`` with ``1 if y is None else x * y`` *reset*
    the accumulated product to 1 whenever a ``None`` dimension appeared,
    discarding everything multiplied so far, instead of skipping it.
    """
    size = 1
    for dim in shape[1:]:
        if dim.value is not None:
            size *= dim.value
    return size
def weight_name_to_layer_name(weight_name):
    """Convert the name of weights to the layer name."""
    parts = weight_name.split("_")
    kind = parts[-1]
    # Modern naming convention: "<layer>_weights" / "<layer>_bias",
    # optionally with an "input_<n>" chunk that belongs to the suffix.
    if kind in ("weights", "bias"):
        if len(parts) >= 3 and parts[-3] == "input":
            return weight_name[:weight_name.rfind("input") - 1]
        return weight_name[:weight_name.rfind(kind) - 1]
    # Legacy naming: trim a fixed number of trailing characters depending
    # on the suffix ("im" -> 4, "pose" -> 6, anything else -> 1).
    legacy_trim = {"im": 4, "pose": 6}
    return weight_name[:-legacy_trim.get(kind, 1)]
| 31.107955 | 79 | 0.672511 | # -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Simple utility functions.
Authors
-------
Jeff Mahler, Vishal Satish, Lucas Manuelli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import os
import sys
import numpy as np
from autolab_core import Logger
from .enums import GripperMode
# Set up logger.
logger = Logger.get_logger("gqcnn/utils/utils.py")
def is_py2():
    """Return True when running under Python 2.

    Uses ``sys.version_info`` instead of string-comparing the first
    character of ``sys.version``, which is the robust, documented way to
    test the major version.
    """
    return sys.version_info[0] == 2
def set_cuda_visible_devices(gpu_list):
    """Restrict CUDA to the given GPUs via the CUDA_VISIBLE_DEVICES env var.

    Note
    ----
    If gpu_list is empty does nothing.

    Parameters
    ----------
    gpu_list : list
        List of gpus to set as visible.
    """
    if not gpu_list:
        return
    # Preserve the historical format: each id followed by a comma
    # (including a trailing one), which CUDA tolerates.
    cuda_visible_devices = "".join("{},".format(gpu) for gpu in gpu_list)
    logger.info(
        "Setting CUDA_VISIBLE_DEVICES = {}".format(cuda_visible_devices))
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
def pose_dim(gripper_mode):
    """Returns the dimensions of the pose vector for the given
    gripper mode.

    Parameters
    ----------
    gripper_mode: :obj:`GripperMode`
        Enum for gripper mode, see optimizer_constants.py for all possible
        gripper modes.

    Returns
    -------
    int
        Number of pose components used by this gripper mode.
    """
    # Suction modes carry depth + approach angle (2); the rest use depth only.
    dims = {
        GripperMode.PARALLEL_JAW: 1,
        GripperMode.SUCTION: 2,
        GripperMode.MULTI_SUCTION: 1,
        GripperMode.LEGACY_PARALLEL_JAW: 1,
        GripperMode.LEGACY_SUCTION: 2,
    }
    try:
        return dims[gripper_mode]
    except KeyError:
        raise ValueError(
            "Gripper mode '{}' not supported.".format(gripper_mode))
def read_pose_data(pose_arr, gripper_mode):
    """Read the pose data and slice it according to the specified gripper mode.

    Parameters
    ----------
    pose_arr: :obj:`numpy.ndarray`
        Full pose data array read in from file (1-D single pose or 2-D batch).
    gripper_mode: :obj:`GripperMode`
        Enum for gripper mode, see optimizer_constants.py for all possible
        gripper modes.

    Returns
    -------
    :obj:`numpy.ndarray`
        Sliced pose_data corresponding to input data mode.
    """
    single = pose_arr.ndim == 1
    # Parallel-jaw style modes only use the depth column (index 2).
    if gripper_mode in (GripperMode.PARALLEL_JAW,
                        GripperMode.MULTI_SUCTION,
                        GripperMode.LEGACY_PARALLEL_JAW):
        return pose_arr[2:3] if single else pose_arr[:, 2:3]
    # Suction uses the non-adjacent depth (2) and angle (4) columns.
    if gripper_mode == GripperMode.SUCTION:
        if single:
            return np.r_[pose_arr[2], pose_arr[4]]
        return np.c_[pose_arr[:, 2], pose_arr[:, 4]]
    # Legacy suction stored its two components contiguously.
    if gripper_mode == GripperMode.LEGACY_SUCTION:
        return pose_arr[2:4] if single else pose_arr[:, 2:4]
    raise ValueError(
        "Gripper mode '{}' not supported.".format(gripper_mode))
def reduce_shape(shape):
    """Get shape of a layer for flattening.

    Skips the leading (batch) dimension and multiplies the remaining
    dimension sizes.  Unknown dimensions (``.value is None``) count as 1.

    BUG FIX: the old ``reduce`` with ``1 if y is None else x * y`` *reset*
    the accumulated product to 1 whenever a ``None`` dimension appeared,
    discarding everything multiplied so far, instead of skipping it.
    """
    size = 1
    for dim in shape[1:]:
        if dim.value is not None:
            size *= dim.value
    return size
def weight_name_to_layer_name(weight_name):
    """Convert the name of weights to the layer name."""
    parts = weight_name.split("_")
    kind = parts[-1]
    # Modern naming convention: "<layer>_weights" / "<layer>_bias",
    # optionally with an "input_<n>" chunk that belongs to the suffix.
    if kind in ("weights", "bias"):
        if len(parts) >= 3 and parts[-3] == "input":
            return weight_name[:weight_name.rfind("input") - 1]
        return weight_name[:weight_name.rfind(kind) - 1]
    # Legacy naming: trim a fixed number of trailing characters depending
    # on the suffix ("im" -> 4, "pose" -> 6, anything else -> 1).
    legacy_trim = {"im": 4, "pose": 6}
    return weight_name[:-legacy_trim.get(kind, 1)]
| 25 | 0 | 23 |
3207da592d191636cb18572d9257809d83dc3f60 | 1,359 | py | Python | torchclassify/utils/test_tools.py | foolishflyfox/ClassifyFramework | e2c24664e91aed18b99e66ff5479d0aabf8f3418 | [
"MIT"
] | null | null | null | torchclassify/utils/test_tools.py | foolishflyfox/ClassifyFramework | e2c24664e91aed18b99e66ff5479d0aabf8f3418 | [
"MIT"
] | null | null | null | torchclassify/utils/test_tools.py | foolishflyfox/ClassifyFramework | e2c24664e91aed18b99e66ff5479d0aabf8f3418 | [
"MIT"
] | null | null | null | import torch.utils.data as data
from PIL import Image
import os
# refer to https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (iterable of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    # str.endswith accepts a tuple of suffixes, so one call suffices.
    return filename.lower().endswith(tuple(extensions))
| 32.357143 | 87 | 0.646799 | import torch.utils.data as data
from PIL import Image
import os
# refer to https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (iterable of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    # str.endswith accepts a tuple of suffixes, so one call suffices.
    return filename.lower().endswith(tuple(extensions))
class TestImageFolder(data.Dataset):
    """Unlabeled image dataset for inference.

    Recursively collects every file under *root* whose extension is in
    *extensions*.  ``__getitem__`` returns ``(image, path)`` so each
    prediction can be traced back to its source file.
    """
    def __init__(self, root, transform=None, extensions=IMG_EXTENSIONS):
        self.root = root
        self.extensions = extensions
        self.imgs = []
        # Walk the tree once at construction time and record matching paths.
        for tp_root, _, fnames in os.walk(root):
            for fname in fnames:
                if has_file_allowed_extension(fname, extensions):
                    self.imgs.append(os.path.join(tp_root, fname))
        self.transform = transform
    def __getitem__(self, index):
        # Lazily load the image; apply the optional transform pipeline.
        path = self.imgs[index]
        img = Image.open(path)
        if self.transform is not None:
            img = self.transform(img)
        return img, path
    def __len__(self):
        return len(self.imgs)
| 583 | 15 | 103 |
2adadc290ea9b4c98bb129853fa8eba0efa8e555 | 1,707 | py | Python | solutions/day19.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | solutions/day19.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | solutions/day19.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | import re
from tools.general import load_input
# Part 1: expand rule 0 into one anchored regex and count matching messages.
rules, messages = parse_input_data(load_input("day19.txt"))
pattern = re.compile("^" + resolve_rule(rules, 0) + "$")
print(f"Part 1 => {sum(1 for msg in messages if pattern.match(msg))}")
# Part 2: rules 8 and 11 become recursive.  Rule 8 is replaced by
# "one or more of rule 42" (42+); rule 11 by "n copies of 42 followed by
# n copies of 31", unrolled here for n = 1..5 (enough for this input).
special_cases = {
    8 : resolve_rule(rules, 42) + '+',
    11 : '(' + '|'.join(
        resolve_rule(rules, 42)
        + '{' + str(i) + '}'
        + resolve_rule(rules, 31)
        + '{' + str(i) + '}'
        for i in range(1, 6)
    ) + ')'
}
pattern = re.compile("^" + resolve_rule(rules, 0, special_cases) + "$")
print(f"Part 2 => {sum(1 for msg in messages if pattern.match(msg))}")
| 27.983607 | 80 | 0.554189 | import re
from tools.general import load_input
def parse_input_data(input_data):
    """Split the puzzle input into a rule table and a message list.

    Returns a ``(rules, messages)`` pair where ``rules`` maps rule id to
    its raw right-hand side and ``messages`` is the list of 'a'/'b' lines.
    """
    rule_re = re.compile(r"^([0-9]+): ([0-9ab\"\| ]+)$")
    msg_re = re.compile(r"^[ab]+$")
    rules = {}
    msgs = []
    for line in input_data.split('\n'):
        match = rule_re.match(line)
        if match is not None:
            rules[int(match.group(1))] = match.group(2)
            continue
        if msg_re.match(line):
            msgs.append(line)
    return rules, msgs
def resolve_rule(all_rules, rule_id, custom_rules=None):
    """Recursively expand rule *rule_id* into a regex fragment.

    ``custom_rules`` maps rule ids to pre-built regex fragments that
    override the normal expansion (used for the recursive part-2 rules).
    """
    if custom_rules and rule_id in custom_rules:
        return custom_rules[rule_id]

    def expand(token):
        # A token is either a quoted literal ('a'/'b') or a rule reference.
        token = token.strip('"')
        if token in ('a', 'b'):
            return token
        return resolve_rule(all_rules, int(token), custom_rules)

    branches = [
        ''.join(expand(tok) for tok in alt.strip().split(' '))
        for alt in all_rules[rule_id].split('|')
    ]
    # Only wrap in a group when there is a real alternation.
    return branches[0] if len(branches) == 1 else '(' + '|'.join(branches) + ')'
# Part 1: expand rule 0 into one anchored regex and count matching messages.
rules, messages = parse_input_data(load_input("day19.txt"))
pattern = re.compile("^" + resolve_rule(rules, 0) + "$")
print(f"Part 1 => {sum(1 for msg in messages if pattern.match(msg))}")
# Part 2: rules 8 and 11 become recursive.  Rule 8 is replaced by
# "one or more of rule 42" (42+); rule 11 by "n copies of 42 followed by
# n copies of 31", unrolled here for n = 1..5 (enough for this input).
special_cases = {
    8 : resolve_rule(rules, 42) + '+',
    11 : '(' + '|'.join(
        resolve_rule(rules, 42)
        + '{' + str(i) + '}'
        + resolve_rule(rules, 31)
        + '{' + str(i) + '}'
        for i in range(1, 6)
    ) + ')'
}
pattern = re.compile("^" + resolve_rule(rules, 0, special_cases) + "$")
print(f"Part 2 => {sum(1 for msg in messages if pattern.match(msg))}")
| 1,005 | 0 | 46 |
b35df27f800f420be8f6c3c6d88d5bf55fbc9413 | 1,430 | py | Python | glouton/modules/polaris.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 13 | 2018-01-29T06:08:15.000Z | 2020-03-04T07:00:56.000Z | glouton/modules/polaris.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 10 | 2018-12-21T11:37:21.000Z | 2021-05-09T12:39:23.000Z | glouton/modules/polaris.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 4 | 2019-01-25T13:40:13.000Z | 2019-07-22T08:14:19.000Z |
from glouton.modules.telemetryModuleBase import TelemetryModuleBase
from glouton.shared.logger import logger
import json
import os
| 33.255814 | 79 | 0.523776 |
from glouton.modules.telemetryModuleBase import TelemetryModuleBase
from glouton.shared.logger import logger
import json
import os
class Polaris(TelemetryModuleBase):
    """Telemetry module that accumulates downloaded frames into one JSON
    file per observation timestamp under the working directory."""
    def __init__(self, wdir):
        TelemetryModuleBase.__init__(self, wdir)
        # Running count of frames written, used only for progress output.
        self.count = 0
    def runAfterDownload(self, frame, full_path, telemetry):
        """Append *frame* to <full_path>/<timestamp>.json, creating the file
        the first time that timestamp is seen.  Any failure is logged and
        swallowed so one bad frame does not abort the whole batch."""
        try:
            timestamp = telemetry['timestamp']
            if not frame:
                # NOTE(review): an empty frame is reported but still written
                # below — presumably intentional; confirm with the callers.
                print('no frame for ' + timestamp)
            # ':' is not a legal filename character on Windows, so swap it.
            json_file = full_path + '/' + timestamp.replace(':', '-') + '.json'
            if os.path.exists(json_file):
                # Merge the new frame into the existing file's telemetry list.
                with open(json_file) as json_file_read:
                    telemetry = json.load(json_file_read)
                telemetry['telemetry'].append({
                    "timestamp": timestamp,
                    "frame": frame
                })
                telemetry_obj = telemetry
            else:
                telemetry_obj = {"telemetry": [{
                    "timestamp": timestamp,
                    "frame": frame
                }]}
            json_telemetry = json.dumps(telemetry_obj, indent=4)
            with open(json_file, 'w') as f:
                f.write(json_telemetry)
            self.count += 1
            print('Timestamp ' + timestamp + ' Frame ' +
                  frame + ' count ' + str(self.count))
        except Exception as ex:
            # Best-effort: log and continue with the next frame.
            logger.Error(ex)
7e9402cf36fe0f6142df8bffa5dc2f23541737cb | 531 | py | Python | L2/input_output.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | L2/input_output.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | L2/input_output.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | # однострочный комментарий
'''
первая строка
вторая строка
'''
# Ввод / вывод информации
print('Hello!')
# более сложный выыод
print('Hello!', 'student!')
# вывод с разделитетем - sep
print('Hello!', 'student!', 123, sep='xxx')
# бывает важно что бы конец строки был другим символом - and
print('Hello!', 'student!', 123, sep='xxx', end='yyy')
print()
# Ввод
age = input('Input your age')
# Тип переменной которая возвращает input это всегда строка
# ОПЕРАЦИЯ ПРИВЕДЕНИЯ ТИПОВ ИЗ СТРОКИ СДЕЛАЛИ ЧИСЛО
print(age, type(int(age)))
| 23.086957 | 60 | 0.708098 | # однострочный комментарий
'''
первая строка
вторая строка
'''
# Input / output basics
print('Hello!')
# printing several values in one call
print('Hello!', 'student!')
# custom separator between the values - sep
print('Hello!', 'student!', 123, sep='xxx')
# sometimes the end-of-line terminator must be a different symbol - end
print('Hello!', 'student!', 123, sep='xxx', end='yyy')
print()
# Input
age = input('Input your age')
# the value input() returns is always a string
# TYPE CAST: turn the string into a number
print(age, type(int(age)))
| 0 | 0 | 0 |
6fb19e45a5fa8163aba4e62cbbd5a8f36818a272 | 4,100 | py | Python | yyfeed/fetcher/base.py | moonfruit/yysite | 92196d76edb8a30afbb7c7a59cb95a1b909fa22a | [
"MIT"
] | null | null | null | yyfeed/fetcher/base.py | moonfruit/yysite | 92196d76edb8a30afbb7c7a59cb95a1b909fa22a | [
"MIT"
] | null | null | null | yyfeed/fetcher/base.py | moonfruit/yysite | 92196d76edb8a30afbb7c7a59cb95a1b909fa22a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Text
from bs4 import BeautifulSoup
from yyutil.cache import DummyCache
from yyutil.time import astimezone
from yyutil.url import UrlFetcher
Item = namedtuple('Item', 'id title publish_date link description')
| 26.973684 | 108 | 0.570244 | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Text
from bs4 import BeautifulSoup
from yyutil.cache import DummyCache
from yyutil.time import astimezone
from yyutil.url import UrlFetcher
Item = namedtuple('Item', 'id title publish_date link description')
class Fetcher(metaclass=ABCMeta):
def __init__(self):
self._fetcher = UrlFetcher()
self._cache = DummyCache()
@property
def fetcher(self):
return self._fetcher
@property
def cache(self):
return self._cache
@cache.setter
def cache(self, cache):
self._cache = cache
@abstractmethod
def fetch(self) -> Iterable[Item]:
pass
def cached_soup(self, url, parse_only=None):
key = self.__class__.__name__ + '+' + url
soup = self.cache.get(key)
if soup is not None:
return BeautifulSoup(soup, 'lxml')
if parse_only is None:
soup = self.fetcher.soup(url)
else:
soup = self.fetcher.soup(url, parse_only=parse_only)
self.cache.set(key, str(soup))
return soup
class FeedFetcher(Fetcher, metaclass=ABCMeta):
DATE_TZ_FORMAT = '%a, %d %b %Y %H:%M:%S %z'
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'
callback = None
def fetch(self) -> Iterable[Item]:
root = self.fetcher.xml(self.url())
for item in root.iter('item'):
item = self.item(item)
if item:
yield item
def item(self, item) -> Optional[Item]:
result = self.build_result(item)
if callable(self.callback):
if not self.callback(result, item):
return None
result['description'] = self.cached_description(result['link'])
return Item(**result)
def build_result(self, item) -> Dict[Text, Any]:
result = {}
for child in item:
if child.tag == 'guid':
result['id'] = child.text
elif child.tag == 'title':
result['title'] = child.text
elif child.tag == 'link':
result['link'] = child.text
elif child.tag == 'pubDate':
try:
result['publish_date'] = datetime.strptime(child.text, self.DATE_TZ_FORMAT)
except ValueError:
try:
result['publish_date'] = astimezone(datetime.strptime(child.text, self.DATE_FORMAT))
except ValueError:
result['publish_date'] = None
elif child.tag == 'description':
result['description'] = child.text
return result
def cached_description(self, url):
data = self.cache.get(url)
if data is not None:
return data
data = self.description(url)
self.cache.set(url, data)
return data
@abstractmethod
def url(self) -> Text:
pass
@abstractmethod
def description(self, url) -> Text:
pass
class MultiFeedFetcher(FeedFetcher, metaclass=ABCMeta):
def fetch(self) -> Iterable[Item]:
root = self.fetcher.xml(self.url())
for item in root.iter('item'):
for element in self.items(item):
yield element
def items(self, item) -> Iterable[Item]:
result = self.build_result(item)
if callable(self.callback):
if not self.callback(result, item):
return
original_id = result['id']
original_title = result['title']
for index, description in enumerate(self.cached_description(result['link']), 1):
result['id'] = "%s+%03d+%03d" % (original_id, 1000 - index, index)
if index > 1:
result['title'] = "%s(%d)" % (original_title, index)
result['description'] = description
yield Item(**result)
@abstractmethod
def description(self, link) -> List[Text]:
pass
| 2,925 | 711 | 69 |
92c83d74383f55b74e691d298794128213e8c4df | 1,291 | py | Python | scripts/spinning/plot_just_pressure_vs_time.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | null | null | null | scripts/spinning/plot_just_pressure_vs_time.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | null | null | null | scripts/spinning/plot_just_pressure_vs_time.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | 1 | 2019-11-27T19:10:25.000Z | 2019-11-27T19:10:25.000Z | import numpy as np
import matplotlib.pyplot as plt
from hs_digitizer import *
import glob
import scipy.signal as ss
from scipy.optimize import curve_fit
import re, sys
plt.rcParams.update({'font.size': 14})
paths = ['/daq2/20190618/pramp_tests/outgassing', \
#'/daq2/20190624/pramp/outgassing_test1', \
'/daq2/20190625/pramp/outgassing_test2', \
'/daq2/20190626/bead1/spinning/pramp/outgassing/50kHz_4Vpp']
labels = ['Old data', 'Reseated Window', 'Much Later']
chan_to_plot = 2
time_arrs = []
pressure_arrs = []
for path in paths:
files, lengths = bu.find_all_fnames(path, sort_time=True)
nfiles = len(files)
init_file = 0
times = []
pressures = []
for fileind, file in enumerate(files):
bu.progress_bar(fileind, nfiles)
obj = hsDat(file)
pressures.append(obj.attribs["pressures"])
times.append(obj.attribs["time"] * 1e-9)
pressures = np.array(pressures)
times = np.array(times) - times[0]
pressure_arrs.append(pressures)
time_arrs.append(times)
for pathind, path in enumerate(paths):
plt.plot(time_arrs[pathind], pressure_arrs[pathind][:,chan_to_plot], \
label=labels[pathind])
plt.xlabel('Time [s]')
plt.ylabel('Chamber Pressure [torr]')
plt.suptitle('Leak Got Better')
plt.legend()
plt.tight_layout()
plt.subplots_adjust(top=0.91)
plt.show()
| 23.472727 | 71 | 0.727343 | import numpy as np
import matplotlib.pyplot as plt
from hs_digitizer import *
import glob
import scipy.signal as ss
from scipy.optimize import curve_fit
import re, sys
plt.rcParams.update({'font.size': 14})
paths = ['/daq2/20190618/pramp_tests/outgassing', \
#'/daq2/20190624/pramp/outgassing_test1', \
'/daq2/20190625/pramp/outgassing_test2', \
'/daq2/20190626/bead1/spinning/pramp/outgassing/50kHz_4Vpp']
labels = ['Old data', 'Reseated Window', 'Much Later']
chan_to_plot = 2
time_arrs = []
pressure_arrs = []
for path in paths:
files, lengths = bu.find_all_fnames(path, sort_time=True)
nfiles = len(files)
init_file = 0
times = []
pressures = []
for fileind, file in enumerate(files):
bu.progress_bar(fileind, nfiles)
obj = hsDat(file)
pressures.append(obj.attribs["pressures"])
times.append(obj.attribs["time"] * 1e-9)
pressures = np.array(pressures)
times = np.array(times) - times[0]
pressure_arrs.append(pressures)
time_arrs.append(times)
for pathind, path in enumerate(paths):
plt.plot(time_arrs[pathind], pressure_arrs[pathind][:,chan_to_plot], \
label=labels[pathind])
plt.xlabel('Time [s]')
plt.ylabel('Chamber Pressure [torr]')
plt.suptitle('Leak Got Better')
plt.legend()
plt.tight_layout()
plt.subplots_adjust(top=0.91)
plt.show()
| 0 | 0 | 0 |
616096bca9b0495db10238f893e80f1606df620f | 8,979 | py | Python | pywrm/external_widgets/w2ui/w2ui_layout.py | schapman1974/pywrm | 706b45b5fe36c860fb3acf0117a528ff3f92ca75 | [
"BSD-3-Clause"
] | 2 | 2020-04-05T22:06:38.000Z | 2020-04-05T22:06:43.000Z | pywrm/external_widgets/w2ui/w2ui_layout.py | schapman1974/pywrm | 706b45b5fe36c860fb3acf0117a528ff3f92ca75 | [
"BSD-3-Clause"
] | null | null | null | pywrm/external_widgets/w2ui/w2ui_layout.py | schapman1974/pywrm | 706b45b5fe36c860fb3acf0117a528ff3f92ca75 | [
"BSD-3-Clause"
] | null | null | null | """
W2UI Layout Widget Implementation
"""
from .raw_w2ui_layout import Layout as w2ui_raw_layout
class Layout:
"""W2UI Layout class"""
@property
@property
def init_widget(self):
"""Initialize the layout to be displayed"""
if not self._has_panel:
self.content_top = {
"id": self.name,
"type": "main",
"height": "100%",
"resizable": False
}
self._build_config()
self._raw_layout.initLayout(self.config, name=self.name)
def attach_widget(self, widget_id, panel_id, config):
"""Attach a widget to a panel in the layout"""
self._build_config()
self._raw_layout.content(
cell_id=self._get_type(panel_id),
widget_id=widget_id,
config=config
)
def repaint(self):
"""Repaint the layout on the screen"""
self._raw_layout.refresh()
def hide_panel(self, panel):
"""Hide a panel on the layout"""
self._raw_layout.hide(self._get_type(panel), True)
def show_panel(self, panel):
"""Show a hidden panel on the layout"""
self._raw_layout.show(self._get_type(panel), True)
def toggle_panel(self, panel):
"""Toggle between hiding and showing a layout"""
self._raw_layout.toggle(self._get_type(panel))
def add_top_header(self, **kwargs):
"""Add the top header panel to the layout"""
self.top_header = _panel_config_update(kwargs, "height")
def add_bottom_footer(self, **kwargs):
"""Add the bottom footer panel to the layout"""
self.bottom_footer = _panel_config_update(kwargs, "height")
def add_left_side(self, **kwargs):
"""Add the left side panel to the layout"""
self.left_side = _panel_config_update(kwargs, "width")
def add_right_side(self, **kwargs):
"""Add the right side palen to the layout"""
self.right_side = _panel_config_update(kwargs, "width")
def add_content_top(self, **kwargs):
"""Add the content top panel to the layout"""
self.content_top = _panel_config_update(kwargs, "height")
def add_content_bottom(self, **kwargs):
"""Add the content bottom panel to the layout"""
self.content_bottom = _panel_config_update(kwargs, "height")
def on_panel_hide(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel hide event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_hide_callable = event_callable
self._raw_layout.onHide(
self.on_panel_hide_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_hide_return(self, event):
"""Panel hide event return"""
self.on_panel_hide_callable(event["target"])
def on_panel_show(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel show event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_show_callable = event_callable
self._raw_layout.onShow(
self.on_panel_show_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_show_return(self, event):
"""Panel show event return"""
self.on_panel_show_callable(event["target"])
def on_panel_resize(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel resize event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_resize_callable = event_callable
self._raw_layout.resize(
self.on_panel_resize_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_resize_return(self, event):
"""Panel resize event return"""
self.on_panel_resize_callable(event["target"])
def before_panel_resize(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the Before panel resize event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.before_panel_resize_callable = event_callable
self._raw_layout.beforeResizeStart(
self.before_panel_resize_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def before_panel_resize_return(self, event):
"""Before Panel resize event return"""
self.before_panel_resize_callable(event["target"])
| 35.630952 | 94 | 0.598396 | """
W2UI Layout Widget Implementation
"""
from .raw_w2ui_layout import Layout as w2ui_raw_layout
def _panel_config_update(config, size_type):
if "html" in config:
config["content"] = config["html"]
if size_type in config:
config["size"] = config[size_type]
return config
class Layout:
"""W2UI Layout class"""
def __init__(self, layout_id, session_id, parent):
self._raw_layout = w2ui_raw_layout(parent, session_id)
self.name = layout_id
self.session_id = session_id
self.config = None
# set empty settings for panels
self.content_top = {}
self.top_header = {}
self.content_bottom = {}
self.left_side = {}
self.right_side = {}
self.bottom_footer = {}
# callables for events
self.on_panel_show_callable = None
self.on_panel_hide_callable = None
self.on_panel_resize_callable = None
self.before_panel_resize_callable = None
style = ("background-color:white;padding:0px;color:rgba(0,0,0,0.7);"
"font-family:Roboto, Arial, Tahoma, Verdana, sans-serif;"
"font-weight:400;font-size:14px;line-height:20px;")
self.border_top = "border-top:1px solid #e4e4e4;"
self.border_bottom = "border-bottom:1px solid #e4e4e4;"
self.border_left = "border-left:1px solid #e4e4e4;"
self.border_right = "border-right:1px solid #e4e4e4;"
self.panel_type_info = {
"content_top":{
"panel_default_type": "main",
"panel_default_style": style,
"panel_default_size": "100%",
"panel_default_resizable": False,
},
"bottom_footer": {
"panel_default_type": "bottom",
"panel_default_style": style+self.border_top,
"panel_default_size": "15%",
"panel_default_resizable": False,
},
"content_bottom": {
"panel_default_type": "preview",
"panel_default_style": style+self.border_top,
"panel_default_size": "15%",
"panel_default_resizable": False,
},
"left_side": {
"panel_default_type": "left",
"panel_default_style": style+self.border_right,
"panel_default_size": "10%",
"panel_default_resizable": False,
},
"right_side": {
"panel_default_type": "right",
"panel_default_style": style+self.border_left,
"panel_default_size": "10%",
"panel_default_resizable": False,
},
"top_header": {
"panel_default_type": "top",
"panel_default_style": style+self.border_bottom,
"panel_default_size": "10%",
"panel_default_resizable": False,
},
}
def _build_config(self):
self.config = {
"name": self.name,
"panels": [
self.top_header,
self.content_top,
self.content_bottom,
self.left_side,
self.right_side,
self.bottom_footer
]
}
self.config["panels"] = [
panel
for panel in self.config["panels"]
if panel
]
return self.config
@property
def raw_widget(self):
return self._raw_layout
@property
def _has_panel(self):
return (
bool(self.content_top) +
bool(self.top_header) +
bool(self.content_bottom) +
bool(self.left_side) +
bool(self.right_side) +
bool(self.bottom_footer)
)
def init_widget(self):
"""Initialize the layout to be displayed"""
if not self._has_panel:
self.content_top = {
"id": self.name,
"type": "main",
"height": "100%",
"resizable": False
}
self._build_config()
self._raw_layout.initLayout(self.config, name=self.name)
def attach_widget(self, widget_id, panel_id, config):
"""Attach a widget to a panel in the layout"""
self._build_config()
self._raw_layout.content(
cell_id=self._get_type(panel_id),
widget_id=widget_id,
config=config
)
def repaint(self):
"""Repaint the layout on the screen"""
self._raw_layout.refresh()
def hide_panel(self, panel):
"""Hide a panel on the layout"""
self._raw_layout.hide(self._get_type(panel), True)
def show_panel(self, panel):
"""Show a hidden panel on the layout"""
self._raw_layout.show(self._get_type(panel), True)
def toggle_panel(self, panel):
"""Toggle between hiding and showing a layout"""
self._raw_layout.toggle(self._get_type(panel))
def add_top_header(self, **kwargs):
"""Add the top header panel to the layout"""
self.top_header = _panel_config_update(kwargs, "height")
def add_bottom_footer(self, **kwargs):
"""Add the bottom footer panel to the layout"""
self.bottom_footer = _panel_config_update(kwargs, "height")
def add_left_side(self, **kwargs):
"""Add the left side panel to the layout"""
self.left_side = _panel_config_update(kwargs, "width")
def add_right_side(self, **kwargs):
"""Add the right side palen to the layout"""
self.right_side = _panel_config_update(kwargs, "width")
def add_content_top(self, **kwargs):
"""Add the content top panel to the layout"""
self.content_top = _panel_config_update(kwargs, "height")
def add_content_bottom(self, **kwargs):
"""Add the content bottom panel to the layout"""
self.content_bottom = _panel_config_update(kwargs, "height")
def _get_type(self, panel_id):
for panel_type in self.panel_type_info.keys():
panel = self.__dict__[panel_type]
if "id" in panel and panel["id"] == panel_id:
return panel["type"]
return None
def _get_id(self, atype):
for panel_type in self.panel_type_info.keys():
panel = self.__dict__[panel_type]
if "type" in panel and panel["type"] == atype:
return panel["id"]
return None
def on_panel_hide(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel hide event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_hide_callable = event_callable
self._raw_layout.onHide(
self.on_panel_hide_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_hide_return(self, event):
"""Panel hide event return"""
self.on_panel_hide_callable(event["target"])
def on_panel_show(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel show event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_show_callable = event_callable
self._raw_layout.onShow(
self.on_panel_show_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_show_return(self, event):
"""Panel show event return"""
self.on_panel_show_callable(event["target"])
def on_panel_resize(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the panel resize event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.on_panel_resize_callable = event_callable
self._raw_layout.resize(
self.on_panel_resize_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def on_panel_resize_return(self, event):
"""Panel resize event return"""
self.on_panel_resize_callable(event["target"])
def before_panel_resize(self, event_callable, ret_widget_values=None, block_signal=False):
"""Hook to the Before panel resize event"""
#TODO Implementation of ret_widget_values
#TODO Implementation of block_signal?? or removal
self.before_panel_resize_callable = event_callable
self._raw_layout.beforeResizeStart(
self.before_panel_resize_return,
ret_widget_values=ret_widget_values,
block_signal=block_signal
)
def before_panel_resize_return(self, event):
"""Before Panel resize event return"""
self.before_panel_resize_callable(event["target"])
| 3,978 | 0 | 182 |
fde13694121e4ca8e5be4270e13a308ab3ed6971 | 1,112 | py | Python | 2019/08 August/dp08212019.py | vishalkarda/DailyPracticeProblemsDIP | 7e7f9a6ca8185fe34791519da3ff00bef3c76476 | [
"MIT"
] | 5 | 2019-08-06T02:34:41.000Z | 2022-01-08T03:03:16.000Z | 2019/08 August/dp08212019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
] | 15 | 2021-06-01T14:04:16.000Z | 2022-03-08T21:17:22.000Z | 2019/08 August/dp08212019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
] | 4 | 2019-09-19T20:00:05.000Z | 2021-08-16T11:31:51.000Z | # This problem was recently asked by Twitter:
# Given a string with the initial condition of dominoes, where:
# . represents that the domino is standing still
# L represents that the domino is falling to the left side
# R represents that the domino is falling to the right side
# Figure out the final position of the dominoes. If there are dominoes that get pushed on both ends, the force cancels out and that domino remains upright.
print (Solution().pushDominoes('..R...L..R.'))
# ..RR.LL..RR
| 37.066667 | 155 | 0.564748 | # This problem was recently asked by Twitter:
# Given a string with the initial condition of dominoes, where:
# . represents that the domino is standing still
# L represents that the domino is falling to the left side
# R represents that the domino is falling to the right side
# Figure out the final position of the dominoes. If there are dominoes that get pushed on both ends, the force cancels out and that domino remains upright.
class Solution(object):
def pushDominoes(self, dominoes):
# Fill this in.
symbols = [(i, x) for i, x in enumerate(dominoes) if x != '.']
symbols = [(-1, 'L')] + symbols + [(len(dominoes), 'R')]
# print(symbols)
ans = list(dominoes)
for (i, x), (j, y) in zip(symbols, symbols[1:]):
if x == y:
for k in range(i+1, j):
ans[k] = x
elif x > y: #RL
for k in range(i+1, j):
ans[k] = '.LR'[(k-i > j-k) - (k-i < j-k)] # cmp : (a > b) - (a < b)
return "".join(ans)
print (Solution().pushDominoes('..R...L..R.'))
# ..RR.LL..RR
| 562 | 2 | 49 |
33fbb704a8237ebad702f535b5918ac1d9a1d729 | 837 | py | Python | src/wan_agent/larger_cfg/gen_id_port.py | equ-0/cascade | 993797e3ea43727fe2f9b0b9b0b7125de3d8c08a | [
"BSD-3-Clause"
] | null | null | null | src/wan_agent/larger_cfg/gen_id_port.py | equ-0/cascade | 993797e3ea43727fe2f9b0b9b0b7125de3d8c08a | [
"BSD-3-Clause"
] | null | null | null | src/wan_agent/larger_cfg/gen_id_port.py | equ-0/cascade | 993797e3ea43727fe2f9b0b9b0b7125de3d8c08a | [
"BSD-3-Clause"
] | 2 | 2020-11-11T04:04:17.000Z | 2021-01-28T07:08:41.000Z | base_id = 0
base_gms = 23580
base_state_transfer = 28366
base_sst = 37683
base_rdmc = 31675
base_external = 32645
num_senders = 8
num_clients = 2
for i in range(num_senders + num_clients):
print('\n\nfor id {}:\n'.format(i))
print('# my local id - each node should have a different id')
print('local_id = {}'.format(base_id + i))
print('# my local ip address')
print('local_ip = 127.0.0.1')
print('# derecho gms port')
print('gms_port = {}'.format(base_gms + i))
print('# derecho rpc port')
print('state_transfer_port = {}'.format(base_state_transfer + i))
print('# sst tcp port')
print('sst_port = {}'.format(base_sst + i))
print('# rdmc tcp port')
print('rdmc_port = {}'.format(base_rdmc + i))
print('# external port')
print('external_port = {}'.format(base_external + i)) | 31 | 69 | 0.642772 | base_id = 0
base_gms = 23580
base_state_transfer = 28366
base_sst = 37683
base_rdmc = 31675
base_external = 32645
num_senders = 8
num_clients = 2
for i in range(num_senders + num_clients):
print('\n\nfor id {}:\n'.format(i))
print('# my local id - each node should have a different id')
print('local_id = {}'.format(base_id + i))
print('# my local ip address')
print('local_ip = 127.0.0.1')
print('# derecho gms port')
print('gms_port = {}'.format(base_gms + i))
print('# derecho rpc port')
print('state_transfer_port = {}'.format(base_state_transfer + i))
print('# sst tcp port')
print('sst_port = {}'.format(base_sst + i))
print('# rdmc tcp port')
print('rdmc_port = {}'.format(base_rdmc + i))
print('# external port')
print('external_port = {}'.format(base_external + i)) | 0 | 0 | 0 |
a172186533318a329de3454b66e3413ce5d39a4a | 59 | py | Python | tests/check_jsonschema_test.py | KeltonKarboviak/pre-commit-hooks | 1e26be7baf8757bb454edd8853e42fab14fe6cfa | [
"MIT"
] | null | null | null | tests/check_jsonschema_test.py | KeltonKarboviak/pre-commit-hooks | 1e26be7baf8757bb454edd8853e42fab14fe6cfa | [
"MIT"
] | null | null | null | tests/check_jsonschema_test.py | KeltonKarboviak/pre-commit-hooks | 1e26be7baf8757bb454edd8853e42fab14fe6cfa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
| 9.833333 | 23 | 0.542373 | # -*- coding: utf-8 -*-
def test_main():
assert True
| 11 | 0 | 23 |
b5449c0b45a7da18f0f4ecb1dde64382c4f1b458 | 225 | py | Python | analyze-intraday-stocks/config.py | timescale/examples | 130681e4d65e7cb5c4a50a708d225c0aeaa7c7e8 | [
"Apache-2.0"
] | 93 | 2019-06-27T16:34:24.000Z | 2022-03-29T23:16:42.000Z | analyze-intraday-stocks/config.py | timescale/examples | 130681e4d65e7cb5c4a50a708d225c0aeaa7c7e8 | [
"Apache-2.0"
] | 5 | 2019-07-22T18:39:39.000Z | 2021-06-15T09:57:06.000Z | analyze-intraday-stocks/config.py | timescale/examples | 130681e4d65e7cb5c4a50a708d225c0aeaa7c7e8 | [
"Apache-2.0"
] | 45 | 2019-09-18T20:39:59.000Z | 2022-03-12T23:56:17.000Z | # Make sure to edit this configuration file with your database connection details
# and Alpha Vantage API key
DB_USER = 'user'
DB_PASS = 'passwd'
DB_HOST = 'host'
DB_PORT = '000'
DB_NAME = 'db'
APIKEY = 'alpha_vantage_apikey' | 28.125 | 81 | 0.751111 | # Make sure to edit this configuration file with your database connection details
# and Alpha Vantage API key
DB_USER = 'user'
DB_PASS = 'passwd'
DB_HOST = 'host'
DB_PORT = '000'
DB_NAME = 'db'
APIKEY = 'alpha_vantage_apikey' | 0 | 0 | 0 |
b8697986f5a639082c728ef3596017986a777340 | 1,935 | py | Python | src/dl_plus/backend.py | un-def/dl-plus | 1f5198043bd0885e666c2880a8486e8075e4a0c2 | [
"MIT"
] | 30 | 2020-10-24T16:35:48.000Z | 2021-11-11T11:04:12.000Z | src/dl_plus/backend.py | un-def/dl-plus | 1f5198043bd0885e666c2880a8486e8075e4a0c2 | [
"MIT"
] | null | null | null | src/dl_plus/backend.py | un-def/dl-plus | 1f5198043bd0885e666c2880a8486e8075e4a0c2 | [
"MIT"
] | 3 | 2020-11-30T07:11:44.000Z | 2021-01-26T08:05:13.000Z | import sys
from collections import namedtuple
from pathlib import Path
from dl_plus import ytdl
from dl_plus.config import get_config_home
from dl_plus.exceptions import DLPlusException
from dl_plus.pypi import load_metadata
backends_dir = get_config_home() / 'backends'
BackendInfo = namedtuple(
'BackendInfo', 'import_name,version,path,is_managed,metadata')
| 26.506849 | 70 | 0.696124 | import sys
from collections import namedtuple
from pathlib import Path
from dl_plus import ytdl
from dl_plus.config import get_config_home
from dl_plus.exceptions import DLPlusException
from dl_plus.pypi import load_metadata
backends_dir = get_config_home() / 'backends'
BackendInfo = namedtuple(
'BackendInfo', 'import_name,version,path,is_managed,metadata')
class BackendError(DLPlusException):
pass
def _is_managed(location: Path) -> bool:
try:
location.relative_to(backends_dir)
return True
except ValueError:
return False
def _normalize(string: str) -> str:
return string.replace('-', '_')
def get_backend_dir(backend: str) -> Path:
return backends_dir / _normalize(backend)
def parse_backend_string(backend_string: str):
if '/' in backend_string:
backend, _, package_name = backend_string.partition('/')
backend_dir = get_backend_dir(backend)
if not backend_dir.is_dir():
raise BackendError(
f'{backend_dir} does not exist or is not a directory')
else:
package_name = backend_string
backend_dir = get_backend_dir(backend_string)
if not backend_dir.is_dir():
backend_dir = None
return backend_dir, _normalize(package_name)
def init_backend(backend_string: str) -> BackendInfo:
backend_dir, package_name = parse_backend_string(backend_string)
if backend_dir:
sys.path.insert(0, str(backend_dir))
ytdl.init(package_name)
ytdl_module = ytdl.get_ytdl_module()
path = Path(ytdl_module.__path__[0])
is_managed = _is_managed(path)
if is_managed:
metadata = load_metadata(backend_dir)
else:
metadata = None
return BackendInfo(
import_name=ytdl_module.__name__,
version=ytdl.import_from('version', '__version__'),
path=path,
is_managed=is_managed,
metadata=metadata,
)
| 1,397 | 25 | 138 |
76f3367ceae22e36060bbdd9f5c62233f09ff9c9 | 2,821 | py | Python | Sync-in-Janus-bunch.py | shahmari/Synchronization-in-janus-bunch | a3b68928d94e0ecc2f6795429a498cc0fb8a7111 | [
"MIT"
] | null | null | null | Sync-in-Janus-bunch.py | shahmari/Synchronization-in-janus-bunch | a3b68928d94e0ecc2f6795429a498cc0fb8a7111 | [
"MIT"
] | null | null | null | Sync-in-Janus-bunch.py | shahmari/Synchronization-in-janus-bunch | a3b68928d94e0ecc2f6795429a498cc0fb8a7111 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import math
from numpy import random
omega = np.array([-1/2, 1/2])
beta = 1/4
sigma = 1
N = 16
ND = 30
dt = 0.01
T_list = np.linspace(0, ND, num=int(ND/dt))
(res, phase) = solve_diffeqs(N, sigma, beta, omega, ND, dt)
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, res[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel('θ(t)')
#plt.legend(loc=1)
plt.savefig('1')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, res[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel('φ(t)')
plt.savefig('2')
#plt.legend(loc=1)
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, phase[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel("θ'(t)")
#plt.legend(loc=0)
plt.savefig('3')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, phase[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel("φ'(t)")
#plt.legend(loc=0)
plt.savefig('4')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(res[:, i, 0], phase[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('θ(t)')
plt.ylabel("θ'(t)")
#plt.legend(loc=0)
plt.savefig('5')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(res[:, i, 1], phase[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('φ(t)')
plt.ylabel("φ'(t)")
plt.savefig('6')
#plt.legend(loc=0)
#plt.show()
| 26.866667 | 77 | 0.573201 | import numpy as np
import matplotlib.pyplot as plt
import math
from numpy import random
def diff_eqs(f, n):
Y = np.zeros(2)
if n != N-1:
Y[0] = omega[0] + beta * \
math.sin(f[n][1]-f[n][0]) + sigma * math.sin(f[n+1][1] - f[n][0])
Y[1] = omega[1] + beta * \
math.sin(f[n][0]-f[n][1]) + sigma * math.sin(f[n-1][0] - f[n][1])
else:
Y[0] = omega[0] + beta * \
math.sin(f[n][1]-f[n][0]) + sigma * math.sin(f[0][1] - f[n][0])
Y[1] = omega[1] + beta * \
math.sin(f[n][0]-f[n][1]) + sigma * math.sin(f[n-1][0] - f[n][1])
return Y
def solve_diffeqs(N, sigma, beta, omega, ND, dt):
NT = int(ND/dt)
INP = random.uniform(low=0, high=2*np.pi, size=(N, 2))
RES = np.zeros((NT, N, 2))
Phase = np.zeros((NT, N, 2))
for i in range(NT):
INP_Copy = INP
for j in range(N):
INP[j] += dt*diff_eqs(INP_Copy, j)
Phase[i][j] = diff_eqs(INP_Copy, j)
RES[i] = INP_Copy
return (RES, Phase)
omega = np.array([-1/2, 1/2])
beta = 1/4
sigma = 1
N = 16
ND = 30
dt = 0.01
T_list = np.linspace(0, ND, num=int(ND/dt))
(res, phase) = solve_diffeqs(N, sigma, beta, omega, ND, dt)
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, res[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel('θ(t)')
#plt.legend(loc=1)
plt.savefig('1')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, res[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel('φ(t)')
plt.savefig('2')
#plt.legend(loc=1)
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, phase[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel("θ'(t)")
#plt.legend(loc=0)
plt.savefig('3')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(T_list, phase[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('Time(sec)')
plt.ylabel("φ'(t)")
#plt.legend(loc=0)
plt.savefig('4')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(res[:, i, 0], phase[:, i, 0], label='Oscillator Number={i}')
plt.xlabel('θ(t)')
plt.ylabel("θ'(t)")
#plt.legend(loc=0)
plt.savefig('5')
#plt.show()
plt.figure(figsize=(10, 7), dpi=100)
plt.grid(color='k', linestyle='--', linewidth=0.5)
for i in range(N):
plt.plot(res[:, i, 1], phase[:, i, 1], label='Oscillator Number={i}')
plt.xlabel('φ(t)')
plt.ylabel("φ'(t)")
plt.savefig('6')
#plt.legend(loc=0)
#plt.show()
| 899 | 0 | 46 |
50c767b00bdf412f6414d81aaa122a7748c13a62 | 11,163 | py | Python | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | 1 | 2019-12-30T03:16:26.000Z | 2019-12-30T03:16:26.000Z | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | """
Deep Deterministic Policy Gradient (DDPG)
-----------------------------------------
An algorithm concurrently learns a Q-function and a policy.
It uses off-policy data and the Bellman equation to learn the Q-function,
and uses the Q-function to learn the policy.
Reference
---------
Deterministic Policy Gradient Algorithms, Silver et al. 2014
Continuous Control With Deep Reinforcement Learning, Lillicrap et al. 2016
MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
Environment
-----------
Openai Gym Pendulum-v0, continual action space
Prerequisites
-------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
To run
------
python tutorial_DDPG.py --train/test
"""
import argparse
import os
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorlayer as tl
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false')
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 1 # random seed
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 10000 # size of replay buffer
BATCH_SIZE = 32 # update batchsize
MAX_EPISODES = 200 # total number of episodes for training
MAX_EP_STEPS = 200 # total number of steps for each episode
TEST_PER_EPISODES = 10 # test the model per episodes
VAR = 3 # control exploration
############################### DDPG ####################################
class DDPG(object):
"""
DDPG class
"""
def ema_update(self):
"""
Soft updating by exponential smoothing
:return: None
"""
paras = self.actor.trainable_weights + self.critic.trainable_weights
self.ema.apply(paras)
for i, j in zip(self.actor_target.trainable_weights + self.critic_target.trainable_weights, paras):
i.assign(self.ema.average(j))
def choose_action(self, s):
"""
Choose action
:param s: state
:return: act
"""
return self.actor(np.array([s], dtype=np.float32))[0]
def learn(self):
"""
Update parameters
:return: None
"""
indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
bt = self.memory[indices, :]
bs = bt[:, :self.s_dim]
ba = bt[:, self.s_dim:self.s_dim + self.a_dim]
br = bt[:, -self.s_dim - 1:-self.s_dim]
bs_ = bt[:, -self.s_dim:]
with tf.GradientTape() as tape:
a_ = self.actor_target(bs_)
q_ = self.critic_target([bs_, a_])
y = br + GAMMA * q_
q = self.critic([bs, ba])
td_error = tf.losses.mean_squared_error(y, q)
c_grads = tape.gradient(td_error, self.critic.trainable_weights)
self.critic_opt.apply_gradients(zip(c_grads, self.critic.trainable_weights))
with tf.GradientTape() as tape:
a = self.actor(bs)
q = self.critic([bs, a])
a_loss = -tf.reduce_mean(q) # maximize the q
a_grads = tape.gradient(a_loss, self.actor.trainable_weights)
self.actor_opt.apply_gradients(zip(a_grads, self.actor.trainable_weights))
self.ema_update()
def store_transition(self, s, a, r, s_):
"""
Store data in data buffer
:param s: state
:param a: act
:param r: reward
:param s_: next state
:return: None
"""
s = s.astype(np.float32)
s_ = s_.astype(np.float32)
transition = np.hstack((s, a, [r], s_))
index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory
self.memory[index, :] = transition
self.pointer += 1
def save_ckpt(self):
"""
save trained weights
:return: None
"""
if not os.path.exists('model'):
os.makedirs('model')
tl.files.save_weights_to_hdf5('model/ddpg_actor.hdf5', self.actor)
tl.files.save_weights_to_hdf5('model/ddpg_actor_target.hdf5', self.actor_target)
tl.files.save_weights_to_hdf5('model/ddpg_critic.hdf5', self.critic)
tl.files.save_weights_to_hdf5('model/ddpg_critic_target.hdf5', self.critic_target)
def load_ckpt(self):
"""
load trained weights
:return: None
"""
tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor.hdf5', self.actor)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor_target.hdf5', self.actor_target)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic.hdf5', self.critic)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic_target.hdf5', self.critic_target)
if __name__ == '__main__':
env = gym.make(ENV_NAME)
env = env.unwrapped
# reproducible
env.seed(RANDOMSEED)
np.random.seed(RANDOMSEED)
tf.random.set_seed(RANDOMSEED)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
ddpg = DDPG(a_dim, s_dim, a_bound)
if args.train: # train
reward_buffer = []
t0 = time.time()
for i in range(MAX_EPISODES):
t1 = time.time()
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
# Add exploration noise
a = ddpg.choose_action(s)
a = np.clip(np.random.normal(a, VAR), -2, 2) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
ddpg.store_transition(s, a, r / 10, s_)
if ddpg.pointer > MEMORY_CAPACITY:
ddpg.learn()
s = s_
ep_reward += r
if j == MAX_EP_STEPS - 1:
print(
'\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
i, MAX_EPISODES, ep_reward,
time.time() - t1
), end=''
)
plt.show()
# test
if i and not i % TEST_PER_EPISODES:
t1 = time.time()
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
a = ddpg.choose_action(s) # without exploration noise
s_, r, done, info = env.step(a)
s = s_
ep_reward += r
if j == MAX_EP_STEPS - 1:
print(
'\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
i, MAX_EPISODES, ep_reward,
time.time() - t1
)
)
reward_buffer.append(ep_reward)
if reward_buffer:
plt.ion()
plt.cla()
plt.title('DDPG')
plt.plot(np.array(range(len(reward_buffer))) * TEST_PER_EPISODES, reward_buffer) # plot the episode vt
plt.xlabel('episode steps')
plt.ylabel('normalized state-action value')
plt.ylim(-2000, 0)
plt.show()
plt.pause(0.1)
plt.ioff()
plt.show()
print('\nRunning time: ', time.time() - t0)
ddpg.save_ckpt()
# test
ddpg.load_ckpt()
while True:
s = env.reset()
for i in range(MAX_EP_STEPS):
env.render()
s, r, done, info = env.step(ddpg.choose_action(s))
if done:
break
| 35.325949 | 120 | 0.552002 | """
Deep Deterministic Policy Gradient (DDPG)
-----------------------------------------
An algorithm concurrently learns a Q-function and a policy.
It uses off-policy data and the Bellman equation to learn the Q-function,
and uses the Q-function to learn the policy.
Reference
---------
Deterministic Policy Gradient Algorithms, Silver et al. 2014
Continuous Control With Deep Reinforcement Learning, Lillicrap et al. 2016
MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
Environment
-----------
Openai Gym Pendulum-v0, continual action space
Prerequisites
-------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
To run
------
python tutorial_DDPG.py --train/test
"""
import argparse
import os
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorlayer as tl
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false')
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 1 # random seed
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 10000 # size of replay buffer
BATCH_SIZE = 32 # update batchsize
MAX_EPISODES = 200 # total number of episodes for training
MAX_EP_STEPS = 200 # total number of steps for each episode
TEST_PER_EPISODES = 10 # test the model per episodes
VAR = 3 # control exploration
############################### DDPG ####################################
class DDPG(object):
"""
DDPG class
"""
def __init__(self, a_dim, s_dim, a_bound):
self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
self.pointer = 0
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound
W_init = tf.random_normal_initializer(mean=0, stddev=0.3)
b_init = tf.constant_initializer(0.1)
def get_actor(input_state_shape, name=''):
"""
Build actor network
:param input_state_shape: state
:param name: name
:return: act
"""
inputs = tl.layers.Input(input_state_shape, name='A_input')
x = tl.layers.Dense(n_units=30, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='A_l1')(inputs)
x = tl.layers.Dense(n_units=a_dim, act=tf.nn.tanh, W_init=W_init, b_init=b_init, name='A_a')(x)
x = tl.layers.Lambda(lambda x: np.array(a_bound) * x)(x)
return tl.models.Model(inputs=inputs, outputs=x, name='Actor' + name)
def get_critic(input_state_shape, input_action_shape, name=''):
"""
Build critic network
:param input_state_shape: state
:param input_action_shape: act
:param name: name
:return: Q value Q(s,a)
"""
s = tl.layers.Input(input_state_shape, name='C_s_input')
a = tl.layers.Input(input_action_shape, name='C_a_input')
x = tl.layers.Concat(1)([s, a])
x = tl.layers.Dense(n_units=60, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='C_l1')(x)
x = tl.layers.Dense(n_units=1, W_init=W_init, b_init=b_init, name='C_out')(x)
return tl.models.Model(inputs=[s, a], outputs=x, name='Critic' + name)
self.actor = get_actor([None, s_dim])
self.critic = get_critic([None, s_dim], [None, a_dim])
self.actor.train()
self.critic.train()
def copy_para(from_model, to_model):
"""
Copy parameters for soft updating
:param from_model: latest model
:param to_model: target model
:return: None
"""
for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):
j.assign(i)
self.actor_target = get_actor([None, s_dim], name='_target')
copy_para(self.actor, self.actor_target)
self.actor_target.eval()
self.critic_target = get_critic([None, s_dim], [None, a_dim], name='_target')
copy_para(self.critic, self.critic_target)
self.critic_target.eval()
self.R = tl.layers.Input([None, 1], tf.float32, 'r')
self.ema = tf.train.ExponentialMovingAverage(decay=1 - TAU) # soft replacement
self.actor_opt = tf.optimizers.Adam(LR_A)
self.critic_opt = tf.optimizers.Adam(LR_C)
def ema_update(self):
"""
Soft updating by exponential smoothing
:return: None
"""
paras = self.actor.trainable_weights + self.critic.trainable_weights
self.ema.apply(paras)
for i, j in zip(self.actor_target.trainable_weights + self.critic_target.trainable_weights, paras):
i.assign(self.ema.average(j))
def choose_action(self, s):
"""
Choose action
:param s: state
:return: act
"""
return self.actor(np.array([s], dtype=np.float32))[0]
def learn(self):
"""
Update parameters
:return: None
"""
indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
bt = self.memory[indices, :]
bs = bt[:, :self.s_dim]
ba = bt[:, self.s_dim:self.s_dim + self.a_dim]
br = bt[:, -self.s_dim - 1:-self.s_dim]
bs_ = bt[:, -self.s_dim:]
with tf.GradientTape() as tape:
a_ = self.actor_target(bs_)
q_ = self.critic_target([bs_, a_])
y = br + GAMMA * q_
q = self.critic([bs, ba])
td_error = tf.losses.mean_squared_error(y, q)
c_grads = tape.gradient(td_error, self.critic.trainable_weights)
self.critic_opt.apply_gradients(zip(c_grads, self.critic.trainable_weights))
with tf.GradientTape() as tape:
a = self.actor(bs)
q = self.critic([bs, a])
a_loss = -tf.reduce_mean(q) # maximize the q
a_grads = tape.gradient(a_loss, self.actor.trainable_weights)
self.actor_opt.apply_gradients(zip(a_grads, self.actor.trainable_weights))
self.ema_update()
def store_transition(self, s, a, r, s_):
"""
Store data in data buffer
:param s: state
:param a: act
:param r: reward
:param s_: next state
:return: None
"""
s = s.astype(np.float32)
s_ = s_.astype(np.float32)
transition = np.hstack((s, a, [r], s_))
index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory
self.memory[index, :] = transition
self.pointer += 1
def save_ckpt(self):
"""
save trained weights
:return: None
"""
if not os.path.exists('model'):
os.makedirs('model')
tl.files.save_weights_to_hdf5('model/ddpg_actor.hdf5', self.actor)
tl.files.save_weights_to_hdf5('model/ddpg_actor_target.hdf5', self.actor_target)
tl.files.save_weights_to_hdf5('model/ddpg_critic.hdf5', self.critic)
tl.files.save_weights_to_hdf5('model/ddpg_critic_target.hdf5', self.critic_target)
def load_ckpt(self):
"""
load trained weights
:return: None
"""
tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor.hdf5', self.actor)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor_target.hdf5', self.actor_target)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic.hdf5', self.critic)
tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic_target.hdf5', self.critic_target)
if __name__ == '__main__':
env = gym.make(ENV_NAME)
env = env.unwrapped
# reproducible
env.seed(RANDOMSEED)
np.random.seed(RANDOMSEED)
tf.random.set_seed(RANDOMSEED)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
ddpg = DDPG(a_dim, s_dim, a_bound)
if args.train: # train
reward_buffer = []
t0 = time.time()
for i in range(MAX_EPISODES):
t1 = time.time()
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
# Add exploration noise
a = ddpg.choose_action(s)
a = np.clip(np.random.normal(a, VAR), -2, 2) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
ddpg.store_transition(s, a, r / 10, s_)
if ddpg.pointer > MEMORY_CAPACITY:
ddpg.learn()
s = s_
ep_reward += r
if j == MAX_EP_STEPS - 1:
print(
'\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
i, MAX_EPISODES, ep_reward,
time.time() - t1
), end=''
)
plt.show()
# test
if i and not i % TEST_PER_EPISODES:
t1 = time.time()
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
a = ddpg.choose_action(s) # without exploration noise
s_, r, done, info = env.step(a)
s = s_
ep_reward += r
if j == MAX_EP_STEPS - 1:
print(
'\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
i, MAX_EPISODES, ep_reward,
time.time() - t1
)
)
reward_buffer.append(ep_reward)
if reward_buffer:
plt.ion()
plt.cla()
plt.title('DDPG')
plt.plot(np.array(range(len(reward_buffer))) * TEST_PER_EPISODES, reward_buffer) # plot the episode vt
plt.xlabel('episode steps')
plt.ylabel('normalized state-action value')
plt.ylim(-2000, 0)
plt.show()
plt.pause(0.1)
plt.ioff()
plt.show()
print('\nRunning time: ', time.time() - t0)
ddpg.save_ckpt()
# test
ddpg.load_ckpt()
while True:
s = env.reset()
for i in range(MAX_EP_STEPS):
env.render()
s, r, done, info = env.step(ddpg.choose_action(s))
if done:
break
| 2,886 | 0 | 29 |
2bc898ed23235b9d5067d84f005eaba60ccd680e | 1,096 | py | Python | moderation/forms.py | daynejones/django-moderation | 565481f1832114da2d0c48b0c23977d4d3a9b914 | [
"BSD-3-Clause"
] | 1 | 2019-06-06T17:56:23.000Z | 2019-06-06T17:56:23.000Z | moderation/forms.py | daynejones/django-moderation | 565481f1832114da2d0c48b0c23977d4d3a9b914 | [
"BSD-3-Clause"
] | 1 | 2020-01-31T20:37:53.000Z | 2020-01-31T20:37:53.000Z | moderation/forms.py | daynejones/django-moderation | 565481f1832114da2d0c48b0c23977d4d3a9b914 | [
"BSD-3-Clause"
] | 1 | 2019-06-06T19:27:58.000Z | 2019-06-06T19:27:58.000Z | from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import ModelForm, model_to_dict
from .constants import (MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED)
from .utils import django_17
| 34.25 | 79 | 0.635036 | from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import ModelForm, model_to_dict
from .constants import (MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED)
from .utils import django_17
class BaseModeratedObjectForm(ModelForm):
class Meta:
if django_17():
exclude = '__all__'
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
if instance:
try:
if instance.moderated_object.status in\
[MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED] and\
not instance.moderated_object.moderator.\
visible_until_rejected:
initial = model_to_dict(
instance.moderated_object.changed_object)
kwargs.setdefault('initial', {})
kwargs['initial'].update(initial)
except ObjectDoesNotExist:
pass
super(BaseModeratedObjectForm, self).__init__(*args, **kwargs)
| 692 | 119 | 23 |
27d76a4db10c403158e50ce1af15da574b71df9e | 10,153 | py | Python | src/systemstatus.py | Haptein/powerplan | a166deb46a473f19d0ad2f7b59f88ec430b51764 | [
"BSD-3-Clause"
] | 20 | 2021-10-08T20:27:43.000Z | 2022-03-13T15:02:01.000Z | src/systemstatus.py | Haptein/powerplan | a166deb46a473f19d0ad2f7b59f88ec430b51764 | [
"BSD-3-Clause"
] | 8 | 2021-10-09T02:24:14.000Z | 2021-12-31T05:23:04.000Z | src/systemstatus.py | Haptein/powerplan | a166deb46a473f19d0ad2f7b59f88ec430b51764 | [
"BSD-3-Clause"
] | null | null | null | import csv
import platform
from time import time
from statistics import mean
from collections import deque
from datetime import datetime
import psutil
import log
from cpu import Cpu
from __init__ import __version__
from process import ProcessReader
class History(deque):
'''
A double ended queue with methods for streaming data,
change detection and delta computation
'''
def update(self, value):
'''Stream value into deque'''
if self.__len__() == self.maxlen:
_ = self.popleft()
self.append(value)
else:
self.append(value)
def delta(self):
'''Returns the difference between the last and first value'''
if self.__len__() > 1:
return self[-1] - self[0]
else:
return None
def changed(self) -> bool:
'''Returns true if field's last value is different from the second last one'''
if self.__len__() > 1:
return self[-1] != self[-2]
else:
return None
| 38.604563 | 120 | 0.591943 | import csv
import platform
from time import time
from statistics import mean
from collections import deque
from datetime import datetime
import psutil
import log
from cpu import Cpu
from __init__ import __version__
from process import ProcessReader
def time_stamp():
return datetime.now().strftime('%H:%M:%S.%f')[:-3]
class System:
def __init__(self, cpu: Cpu, powersupply):
self.cpu = cpu
self.powersupply = powersupply
self.info = self.system_info(self.cpu.spec, self.powersupply)
def system_info(self, cpuspec, powersupply) -> str:
'''Generate system info string'''
# Variable string, None's will get filtered out
info = ('\n'+' '*4).join(filter(None, (
' '*4+'System',
f'OS:\t\t\t{platform.platform()}',
f'powerplan:\t\t{__version__} running on Python{platform.python_version()} with psutil{psutil.__version__}',
f'CPU model:\t\t{cpuspec.name}',
f'Core configuraton:\t{cpuspec.physical_cores}/{cpuspec.logical_cores} {cpuspec.sibling_cores_repr}',
f'Frequency range:\t{cpuspec.freq_range_repr}',
f'Driver:\t\t{cpuspec.driver_repr}',
f'Turbo:\t\t{cpuspec.turbo_path}',
f'Governors:\t\t{cpuspec.governors_repr}',
f'Policies:\t\t{cpuspec.policies_repr}' if cpuspec.policies else None,
f'Temperature:\t{cpuspec.temp_sensor_repr}',
f'AC adapter:\t\t{powersupply.ac_adapter.name}' if powersupply.ac_adapter.name else None,
f'Battery:\t\t{powersupply.battery.name}' if powersupply.battery.name else None
)))
return info
class History(deque):
'''
A double ended queue with methods for streaming data,
change detection and delta computation
'''
def __init__(self, maxlen=2):
super().__init__(maxlen=maxlen)
def update(self, value):
'''Stream value into deque'''
if self.__len__() == self.maxlen:
_ = self.popleft()
self.append(value)
else:
self.append(value)
def delta(self):
'''Returns the difference between the last and first value'''
if self.__len__() > 1:
return self[-1] - self[0]
else:
return None
def changed(self) -> bool:
'''Returns true if field's last value is different from the second last one'''
if self.__len__() > 1:
return self[-1] != self[-2]
else:
return None
class SystemStatus():
def __init__(self, system: System, profiles: dict,
fields: list, custom_fields: dict = None, history_len=2):
'''
Initializes dict of deques of length history_len
'''
assert history_len > 0 or history_len is None
# Hardware components
self.system = system
self.cpu: Cpu = system.cpu
self.rapl = system.cpu.rapl
self.powersupply = system.powersupply
self.battery = system.powersupply.battery
self.process_reader = ProcessReader(profiles=profiles)
# Setup fields' history objects
self._check_custom_fields(custom_fields)
self.field_methods = self._get_field_methods(fields=fields, custom_fields=custom_fields)
self.fields = set(self.field_methods.keys())
self.history = {field: History(maxlen=history_len) for field in self.field_methods}
self.history_len = history_len
self.partially_updated = set()
def __getitem__(self, field):
'''Get latest value of field'''
assert field in self.fields
return self.history[field][-1]
def _check_custom_fields(self, custom_fields: dict):
'''
check that custom fields are correct
'''
if custom_fields is None:
return
if type(custom_fields) is not dict:
log.error('custom_fields must be of type dict.')
for key in custom_fields:
value = custom_fields[key]
if (type(value) is not tuple) or (len(value) != 2) or (not callable(value[1])):
log.error(f'Value of {key} in custom_fields must be a tuple of length 2 (name, callable).')
if type(value[0]) is not str:
log.error('Keys in custom_fields must be of type str.')
def _get_field_methods(self, fields: list, custom_fields: dict) -> dict:
# dict{field:(function, kwargs)}
builtin_fields = dict(
time=(time, {}),
time_stamp=(time_stamp, {}),
triggered_profile=(self.process_reader.triggered_profile, {}),
# Battery
ac_power=(self.powersupply.ac_power, {}),
battery_draw=(self.battery.power_draw, {}),
battery_charge_left=(self.battery.charge_left, {}),
battery_energy_left=(self.battery.energy_left, {}),
# RAPL
package_temp=(self.cpu.read_temperature, {}),
package_power=(self.rapl.read_power, {}),
core_power=(self.rapl.read_power, {'name': 'core'}),
dram_power=(self.rapl.read_power, {'name': 'dram'}),
uncore_power=(self.rapl.read_power, {'name': 'uncore'}),
# Configurables
frequency=(self.cpu.read_current_freq, {}),
governor=(self.cpu.read_governor, {}),
policy=(self.cpu.read_policy, {}),
cores_online=(self.cpu.list_cores, {'status': 'online'}),
turbo=(self.cpu.read_turbo_state, {}),
cpu_util_all=(self.cpu.read_cpu_utilization, {'mode': 'all'}),
cpu_util_avg=(self.cpu.read_cpu_utilization, {'mode': 'avg'}),
cpu_util_max=(self.cpu.read_cpu_utilization, {'mode': 'max'}),
# Split read freq range in min and max
frequency_range_max=(lambda: self.cpu.read_freq_range()[1], {}),
frequency_avg=(lambda: int(mean(self.cpu.read_current_freq().values())), {}),
frequency_max=(lambda: int(max(self.cpu.read_current_freq().values())), {})
)
field_methods = {key: builtin_fields[key] for key in builtin_fields if key in fields}
if custom_fields is not None:
field_methods.update(custom_fields)
return field_methods
def reset(self, profiles=None):
'''Resets internal processReader, needed for hot reloading'''
if profiles is not None:
self.process_reader.reset(profiles)
self.partially_updated = set()
def update(self):
'''
Updates all fielfds' history
'''
for key in self.history:
func, kwargs = self.field_methods[key]
self.history[key].update(func(**kwargs))
self.partially_updated = set()
def partial_update(self, fields: list = None):
'''
Updates specified fields' values
if fields==None, updates all the fields that haven't been partially updated
'''
# Default updates all fields not updated
if fields is None:
fields = [field for field in self.history if field not in self.partially_updated]
self.partially_updated = set()
else:
self.partially_updated.update(fields)
if self.partially_updated == self.fields:
self.partially_updated = set()
# Update status of fields
for key in fields:
func, kwargs = self.field_methods[key]
self.history[key].update(func(**kwargs))
def manual_partial_update(self, field_values: dict):
'''Updates fields' with provided values'''
assert all([key in self.history for key in field_values])
for key, value in field_values.items():
self.history[key].update(value)
# Update partially updated set and reset it if al fields have been updated
self.partially_updated.update(field_values.keys())
if self.partially_updated == self.fields:
self.partially_updated = set()
def changed(self, fields: list = None) -> bool:
'''
Returns true if at least one field's last value is different from the second last one
'''
if fields is None:
return any([self.history[field].changed() for field in self.field_methods])
else:
return any([self.history[field].changed() for field in fields])
def query(self, field):
'''
Return latest value in history of field
'''
field_history = self.history[field]
if field_history:
return field_history[-1]
else:
return None
def save(self, file_name: str):
'''
Saves history as a CSV file
'''
with open(file_name, 'w', newline='') as file:
writer = csv.writer(file, delimiter=',')
# Write header
writer.writerow(self.history.keys())
# and data
writer.writerows(zip(*self.history.values()))
class StatusMinimal(SystemStatus):
def __init__(self, system: System, profiles: dict):
fields = ('triggered_profile', 'ac_power')
super().__init__(system, profiles, fields)
class StatusMonitor(SystemStatus):
def __init__(self, system: System, profiles: dict):
fields = ['time_stamp',
'frequency',
'triggered_profile',
'ac_power',
'governor',
'policy',
'cores_online',
'turbo',
'package_power',
'battery_draw',
'package_temp']
super().__init__(system, profiles, fields)
class StatusLog(SystemStatus):
def __init__(self, system: System, profiles: dict):
fields = ['time',
'cores_online',
'frequency',
'ac_power',
'governor',
'policy',
'cores_online',
'turbo',
'package_power',
'battery_draw',
'package_temp']
super().__init__(system, profiles, fields)
| 3,155 | 5,720 | 242 |
d0b4a412452f77db4f7fb992c5e39e6b1df4714d | 838 | py | Python | tests/dummy_repo/tvm/python/tvm/expr.py | csullivan/ffi-navigator | ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb | [
"Apache-2.0"
] | 148 | 2019-12-28T19:02:17.000Z | 2022-03-27T07:30:13.000Z | tests/dummy_repo/tvm/python/tvm/expr.py | csullivan/ffi-navigator | ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb | [
"Apache-2.0"
] | 21 | 2019-12-28T17:29:24.000Z | 2021-11-24T09:59:35.000Z | tests/dummy_repo/tvm/python/tvm/expr.py | csullivan/ffi-navigator | ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb | [
"Apache-2.0"
] | 17 | 2019-12-29T01:46:13.000Z | 2022-01-10T09:56:46.000Z | from __future__ import absolute_import as _abs
from ._ffi.node import NodeBase, NodeGeneric, register_node
from ._ffi.runtime_ctypes import TVMType, TypeCode
from . import make as _make
from . import generic as _generic
from . import _api_internal
class Expr(ExprOp, NodeBase):
"""Base class of all tvm Expressions"""
# In Python3, We have to explicitly tell interpreter to retain __hash__ if we overide __eq__
# https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
__hash__ = NodeBase.__hash__
@register_node("Variable")
class Var(Expr):
"""Symbolic variable.
Parameters
----------
name : str
The name
dtype : int
The data type
"""
| 27.032258 | 96 | 0.700477 | from __future__ import absolute_import as _abs
from ._ffi.node import NodeBase, NodeGeneric, register_node
from ._ffi.runtime_ctypes import TVMType, TypeCode
from . import make as _make
from . import generic as _generic
from . import _api_internal
class Expr(ExprOp, NodeBase):
"""Base class of all tvm Expressions"""
# In Python3, We have to explicitly tell interpreter to retain __hash__ if we overide __eq__
# https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
__hash__ = NodeBase.__hash__
@register_node("Variable")
class Var(Expr):
"""Symbolic variable.
Parameters
----------
name : str
The name
dtype : int
The data type
"""
def __init__(self, name, dtype):
self.__init_handle_by_constructor__(
_api_internal._Var, name, dtype)
| 101 | 0 | 26 |
3ba89e64d5ea14c6d1f17c3f936df9ae046d5bd8 | 848 | py | Python | projects/gym_baselines/experiments/gym_mujoco_base.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 187 | 2020-08-28T16:59:41.000Z | 2022-03-27T19:10:11.000Z | projects/gym_baselines/experiments/gym_mujoco_base.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 120 | 2020-08-28T15:30:36.000Z | 2022-03-13T00:38:44.000Z | projects/gym_baselines/experiments/gym_mujoco_base.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 45 | 2020-08-28T18:30:04.000Z | 2022-03-29T11:13:28.000Z | from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
| 30.285714 | 73 | 0.54717 | from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
class GymMoJoCoBaseConfig(GymBaseConfig, ABC):
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
visualizer = None
if mode == "test":
visualizer = VizSuite(
mode=mode,
video_viz=AgentViewViz(
label="episode_vid",
max_clip_length=400,
vector_task_source=("render", {"mode": "rgb_array"}),
fps=30,
),
)
return {
"nprocesses": 8 if mode == "train" else 1, # rollout
"devices": [],
"visualizer": visualizer,
}
| 575 | 68 | 23 |
a8ffd1ab6ad19f1f3e16bc1bc256a246fab56e4d | 2,728 | py | Python | main.py | erikanfox/WeatherAdvisor | f463611172b07886dbe7cc10f9c402ad6a49ab83 | [
"CC0-1.0"
] | 1 | 2022-02-10T19:29:05.000Z | 2022-02-10T19:29:05.000Z | main.py | erikanfox/WeatherAdvisor | f463611172b07886dbe7cc10f9c402ad6a49ab83 | [
"CC0-1.0"
] | null | null | null | main.py | erikanfox/WeatherAdvisor | f463611172b07886dbe7cc10f9c402ad6a49ab83 | [
"CC0-1.0"
] | null | null | null | import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from fastapi import FastAPI
import uvicorn
data = pd.read_csv("clothing_weather.csv")
app = FastAPI()
@app.get("/")
async def root():
"""Weather Advisor Welcome"""
return {"message": "Hello, welcome to Weather advisor! Enter a temp, whether there is a chance of rain, and whether there is a chance of snow in the format temp/rain/snow."}
@app.get("/weatheradvisor/{temp}/{rain}/{snow}")
if __name__ == '__main__':
uvicorn.run(app, port=8080, host='0.0.0.0')
| 35.428571 | 177 | 0.668255 | import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from fastapi import FastAPI
import uvicorn
data = pd.read_csv("clothing_weather.csv")
app = FastAPI()
def predictOutfit(temp: int,rain:int,snow:int):
data["rain"] = data["rain"].replace("no", 0)
data["rain"] = data["rain"].replace("yes", 1)
data["snow"] = data["rain"].replace("no", 0)
data["snow"] = data["rain"].replace("yes", 1)
feature_cols = ['temp_f','rain','snow']
X = data[feature_cols] # Features
y = data.overall # Target variabley
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
clf = DecisionTreeClassifier(criterion="entropy", max_depth=4)
# Train Decision Tree Classifer
clf = clf.fit(X_train.values,y_train)
#Predict the response for test dataset
y_pred = clf.predict(X_test.values)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
y_pred = clf.predict([[temp,rain,snow]])
#print the predicted outfit code
return y_pred
def getMessage(pred, rain, snow):
ans=""
outfit_code = {
1: "a short sleeve shirt and shorts.",
2: "a short sleeve shirt and long pants.",
3: "a short sleeve shirt, shorts and a light jacket or sweatshirt.",
4: "a short sleeve shirt, long pants, and a light jacket or sweatshirt.",
5: "a long sleeve shirt, long pants, and a light jacket or sweatshirt.",
6: "a short sleeve shirt, long pants, and a heavy jacket.",
7: "a long sleeve shirt or sweater, long pants, and a heavy jacket.",
8: "a long sleeve shirt and shorts."
}
if pred in outfit_code:
ans=ans+outfit_code[pred]
else:
return "an error occurred"
if rain == 1:
ans=ans+ " You may also want a rain jacket, rain boots, and/or an umbrella."
if snow == 1:
ans=ans+ " You should also bring a scarf, gloves, and snow boots!"
return ans
@app.get("/")
async def root():
"""Weather Advisor Welcome"""
return {"message": "Hello, welcome to Weather advisor! Enter a temp, whether there is a chance of rain, and whether there is a chance of snow in the format temp/rain/snow."}
@app.get("/weatheradvisor/{temp}/{rain}/{snow}")
async def weatheradvisor(temp: int,rain:int,snow:int):
y=predictOutfit(temp,rain,snow)
message=getMessage(y[0], rain, snow)
return {"You should wear": message}
if __name__ == '__main__':
    # Serve the API on all interfaces, port 8080.
    uvicorn.run(app, port=8080, host='0.0.0.0')
| 1,895 | 0 | 76 |
95e59f08aef99a2dbe353f52524987e4c8e5fead | 2,941 | py | Python | app/portal/cms/migrations/0004_auto_20210611_1847.py | Ecotrust/OH4S_Proteins | 52ad588ef071064abc5c3f43aa125ad97bff26c4 | [
"Apache-2.0"
] | null | null | null | app/portal/cms/migrations/0004_auto_20210611_1847.py | Ecotrust/OH4S_Proteins | 52ad588ef071064abc5c3f43aa125ad97bff26c4 | [
"Apache-2.0"
] | 185 | 2019-01-23T21:05:15.000Z | 2021-07-01T01:29:14.000Z | app/portal/cms/migrations/0004_auto_20210611_1847.py | Ecotrust/OH4S_Proteins | 52ad588ef071064abc5c3f43aa125ad97bff26c4 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2 on 2021-06-11 18:47
from django.db import migrations, models
import wagtail.core.fields
| 46.68254 | 561 | 0.641618 | # Generated by Django 3.2 on 2021-06-11 18:47
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20210527_2112'),
]
operations = [
migrations.RemoveField(
model_name='homepage',
name='image',
),
migrations.RemoveField(
model_name='resultspage',
name='results_count_message',
),
migrations.AddField(
model_name='resultspage',
name='filter_advice',
field=wagtail.core.fields.RichTextField(blank=True, default='<p>Try removing filters to see more results</p>', help_text='Helpful advice for users confused by their results'),
),
migrations.AddField(
model_name='resultspage',
name='results_count_message_after',
field=models.CharField(blank=True, default='producers that meet your criteria', help_text='Text after result count', max_length=255),
),
migrations.AddField(
model_name='resultspage',
name='results_count_message_before',
field=models.CharField(blank=True, default='We found', help_text='Text before result count', max_length=255),
),
migrations.AlterField(
model_name='homepage',
name='categories_header',
field=models.CharField(default='View all suppliers for specific product categories', help_text='Header above categories', max_length=255),
),
migrations.AlterField(
model_name='homepage',
name='filter_prompt',
field=models.CharField(default='Search By Filtering', help_text='Language directing users to use filters', max_length=255),
),
migrations.AlterField(
model_name='homepage',
name='welcome',
field=models.CharField(default='Welcome', max_length=255, verbose_name='Welcome Title'),
),
migrations.AlterField(
model_name='homepage',
name='welcome_text',
field=wagtail.core.fields.RichTextField(blank=True, default='<p>We connect school food buyers with Oregon producers who are ready to sell to schools. We have over 75 producers offering a wide variety of products. You can search by product type, producer identity, location, and much more.</p><p>Need help using the site? <a href="https://vimeo.com/352842407" target="_blank">Watch our short how-to video</a> or <a href="/contact">contact us here</a>. Know a food producer who should be here? Thanks for visiting.</p>', verbose_name='Welcome Text'),
),
migrations.AlterField(
model_name='resultspage',
name='filter_prompt',
field=models.CharField(default='Add Filters', help_text='Language directing users to use filters', max_length=255),
),
]
| 0 | 2,802 | 23 |
b58f162dfbb09da8fc82b13996616ab07c962f1a | 840 | py | Python | Task/Balanced-brackets/Python/balanced-brackets-1.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/Balanced-brackets/Python/balanced-brackets-1.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Balanced-brackets/Python/balanced-brackets-1.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | ... txt = ['[', ']'] * N
... random.shuffle( txt )
... return ''.join(txt)
...
... braced = 0
... for ch in txt:
... if ch == '[': braced += 1
... if ch == ']':
... braced -= 1
... if braced < 0: return False
... return braced == 0
...
>>> for txt in (gen(N) for N in range(10)):
... print ("%-22r is%s balanced" % (txt, '' if balanced(txt) else ' not'))
...
'' is balanced
'[]' is balanced
'[][]' is balanced
'][[[]]' is not balanced
'[]][[][]' is not balanced
'[][[][]]][' is not balanced
'][]][][[]][[' is not balanced
'[[]]]]][]][[[[' is not balanced
'[[[[]][]]][[][]]' is balanced
'][[][[]]][]]][[[[]' is not balanced
| 30 | 78 | 0.370238 | >>> def gen(N):
... txt = ['[', ']'] * N
... random.shuffle( txt )
... return ''.join(txt)
...
>>> def balanced(txt):
... braced = 0
... for ch in txt:
... if ch == '[': braced += 1
... if ch == ']':
... braced -= 1
... if braced < 0: return False
... return braced == 0
...
>>> for txt in (gen(N) for N in range(10)):
... print ("%-22r is%s balanced" % (txt, '' if balanced(txt) else ' not'))
...
'' is balanced
'[]' is balanced
'[][]' is balanced
'][[[]]' is not balanced
'[]][[][]' is not balanced
'[][[][]]][' is not balanced
'][]][][[]][[' is not balanced
'[[]]]]][]][[[[' is not balanced
'[[[[]][]]][[][]]' is balanced
'][[][[]]][]]][[[[]' is not balanced
| -13 | 0 | 52 |
177131c752000cddfb2658dd5c97369412be122f | 674 | py | Python | chapter03/_3_5_.py | megmogmog1965/PythonMachineLearning | 7d1bc2a28722a4faa8ffd924cfeaaef2524a8f34 | [
"MIT"
] | 1 | 2017-12-23T14:13:25.000Z | 2017-12-23T14:13:25.000Z | chapter03/_3_5_.py | megmogmog1965/PythonMachineLearning | 7d1bc2a28722a4faa8ffd924cfeaaef2524a8f34 | [
"MIT"
] | null | null | null | chapter03/_3_5_.py | megmogmog1965/PythonMachineLearning | 7d1bc2a28722a4faa8ffd924cfeaaef2524a8f34 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
'''
Created on Mar 12, 2017
@author: Yusuke Kawatsu
'''
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
plot()
| 18.216216 | 88 | 0.543027 | #!/usr/bin/env python
# encoding: utf-8
'''
Created on Mar 12, 2017
@author: Yusuke Kawatsu
'''
import numpy as np
import matplotlib.pyplot as plt
def plot():
    """Draw a scatter plot of 200 random 2-D points labelled by XOR of sign."""
    np.random.seed(0)
    # Sample 200 points from a 2-D standard normal (seeded for reproducibility).
    X_xor = np.random.randn(200, 2)
    # Label +1 where exactly one coordinate is positive, -1 otherwise.
    y_xor = np.where(np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0), 1, -1)
    # Positive class as blue crosses, negative class as red squares.
    positive = y_xor == 1
    plt.scatter(X_xor[positive, 0], X_xor[positive, 1], c='b', marker='x', label='1')
    plt.scatter(X_xor[~positive, 0], X_xor[~positive, 1], c='r', marker='s', label='-1')
    plt.xlim([-3, 3])
    plt.ylim([-3, 3])
    plt.legend(loc='best')
    plt.show()
if __name__ == '__main__':
    # Render the XOR scatter demo when run as a script.
    plot()
| 460 | 0 | 23 |
ef06a141c4791a9c8f548939256de04776d8de1c | 1,319 | py | Python | faceRecognition/utils/debug.py | UESTCYangHR/face_recognition | d7c671da2f2b8d0e9298036791dbff3651185cb7 | [
"MIT"
] | 1 | 2020-11-30T10:56:58.000Z | 2020-11-30T10:56:58.000Z | faceRecognition/utils/debug.py | UESTCYangHR/face_recognition | d7c671da2f2b8d0e9298036791dbff3651185cb7 | [
"MIT"
] | null | null | null | faceRecognition/utils/debug.py | UESTCYangHR/face_recognition | d7c671da2f2b8d0e9298036791dbff3651185cb7 | [
"MIT"
] | null | null | null | # -*-coding: utf-8 -*-
"""
@Project: tools
@File : debug.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2019-05-10 16:24:49
"""
import datetime
import logging
import sys
import time
'''
url:https://cuiqingcai.com/6080.html
level级别:debug、info、warning、error以及critical
'''
# logging.basicConfig(level=logging.DEBUG,
# filename='output.log',
# datefmt='%Y/%m/%d %H:%M:%S',
# format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s - %(funcName)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def RUN_TIME(deta_time):
'''
返回毫秒,deta_time.seconds获得秒数=1000ms,deta_time.microseconds获得微妙数=1/1000ms
:param deta_time: ms
:return:
'''
time_ = deta_time.seconds * 1000 + deta_time.microseconds / 1000.0
return time_
if __name__ == '__main__':
T0 = TIME()
# do something
time.sleep(5)
T1 = TIME()
print("rum time:{}ms".format(RUN_TIME(T1 - T0)))
logger.info('This is a log info')
logger.debug('Debugging')
logger.warning('Warning exists')
logger.error('Finish')
| 25.365385 | 110 | 0.603487 | # -*-coding: utf-8 -*-
"""
@Project: tools
@File : debug.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2019-05-10 16:24:49
"""
import datetime
import logging
import sys
import time
'''
url:https://cuiqingcai.com/6080.html
level级别:debug、info、warning、error以及critical
'''
# logging.basicConfig(level=logging.DEBUG,
# filename='output.log',
# datefmt='%Y/%m/%d %H:%M:%S',
# format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s - %(funcName)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def RUN_TIME(deta_time):
'''
返回毫秒,deta_time.seconds获得秒数=1000ms,deta_time.microseconds获得微妙数=1/1000ms
:param deta_time: ms
:return:
'''
time_ = deta_time.seconds * 1000 + deta_time.microseconds / 1000.0
return time_
def TIME():
    '''Return the current local time as a datetime.datetime.'''
    return datetime.datetime.now()
if __name__ == '__main__':
    # Self-test: time a 5 s sleep with RUN_TIME and exercise each log level.
    T0 = TIME()
    # do something
    time.sleep(5)
    T1 = TIME()
    print("rum time:{}ms".format(RUN_TIME(T1 - T0)))
    logger.info('This is a log info')
    logger.debug('Debugging')
    logger.warning('Warning exists')
    logger.error('Finish')
| 25 | 0 | 23 |
c59cfc168ca0b50ba230bafe9fc5f7c758576560 | 10,755 | py | Python | src/dynamic_graph/entity.py | Rascof/dynamic-graph-python | 348f8d9291fe1314c86f1db88833d2531af8bf11 | [
"BSD-2-Clause"
] | null | null | null | src/dynamic_graph/entity.py | Rascof/dynamic-graph-python | 348f8d9291fe1314c86f1db88833d2531af8bf11 | [
"BSD-2-Clause"
] | null | null | null | src/dynamic_graph/entity.py | Rascof/dynamic-graph-python | 348f8d9291fe1314c86f1db88833d2531af8bf11 | [
"BSD-2-Clause"
] | null | null | null | """
Copyright (C) 2010 CNRS
Author: Florent Lamiraux, Nicolas Mansard
"""
from __future__ import print_function
import types
from enum import Enum
from . import signal_base, wrap
from .attrpath import setattrpath
if 'display' not in globals().keys():
# --- FACTORY ------------------------------------------------------------------
class PyEntityFactoryClass(type):
"""
The class build dynamically a new class type, and return the reference
on the class-type object. The class type is not added to any context.
"""
def PyEntityFactory(className, context):
"""
Build a new class type by calling the factory, and add it
to the given context.
"""
EntityClass = PyEntityFactoryClass(className)
context[className] = EntityClass
return EntityClass
def updateEntityClasses(dictionary):
"""
For all c++entity types that are not in the pyentity class list
(entityClassNameList) run the factory and store the new type in the given
context (dictionary).
"""
cxx_entityList = wrap.factory_get_entity_class_list()
for e in filter(lambda x: x not in Entity.entityClassNameList, cxx_entityList):
# Store new class in dictionary with class name
PyEntityFactory(e, dictionary)
# Store class name in local list
Entity.entityClassNameList.append(e)
# --- ENTITY -------------------------------------------------------------------
class VerbosityLevel(Enum):
"""
Enum class for setVerbosityLevel
"""
VERBOSITY_ALL = 8
VERBOSITY_INFO_WARNING_ERROR = 4
VERBOSITY_WARNING_ERROR = 2
VERBOSITY_ERROR = 1
VERBOSITY_NONE = 0
class Entity(object):
"""
This class binds dynamicgraph::Entity C++ class
"""
obj = None
"""
Store list of entities created via python
"""
entities = dict()
def __init__(self, className, instanceName):
"""
Constructor: if not called by a child class, create and store a pointer
to a C++ Entity object.
"""
object.__setattr__(self, 'obj', wrap.create_entity(className, instanceName))
Entity.entities[instanceName] = self
@staticmethod
def initEntity(self, name):
"""
Common constructor of specialized Entity classes. This function is bound
by the factory to each new class derivated from the Entity class as the
constructor of the new class.
"""
Entity.__init__(self, self.className, name)
if not self.__class__.commandCreated:
self.boundClassCommands()
self.__class__.__doc__ = wrap.entity_get_docstring(self.obj)
self.__class__.commandCreated = True
@property
@property
def signal(self, name):
"""
Get a signal of the entity from signal name
"""
signalPt = wrap.entity_get_signal(self.obj, name)
return signal_base.SignalBase(name="", obj=signalPt)
def hasSignal(self, name):
"""
Indicates if a signal with the given name exists in the entity
"""
return wrap.entity_has_signal(self.obj, name)
def displaySignals(self):
"""
Print the list of signals into standard output: temporary.
"""
signals = list(self.signals())
if len(signals) == 0:
display("--- <" + self.name + "> has no signal")
else:
display("--- <" + self.name + "> signal list: ")
for s in signals[:-1]:
display(" |-- <" + str(s))
display(" `-- <" + str(signals[-1]))
def signals(self):
"""
Return the list of signals
"""
sl = wrap.entity_list_signals(self.obj)
return map(lambda pyObj: signal_base.SignalBase(obj=pyObj), sl)
def commands(self):
"""
Return the list of commands.
"""
return wrap.entity_list_commands(self.obj)
def globalHelp(self):
"""
Print a short description of each command.
"""
if self.__doc__:
print(self.__doc__)
print("List of commands:")
print("-----------------")
for cstr in self.commands():
ctitle = cstr + ':'
for i in range(len(cstr), 15):
ctitle += ' '
for docstr in wrap.entity_get_command_docstring(self.obj, cstr).split('\n'):
if (len(docstr) > 0) and (not docstr.isspace()):
display(ctitle + "\t" + docstr)
break
def help(self, comm=None):
"""
With no arg, print the global help. With arg the name of
a specific command, print the help associated to the command.
"""
if comm is None:
self.globalHelp()
else:
display(comm + ":\n" + wrap.entity_get_command_docstring(self.obj, comm))
# --- COMMANDS BINDER -----------------------------------------------------
# List of all the entity classes from the c++ factory, that have been bound
# bind the py factory.
entityClassNameList = []
# This function dynamically create the function object that runs the command.
@staticmethod
def boundClassCommands(self):
"""
This static function has to be called from a class heritating from Entity.
It should be called only once. It parses the list of commands obtained from
c++, and bind each of them to a python class method.
"""
# Get list of commands of the Entity object
commands = wrap.entity_list_commands(self.obj)
# for each command, add a method with the name of the command
for cmdstr in commands:
docstr = wrap.entity_get_command_docstring(self.obj, cmdstr)
cmdpy = Entity.createCommandBind(cmdstr, docstr)
setattrpath(self.__class__, cmdstr, cmdpy)
def boundNewCommand(self, cmdName):
"""
At construction, all existing commands are bound directly in the class.
This method enables to bound new commands dynamically. These new bounds
are not made with the class, but directly with the object instance.
"""
if (cmdName in self.__dict__) | (cmdName in self.__class__.__dict__):
print("Warning: command ", cmdName, " will overwrite an object attribute.")
docstring = wrap.entity_get_command_docstring(self.obj, cmdName)
cmd = Entity.createCommandBind(cmdName, docstring)
# Limitation (todo): does not handle for path attribute name (see setattrpath).
setattr(self, cmdName, types.MethodType(cmd, self))
def boundAllNewCommands(self):
"""
For all commands that are not attribute of the object instance nor of the
class, a new attribute of the instance is created to bound the command.
"""
cmdList = wrap.entity_list_commands(self.obj)
cmdList = filter(lambda x: x not in self.__dict__, cmdList)
cmdList = filter(lambda x: x not in self.__class__.__dict__, cmdList)
for cmd in cmdList:
self.boundNewCommand(cmd)
def setLoggerVerbosityLevel(self, verbosity):
"""
Specify for the entity the verbosity level.
- param verbosity should be one of the attribute of the enum
dynamic_graph.entity.VerbosityLevel
"""
return wrap.entity_set_logger_verbosity(self.obj, verbosity)
def getLoggerVerbosityLevel(self):
"""
Returns the entity's verbosity level (as a dynamic_graph.entity.VerbosityLevel)
"""
r = wrap.entity_get_logger_verbosity(self.obj)
if r == 8:
return VerbosityLevel.VERBOSITY_ALL
elif r == 4:
return VerbosityLevel.VERBOSITY_INFO_WARNING_ERROR
elif r == 2:
return VerbosityLevel.VERBOSITY_WARNING_ERROR
elif r == 1:
return VerbosityLevel.VERBOSITY_ERROR
return VerbosityLevel.VERBOSITY_NONE
def setTimeSample(self, timeSample):
"""
Specify for the entity the time at which call is counted.
"""
return wrap.entity_set_time_sample(self.obj, timeSample)
def getTimeSample(self):
"""
Returns for the entity the time at which call is counted.
"""
return wrap.entity_get_time_sample(self.obj)
def setStreamPrintPeriod(self, streamPrintPeriod):
"""
Specify for the entity the period at which debugging information is printed
"""
return wrap.entity_set_stream_print_period(self.obj, streamPrintPeriod)
def getStreamPrintPeriod(self):
"""
Returns for the entity the period at which debugging information is printed
"""
return wrap.entity_get_stream_print_period(self.obj)
| 35.032573 | 103 | 0.605672 | """
Copyright (C) 2010 CNRS
Author: Florent Lamiraux, Nicolas Mansard
"""
from __future__ import print_function
import types
from enum import Enum
from . import signal_base, wrap
from .attrpath import setattrpath
# Provide a default `display` only if the embedding interpreter has not
# already injected one (e.g. an interactive shell with its own printer).
if 'display' not in globals().keys():
    def display(s):
        print(s)
# --- FACTORY ------------------------------------------------------------------
class PyEntityFactoryClass(type):
    """
    The class build dynamically a new class type, and return the reference
    on the class-type object. The class type is not added to any context.
    """
    def __new__(factory, className, bases=(), dict={}):
        # Called with no bases when binding a raw C++ entity class; with
        # bases when a Python class subclasses an already-bound entity.
        if len(bases) == 0:
            # Initialize a basic Entity class
            EntityClass = type.__new__(factory, className, (Entity, ), dict)
            EntityClass.className = className
            EntityClass.__init__ = Entity.initEntity
        else:
            # Initialize a heritated class
            EntityClass = type.__new__(factory, className, bases, dict)
            # Inherit the C++ class name from the first Entity-derived base.
            for c in bases:
                if issubclass(c, Entity):
                    EntityClass.className = c.className
                    break
        # Commands are bound lazily, on first instantiation (see initEntity).
        EntityClass.commandCreated = False
        return EntityClass
def PyEntityFactory(className, context):
    """Create an Entity subclass for *className* via the factory and
    register it in the given *context* mapping under that name."""
    entity_cls = PyEntityFactoryClass(className)
    context[className] = entity_cls
    return entity_cls
def updateEntityClasses(dictionary):
    """
    Bind every C++ entity type that has no Python class yet: run the factory
    for it, store the new type in *dictionary*, and remember it in
    Entity.entityClassNameList so it is not bound twice.
    """
    for class_name in wrap.factory_get_entity_class_list():
        if class_name in Entity.entityClassNameList:
            continue
        # Store new class in dictionary with class name
        PyEntityFactory(class_name, dictionary)
        # Store class name in local list
        Entity.entityClassNameList.append(class_name)
# --- ENTITY -------------------------------------------------------------------
class VerbosityLevel(Enum):
    """
    Enum class for setVerbosityLevel
    """
    # Numeric values match what wrap.entity_get_logger_verbosity returns
    # (see Entity.getLoggerVerbosityLevel, which maps 8/4/2/1 back here).
    VERBOSITY_ALL = 8
    VERBOSITY_INFO_WARNING_ERROR = 4
    VERBOSITY_WARNING_ERROR = 2
    VERBOSITY_ERROR = 1
    VERBOSITY_NONE = 0
"""
This class binds dynamicgraph::Entity C++ class
"""
obj = None
"""
Store list of entities created via python
"""
entities = dict()
def __init__(self, className, instanceName):
"""
Constructor: if not called by a child class, create and store a pointer
to a C++ Entity object.
"""
object.__setattr__(self, 'obj', wrap.create_entity(className, instanceName))
Entity.entities[instanceName] = self
@staticmethod
def initEntity(self, name):
"""
Common constructor of specialized Entity classes. This function is bound
by the factory to each new class derivated from the Entity class as the
constructor of the new class.
"""
Entity.__init__(self, self.className, name)
if not self.__class__.commandCreated:
self.boundClassCommands()
self.__class__.__doc__ = wrap.entity_get_docstring(self.obj)
self.__class__.commandCreated = True
@property
def name(self):
return wrap.entity_get_name(self.obj)
@property
def className(self):
return wrap.entity_get_class_name(self.obj)
def __str__(self):
return wrap.display_entity(self.obj)
def signal(self, name):
"""
Get a signal of the entity from signal name
"""
signalPt = wrap.entity_get_signal(self.obj, name)
return signal_base.SignalBase(name="", obj=signalPt)
def hasSignal(self, name):
"""
Indicates if a signal with the given name exists in the entity
"""
return wrap.entity_has_signal(self.obj, name)
def displaySignals(self):
"""
Print the list of signals into standard output: temporary.
"""
signals = list(self.signals())
if len(signals) == 0:
display("--- <" + self.name + "> has no signal")
else:
display("--- <" + self.name + "> signal list: ")
for s in signals[:-1]:
display(" |-- <" + str(s))
display(" `-- <" + str(signals[-1]))
def signals(self):
"""
Return the list of signals
"""
sl = wrap.entity_list_signals(self.obj)
return map(lambda pyObj: signal_base.SignalBase(obj=pyObj), sl)
def commands(self):
"""
Return the list of commands.
"""
return wrap.entity_list_commands(self.obj)
def globalHelp(self):
"""
Print a short description of each command.
"""
if self.__doc__:
print(self.__doc__)
print("List of commands:")
print("-----------------")
for cstr in self.commands():
ctitle = cstr + ':'
for i in range(len(cstr), 15):
ctitle += ' '
for docstr in wrap.entity_get_command_docstring(self.obj, cstr).split('\n'):
if (len(docstr) > 0) and (not docstr.isspace()):
display(ctitle + "\t" + docstr)
break
def help(self, comm=None):
"""
With no arg, print the global help. With arg the name of
a specific command, print the help associated to the command.
"""
if comm is None:
self.globalHelp()
else:
display(comm + ":\n" + wrap.entity_get_command_docstring(self.obj, comm))
def __getattr__(self, name):
try:
return self.signal(name)
except Exception:
try:
object.__getattr__(self, name)
except AttributeError:
raise AttributeError("'%s' entity has no attribute %s\n" % (self.name, name) +
' entity attributes are usually either\n' + ' - commands,\n' +
' - signals or,\n' + ' - user defined attributes')
def __setattr__(self, name, value):
if name in map(lambda s: s.getName().split(':')[-1], self.signals()):
raise NameError(name + " already designates a signal. "
"It is not advised to set a new attribute of the same name.")
object.__setattr__(self, name, value)
# --- COMMANDS BINDER -----------------------------------------------------
# List of all the entity classes from the c++ factory, that have been bound
# bind the py factory.
entityClassNameList = []
# This function dynamically create the function object that runs the command.
@staticmethod
def createCommandBind(name, docstring):
def commandBind(self, *arg):
return wrap.entity_execute_command(self.obj, name, arg)
commandBind.__doc__ = docstring
return commandBind
def boundClassCommands(self):
"""
This static function has to be called from a class heritating from Entity.
It should be called only once. It parses the list of commands obtained from
c++, and bind each of them to a python class method.
"""
# Get list of commands of the Entity object
commands = wrap.entity_list_commands(self.obj)
# for each command, add a method with the name of the command
for cmdstr in commands:
docstr = wrap.entity_get_command_docstring(self.obj, cmdstr)
cmdpy = Entity.createCommandBind(cmdstr, docstr)
setattrpath(self.__class__, cmdstr, cmdpy)
def boundNewCommand(self, cmdName):
"""
At construction, all existing commands are bound directly in the class.
This method enables to bound new commands dynamically. These new bounds
are not made with the class, but directly with the object instance.
"""
if (cmdName in self.__dict__) | (cmdName in self.__class__.__dict__):
print("Warning: command ", cmdName, " will overwrite an object attribute.")
docstring = wrap.entity_get_command_docstring(self.obj, cmdName)
cmd = Entity.createCommandBind(cmdName, docstring)
# Limitation (todo): does not handle for path attribute name (see setattrpath).
setattr(self, cmdName, types.MethodType(cmd, self))
def boundAllNewCommands(self):
"""
For all commands that are not attribute of the object instance nor of the
class, a new attribute of the instance is created to bound the command.
"""
cmdList = wrap.entity_list_commands(self.obj)
cmdList = filter(lambda x: x not in self.__dict__, cmdList)
cmdList = filter(lambda x: x not in self.__class__.__dict__, cmdList)
for cmd in cmdList:
self.boundNewCommand(cmd)
def setLoggerVerbosityLevel(self, verbosity):
"""
Specify for the entity the verbosity level.
- param verbosity should be one of the attribute of the enum
dynamic_graph.entity.VerbosityLevel
"""
return wrap.entity_set_logger_verbosity(self.obj, verbosity)
def getLoggerVerbosityLevel(self):
"""
Returns the entity's verbosity level (as a dynamic_graph.entity.VerbosityLevel)
"""
r = wrap.entity_get_logger_verbosity(self.obj)
if r == 8:
return VerbosityLevel.VERBOSITY_ALL
elif r == 4:
return VerbosityLevel.VERBOSITY_INFO_WARNING_ERROR
elif r == 2:
return VerbosityLevel.VERBOSITY_WARNING_ERROR
elif r == 1:
return VerbosityLevel.VERBOSITY_ERROR
return VerbosityLevel.VERBOSITY_NONE
def setTimeSample(self, timeSample):
"""
Specify for the entity the time at which call is counted.
"""
return wrap.entity_set_time_sample(self.obj, timeSample)
def getTimeSample(self):
"""
Returns for the entity the time at which call is counted.
"""
return wrap.entity_get_time_sample(self.obj)
def setStreamPrintPeriod(self, streamPrintPeriod):
"""
Specify for the entity the period at which debugging information is printed
"""
return wrap.entity_set_stream_print_period(self.obj, streamPrintPeriod)
def getStreamPrintPeriod(self):
"""
Returns for the entity the period at which debugging information is printed
"""
return wrap.entity_get_stream_print_period(self.obj)
| 1,737 | 0 | 212 |
404276692c56c935cd37ae093b0b952ab8b39d81 | 1,413 | py | Python | reinvent_models/link_invent/model_vocabulary/model_vocabulary.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | reinvent_models/link_invent/model_vocabulary/model_vocabulary.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | 1 | 2022-03-07T12:18:00.000Z | 2022-03-07T12:18:00.000Z | reinvent_models/link_invent/model_vocabulary/model_vocabulary.py | GT4SD/reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | from typing import List
from reinvent_models.link_invent.model_vocabulary.vocabulary import Vocabulary, SMILESTokenizer, create_vocabulary
| 31.4 | 115 | 0.642604 | from typing import List
from reinvent_models.link_invent.model_vocabulary.vocabulary import Vocabulary, SMILESTokenizer, create_vocabulary
class ModelVocabulary:
    """Pairs a Vocabulary with a SMILES tokenizer for encoding/decoding SMILES."""
    def __init__(self, vocabulary: Vocabulary, tokenizer: SMILESTokenizer):
        self.vocabulary = vocabulary
        self.tokenizer = tokenizer
    def __len__(self) -> int:
        """Return the number of tokens in the vocabulary."""
        return len(self.vocabulary)
    def encode(self, smiles_str: str):
        """
        Tokenize *smiles_str* and encode the tokens.
        :param smiles_str: a SMILES string
        :return: a one-hot-encoded vector
        """
        tokens = self.tokenizer.tokenize(smiles_str)
        return self.vocabulary.encode(tokens)
    def decode(self, encoded_vector) -> str:
        """
        Decode an encoded vector back into a SMILES string.
        :param encoded_vector: a one-hot-encoded version of the target
        :return: the SMILES string of the target
        """
        tokens = self.vocabulary.decode(encoded_vector)
        return self.tokenizer.untokenize(tokens)
    @classmethod
    def from_list(cls, smiles_list: List[str]):
        """
        Build a ModelVocabulary from a list of SMILES strings.
        :param smiles_list: a list of SMILES strings
        :return: a ModelVocabulary instance
        """
        tokenizer = SMILESTokenizer()
        return ModelVocabulary(create_vocabulary(smiles_list, tokenizer), tokenizer)
| 124 | 1,119 | 25 |
3d2e7973fe19d9c696f5d3caaaf66a668abe03db | 665 | py | Python | Chapter11/booktracker/application/booktracker/blueprints/frontend/view.py | maggias/Python-Web-Development-with-Sanic | a0337b9324b4d898f79c5621705b7d0171eeb21d | [
"MIT"
] | 16 | 2022-01-03T22:17:20.000Z | 2022-03-26T09:41:35.000Z | Chapter11/booktracker/application/booktracker/blueprints/frontend/view.py | maggias/Python-Web-Development-with-Sanic | a0337b9324b4d898f79c5621705b7d0171eeb21d | [
"MIT"
] | 3 | 2022-01-18T11:21:56.000Z | 2022-03-13T08:51:14.000Z | Chapter11/booktracker/application/booktracker/blueprints/frontend/view.py | maggias/Python-Web-Development-with-Sanic | a0337b9324b4d898f79c5621705b7d0171eeb21d | [
"MIT"
] | 4 | 2022-01-11T21:16:07.000Z | 2022-03-30T06:24:30.000Z | from logging import getLogger
from pathlib import Path
from sanic import Blueprint, HTTPResponse, Request
from sanic.response import file
from .reload import setup_livereload
logger = getLogger("booktracker")
bp = Blueprint("Frontend")
setup_livereload(bp)
@bp.get("/<path:path>")
| 26.6 | 75 | 0.714286 | from logging import getLogger
from pathlib import Path
from sanic import Blueprint, HTTPResponse, Request
from sanic.response import file
from .reload import setup_livereload
logger = getLogger("booktracker")
bp = Blueprint("Frontend")
setup_livereload(bp)
@bp.get("/<path:path>")
async def index(request: Request, path: str) -> HTTPResponse:
base: Path = request.app.config.UI_DIR / "public"
requested_path = base / path
logger.debug(f"Checking for {requested_path}")
html = (
requested_path
if path and requested_path.exists() and not requested_path.is_dir()
else base / "index.html"
)
return await file(html)
| 357 | 0 | 22 |
8454fab6099ee4685be09d38f42338f240b0fbe5 | 710 | py | Python | setup.py | mmyros/hdestimator | 8a6da9ef513a3bd1ba0e8bbc1a46a2beb4fee69b | [
"BSD-3-Clause"
] | null | null | null | setup.py | mmyros/hdestimator | 8a6da9ef513a3bd1ba0e8bbc1a46a2beb4fee69b | [
"BSD-3-Clause"
] | null | null | null | setup.py | mmyros/hdestimator | 8a6da9ef513a3bd1ba0e8bbc1a46a2beb4fee69b | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
from Cython.Build import cythonize
import numpy
setup(
name='hdestimator',
version='0.9',
install_requires=[
'h5py',
'pyyaml',
'numpy',
'scipy',
'mpmath',
'matplotlib',
'cython',
],
packages=find_packages(),
include_package_data=True,
py_modules=['hdestimator'],
url='',
license='MIT',
author='',
author_email='',
description='The history dependence estimator tool',
ext_modules=cythonize(["src/hde_fast_embedding.pyx"],
# "hde_fast_utils.pyx"],
annotate=False),
include_dirs=[numpy.get_include()]
)
| 22.903226 | 57 | 0.576056 | from setuptools import setup, find_packages
from Cython.Build import cythonize
import numpy
setup(
name='hdestimator',
version='0.9',
install_requires=[
'h5py',
'pyyaml',
'numpy',
'scipy',
'mpmath',
'matplotlib',
'cython',
],
packages=find_packages(),
include_package_data=True,
py_modules=['hdestimator'],
url='',
license='MIT',
author='',
author_email='',
description='The history dependence estimator tool',
ext_modules=cythonize(["src/hde_fast_embedding.pyx"],
# "hde_fast_utils.pyx"],
annotate=False),
include_dirs=[numpy.get_include()]
)
| 0 | 0 | 0 |
abd3009d9cb4a62dd5238f8813d5c3e64387589c | 488 | py | Python | roi.py | vibhavnirmal/Lane-Detection | 684b85f32232c9f4a3fc7455189c24ecff0ed3ff | [
"MIT"
] | 1 | 2020-01-09T08:55:52.000Z | 2020-01-09T08:55:52.000Z | roi.py | vibhavnirmal/Lane-Detection | 684b85f32232c9f4a3fc7455189c24ecff0ed3ff | [
"MIT"
] | null | null | null | roi.py | vibhavnirmal/Lane-Detection | 684b85f32232c9f4a3fc7455189c24ecff0ed3ff | [
"MIT"
] | 1 | 2021-12-22T07:11:18.000Z | 2021-12-22T07:11:18.000Z | import cv2
import numpy as np
class roi:
"""docstring for roi."""
| 22.181818 | 56 | 0.55123 | import cv2
import numpy as np
class roi:
"""docstring for roi."""
def __init__(self):
super(roi, self).__init__()
def roi(self, image):
height = image.shape[0]
width = image.shape[1]
# print(width)
poly = np.array([
[(100, height), (width, height), (530, 350)]
])
mask = np.zeros_like(image)
cv2.fillPoly(mask, poly, 255)
masked_img = cv2.bitwise_and(image, mask)
return masked_img
| 362 | 0 | 54 |
f497ce3169fb9d26b0b2b44f8fdfd44cd2585491 | 3,458 | py | Python | code/enrich2.py | boelkn/nn4dms | 1f1cf1fd2bc89c2ff8236bf8842decb771702fef | [
"MIT"
] | 29 | 2020-10-26T02:32:37.000Z | 2022-01-24T02:02:48.000Z | code/enrich2.py | boelkn/nn4dms | 1f1cf1fd2bc89c2ff8236bf8842decb771702fef | [
"MIT"
] | 2 | 2020-10-26T23:03:11.000Z | 2021-09-29T20:57:52.000Z | code/enrich2.py | boelkn/nn4dms | 1f1cf1fd2bc89c2ff8236bf8842decb771702fef | [
"MIT"
] | 7 | 2020-11-20T02:10:05.000Z | 2022-02-01T07:16:49.000Z | """ Common functions for creating enrich2 datasets (for GB1 and Bgl3) """
from os.path import join
from subprocess import call
import pandas as pd
import utils
def create_e2_config_file(inp_fn, sel_fn, e2_output_dir, config_file_save_dir):
    """Write an Enrich2 JSON config describing a two-timepoint experiment.

    Arguments:
        inp_fn: path of the input (timepoint 0) counts file
        sel_fn: path of the selected (timepoint 1) counts file
        e2_output_dir: directory where Enrich2 should place its results
        config_file_save_dir: directory in which to save the config file

    Returns:
        The path of the config file that was written
        (``<config_file_save_dir>/e2_config``).
    """
    import json

    # Build the configuration as a Python dict and serialize it with the json
    # module instead of splicing paths into a template string -- the output
    # stays valid JSON even if a path contains characters that would need
    # escaping (quotes, backslashes, ...).
    config = {
        "libraries": [
            {
                "counts file": inp_fn,
                "identifiers": {},
                "name": "T0",
                "report filtered reads": False,
                "timepoint": 0,
            },
            {
                "counts file": sel_fn,
                "identifiers": {},
                "name": "T1",
                "report filtered reads": False,
                "timepoint": 1,
            },
        ],
        "name": "C1",
        "output directory": e2_output_dir,
    }

    config_fn = join(config_file_save_dir, "e2_config")
    with open(config_fn, "w") as f:
        json.dump(config, f, indent=4)
    return config_fn
def create_tsv_dataset(e2_scores_fn, save_fn):
    """Convert an Enrich2 HDF5 results file into a simple tsv dataset.

    Reads identifier scores and counts from the Enrich2 output, drops the
    wild-type ("_wt") entry, and assembles a dataframe with one row per
    variant.  If ``save_fn`` is not None the dataframe is also written there
    as a tab-separated file.  Returns the assembled dataframe.
    """
    store = pd.HDFStore(e2_scores_fn)

    # Scores table: drop the wild-type row and expose the variant id, which
    # lives in the index, as a regular column named "index".
    scores_df = store.select("/main/identifiers/scores")
    scores_df = scores_df.loc[scores_df.index != "_wt"]
    scores_df.reset_index(inplace=True)

    # Counts table: the wild-type row may or may not be present here.
    counts_df = store.select("/main/identifiers/counts")
    if "_wt" in counts_df.index:
        counts_df = counts_df.drop("_wt")

    column_order = ["variant", "num_mutations", "inp", "sel", "score"]
    dataset = pd.DataFrame(
        {
            "variant": scores_df["index"].values,
            # Variants are comma-separated mutation lists; count the pieces.
            "num_mutations": scores_df["index"].apply(lambda v: len(v.split(","))).values,
            "inp": counts_df["c_0"].values,
            "sel": counts_df["c_1"].values,
            "score": scores_df["score"].values,
        },
        columns=column_order,
    )

    if save_fn is not None:
        dataset.to_csv(save_fn, sep="\t", index=False)
    return dataset
def create_e2_dataset(count_df, output_dir, output_fn=None):
    """Run Enrich2 on a counts dataframe and convert its output to a tsv.

    Saves Enrich2 input files from ``count_df``, generates a config file,
    invokes Enrich2 inside its conda environment, and converts the resulting
    HDF5 scores into a tsv dataset saved under ``output_dir``.
    """
    # Enrich2 gets a dedicated subdirectory under the requested output dir.
    e2_dir = join(output_dir, "e2_output")
    utils.ensure_dir_exists(e2_dir)

    # Dump the input and selected counts in the format Enrich2 expects.
    inp_counts_fn = join(e2_dir, "idents_inp.tsv")
    sel_counts_fn = join(e2_dir, "idents_sel.tsv")
    count_df["inp"].to_csv(inp_counts_fn, sep="\t", header=["count"], index_label=False)
    count_df["sel"].to_csv(sel_counts_fn, sep="\t", header=["count"], index_label=False)

    # Generate the Enrich2 config for this dataset and run Enrich2 on it.
    config_fn = create_e2_config_file(inp_counts_fn, sel_counts_fn, e2_dir, e2_dir)
    call(['conda', 'run', '-n', 'enrich2', 'enrich_cmd', '--no-plots', '--no-tsv', config_fn, 'ratios', 'wt'])

    # Convert the Enrich2 HDF5 output into the simple tsv dataset format.
    if output_fn is None:
        output_fn = "dataset.tsv"
    create_tsv_dataset(join(e2_dir, "C1_sel.h5"), save_fn=join(output_dir, output_fn))
| 36.020833 | 113 | 0.613939 | """ Common functions for creating enrich2 datasets (for GB1 and Bgl3) """
from os.path import join
from subprocess import call
import pandas as pd
import utils
def create_e2_config_file(inp_fn, sel_fn, e2_output_dir, config_file_save_dir):
""" create a config file specifying parameters for enrich2 """
text = """{"libraries": [
{
"counts file": "INP_COUNTS_FN",
"identifiers": {},
"name": "T0",
"report filtered reads": false,
"timepoint": 0
},
{
"counts file": "SEL_COUNTS_FN",
"identifiers": {},
"name": "T1",
"report filtered reads": false,
"timepoint": 1
}
],
"name": "C1",
"output directory": "OUTPUT_DIR"}"""
text = text.replace("INP_COUNTS_FN", inp_fn)
text = text.replace("SEL_COUNTS_FN", sel_fn)
text = text.replace("OUTPUT_DIR", e2_output_dir)
with open(join(config_file_save_dir, "e2_config"), "w") as f:
f.write(text)
return join(config_file_save_dir, "e2_config")
def create_tsv_dataset(e2_scores_fn, save_fn):
""" create a simple tsv dataset file using the output from enrich2 """
e2_data = pd.HDFStore(e2_scores_fn)
# get the e2 scores, removing the wild-type and moving the variant index into a column
e2_scores = e2_data.select("/main/identifiers/scores")
e2_scores = e2_scores.loc[e2_scores.index != "_wt"]
e2_scores.reset_index(inplace=True)
# get the input and selected counts
e2_counts = e2_data.select("/main/identifiers/counts")
if "_wt" in e2_counts.index:
e2_counts = e2_counts.drop("_wt")
variants = e2_scores["index"].values
num_mutations = e2_scores["index"].apply(lambda x: len(x.split(","))).values
scores = e2_scores["score"].values
inp = e2_counts["c_0"].values
sel = e2_counts["c_1"].values
cols = ["variant", "num_mutations", "inp", "sel", "score"]
data = {"variant": variants, "num_mutations": num_mutations, "inp": inp, "sel": sel, "score": scores}
df = pd.DataFrame(data, columns=cols)
if save_fn is not None:
df.to_csv(save_fn, sep="\t", index=False)
return df
def create_e2_dataset(count_df, output_dir, output_fn=None):
""" creates an enrich2 dataset (saves input files for enrich2, runs enrich2, then converts to my format) """
# create a special output directory for enrich2
e2_output_dir = join(output_dir, "e2_output")
utils.ensure_dir_exists(e2_output_dir)
# create enrich2 input files
inp = count_df["inp"]
sel = count_df["sel"]
inp_fn = join(e2_output_dir, "idents_inp.tsv")
sel_fn = join(e2_output_dir, "idents_sel.tsv")
inp.to_csv(inp_fn, sep="\t", header=["count"], index_label=False)
sel.to_csv(sel_fn, sep="\t", header=["count"], index_label=False)
# create enrich2 config file for this ds
e2_config_fn = create_e2_config_file(inp_fn, sel_fn, e2_output_dir, e2_output_dir)
# run e2
call(['conda', 'run', '-n', 'enrich2', 'enrich_cmd', '--no-plots', '--no-tsv', e2_config_fn, 'ratios', 'wt'])
if output_fn is None:
output_fn = "dataset.tsv"
create_tsv_dataset(join(e2_output_dir, "C1_sel.h5"), save_fn=join(output_dir, output_fn))
| 0 | 0 | 0 |
6308b48a76ae02a6048621dc056714df4d1af33e | 549 | py | Python | mydb/migrations/0008_auto_20201229_2130.py | crogar/MyPassVault | b76811f5362e7b0e922a0869f237a223db2b8331 | [
"Unlicense"
] | 1 | 2021-09-16T19:23:16.000Z | 2021-09-16T19:23:16.000Z | mydb/migrations/0008_auto_20201229_2130.py | crogar/MyPassVault | b76811f5362e7b0e922a0869f237a223db2b8331 | [
"Unlicense"
] | null | null | null | mydb/migrations/0008_auto_20201229_2130.py | crogar/MyPassVault | b76811f5362e7b0e922a0869f237a223db2b8331 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-30 02:30
from django.db import migrations, models
| 22.875 | 68 | 0.568306 | # Generated by Django 3.1.4 on 2020-12-30 02:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mydb', '0007_user_user_name'),
]
operations = [
migrations.AddField(
model_name='user',
name='password',
field=models.CharField(default='@+Pass', max_length=50),
),
migrations.AlterField(
model_name='user',
name='user_name',
field=models.CharField(max_length=50),
),
]
| 0 | 435 | 23 |
6d7a42ea8be0c33fc9ace49fb978d28c99be4732 | 4,616 | py | Python | app/test/test_model/charts/test_base_charts.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2019-07-25T21:15:05.000Z | 2019-07-25T21:15:05.000Z | app/test/test_model/charts/test_base_charts.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 44 | 2019-08-05T15:24:00.000Z | 2022-01-31T23:11:31.000Z | app/test/test_model/charts/test_base_charts.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2021-05-11T07:49:51.000Z | 2021-05-11T07:49:51.000Z | '''Main tests in API'''
import unittest
import pandas as pd
from model.charts.base import BaseChart
class BaseChartGetFillColorTest():
    # class BaseChartGetFillColorTest(unittest.TestCase):
    ''' Test behaviours linked to getting the fill color from a color array '''
    # NOTE(review): the unittest.TestCase base class is commented out above,
    # so this suite is currently disabled and will not be collected.
    def test_get_fill_color(self):
        ''' Tests if an element from the color array is correctly fetched '''
        # Index 1 into colorArray should select the second entry.
        options_from_yaml = {'chart_options': {'colorArray': ['red', 'yellow', 'green']}}
        self.assertEqual(
            BaseChart.get_fill_color(1, options_from_yaml),
            'yellow'
        )
class BaseChartLegendNamesTest():
    # class BaseChartLegendNamesTest(unittest.TestCase):
    ''' Test behaviours linked to fetching legend names for series '''
    # NOTE(review): the unittest.TestCase base class is commented out above,
    # so this suite is currently disabled and will not be collected.
    def test_legend_names_no_options(self):
        ''' Tests if no legend names are returned when there's no option '''
        dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
        self.assertEqual(
            BaseChart.get_legend_names(dataframe, None),
            {}
        )
    def test_legend_names_no_chart_options(self):
        ''' Tests if no legend names are returned when there's no chart_options '''
        dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
        self.assertEqual(
            BaseChart.get_legend_names(dataframe, {}),
            {}
        )
    def test_legend_names_no_dataframe(self):
        ''' Tests if no legend names are returned when there's no dataframe '''
        options_from_yaml = {'chart_options': {'id': 'idx'}}
        self.assertEqual(
            BaseChart.get_legend_names(None, options_from_yaml),
            {}
        )
    def test_legend_names_empty_dataframe(self):
        ''' Tests if no legend names are returned when the dataframe is empty '''
        options_from_yaml = {'chart_options': {'id': 'idx'}}
        self.assertEqual(
            BaseChart.get_legend_names(pd.DataFrame([]), options_from_yaml),
            {}
        )
    def test_legend_names_no_id_field(self):
        ''' Tests if no legend names are returned when no ID field is given '''
        options_from_yaml = {'chart_options': {'legend_field': 'lgnd'}}
        dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
        self.assertEqual(
            BaseChart.get_legend_names(dataframe, options_from_yaml),
            {}
        )
    def test_legend_names_no_label_field(self):
        ''' Tests if legend names are built from series ID in the dataframe '''
        # Without a legend_field option, each series ID maps to itself.
        options_from_yaml = {'chart_options': {'id': 'idx'}}
        dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
        self.assertEqual(
            BaseChart.get_legend_names(dataframe, options_from_yaml),
            {'A': 'A', 'B': 'B'}
        )
    def test_legend_names(self):
        ''' Tests if legend names are built from series ID in the dataframe, with
        a mirror legend name specified '''
        # With legend_field present, each series ID maps to its label column.
        options_from_yaml = {'chart_options': {'legend_field': 'lgnd', 'id': 'idx'}}
        dataframe = pd.DataFrame([
            {'idx': 'A', 'lgnd': 'A_lbl'},
            {'idx': 'B', 'lgnd': 'B_lbl'}
        ])
        self.assertEqual(
            BaseChart.get_legend_names(dataframe, options_from_yaml),
            {'A': 'A_lbl', 'B': 'B_lbl'}
        )
class BaseChartTooltipTest():
    # class BaseChartTooltipTest(unittest.TestCase):
    ''' Test behaviours linked to creating tooltip structure '''
    # NOTE(review): the unittest.TestCase base class is commented out above,
    # so this suite is currently disabled and will not be collected.
    def test_tooltip_no_option(self):
        ''' Tests if default tooltip is returned when no option is given '''
        self.assertEqual(
            BaseChart.build_tooltip(None),
            'Tooltip!'
        )
    def test_tooltip_no_headers(self):
        ''' Tests if default tooltip is returned when no headers are given '''
        self.assertEqual(
            BaseChart.build_tooltip({}),
            'Tooltip!'
        )
    def test_tooltip(self):
        ''' Tests if tooltips are built correctly '''
        # Each header entry becomes one <tr> pairing the label with an
        # @field placeholder cell.
        options_from_yaml = {'headers': [
            {'text': 'Value A:', 'value': 'field_a'},
            {'text': 'Value B:', 'value': 'field_b'}
        ]}
        self.assertEqual(
            BaseChart.build_tooltip(options_from_yaml),
            '<table>'
            '<tr style="text-align: left;">'
            '<th style="padding: 4px; padding-right: 10px;">Value A:</th>'
            '<td style="padding: 4px;">@field_a</td>'
            '</tr>'
            '<tr style="text-align: left;">'
            '<th style="padding: 4px; padding-right: 10px;">Value B:</th>'
            '<td style="padding: 4px;">@field_b</td>'
            '</tr>'
            '</table>'
        )
| 38.789916 | 89 | 0.585789 | '''Main tests in API'''
import unittest
import pandas as pd
from model.charts.base import BaseChart
class BaseChartGetFillColorTest():
# class BaseChartGetFillColorTest(unittest.TestCase):
''' Test behaviours linked to getting the fill color from a color array '''
def test_get_fill_color(self):
''' Tests if an element from collor array is correctly fetched '''
options_from_yaml = {'chart_options': {'colorArray': ['red', 'yellow', 'green']}}
self.assertEqual(
BaseChart.get_fill_color(1, options_from_yaml),
'yellow'
)
class BaseChartLegendNamesTest():
# class BaseChartLegendNamesTest(unittest.TestCase):
''' Test behaviours linked to fetching legend names for series '''
def test_legend_names_no_options(self):
''' Tests if no legend names are returned when there's no option '''
dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
self.assertEqual(
BaseChart.get_legend_names(dataframe, None),
{}
)
def test_legend_names_no_chart_options(self):
''' Tests if no legend names are returned when there's no chart_options '''
dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
self.assertEqual(
BaseChart.get_legend_names(dataframe, {}),
{}
)
def test_legend_names_no_dataframe(self):
''' Tests if no legend names are returned when there's no dataframe '''
options_from_yaml = {'chart_options': {'id': 'idx'}}
self.assertEqual(
BaseChart.get_legend_names(None, options_from_yaml),
{}
)
def test_legend_names_empty_dataframe(self):
''' Tests if no legend names are returned when the dataframe is empty '''
options_from_yaml = {'chart_options': {'id': 'idx'}}
self.assertEqual(
BaseChart.get_legend_names(pd.DataFrame([]), options_from_yaml),
{}
)
def test_legend_names_no_id_field(self):
''' Tests if no legend names are returned when no ID field is given '''
options_from_yaml = {'chart_options': {'legend_field': 'lgnd'}}
dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
self.assertEqual(
BaseChart.get_legend_names(dataframe, options_from_yaml),
{}
)
def test_legend_names_no_label_field(self):
''' Tests if legend names are built from series ID in the dataframe '''
options_from_yaml = {'chart_options': {'id': 'idx'}}
dataframe = pd.DataFrame([{'idx': 'A'}, {'idx': 'B'}])
self.assertEqual(
BaseChart.get_legend_names(dataframe, options_from_yaml),
{'A': 'A', 'B': 'B'}
)
def test_legend_names(self):
''' Tests if legend names are built from series ID in the dataframe, with
a mirror legend name specified '''
options_from_yaml = {'chart_options': {'legend_field': 'lgnd', 'id': 'idx'}}
dataframe = pd.DataFrame([
{'idx': 'A', 'lgnd': 'A_lbl'},
{'idx': 'B', 'lgnd': 'B_lbl'}
])
self.assertEqual(
BaseChart.get_legend_names(dataframe, options_from_yaml),
{'A': 'A_lbl', 'B': 'B_lbl'}
)
class BaseChartTooltipTest():
# class BaseChartTooltipTest(unittest.TestCase):
''' Test behaviours linked to creating tooltip structure '''
def test_tooltip_no_option(self):
''' Tests if default tooltip is returned when no option is given '''
self.assertEqual(
BaseChart.build_tooltip(None),
'Tooltip!'
)
def test_tooltip_no_headers(self):
''' Tests if default tooltip is returned when no headers are given '''
self.assertEqual(
BaseChart.build_tooltip({}),
'Tooltip!'
)
def test_tooltip(self):
''' Tests if tooltips are built correctly '''
options_from_yaml = {'headers': [
{'text': 'Value A:', 'value': 'field_a'},
{'text': 'Value B:', 'value': 'field_b'}
]}
self.assertEqual(
BaseChart.build_tooltip(options_from_yaml),
'<table>'
'<tr style="text-align: left;">'
'<th style="padding: 4px; padding-right: 10px;">Value A:</th>'
'<td style="padding: 4px;">@field_a</td>'
'</tr>'
'<tr style="text-align: left;">'
'<th style="padding: 4px; padding-right: 10px;">Value B:</th>'
'<td style="padding: 4px;">@field_b</td>'
'</tr>'
'</table>'
)
| 0 | 0 | 0 |
5e84c564bdbb0fce6589d7f9791128a3f6d89b8b | 622 | py | Python | techblog/urls.py | keyurr2/tech-blog-portal | d5ba9d3793f62563bd47d38b9491f93f01dfb18c | [
"MIT"
] | null | null | null | techblog/urls.py | keyurr2/tech-blog-portal | d5ba9d3793f62563bd47d38b9491f93f01dfb18c | [
"MIT"
] | null | null | null | techblog/urls.py | keyurr2/tech-blog-portal | d5ba9d3793f62563bd47d38b9491f93f01dfb18c | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
import django
# Django >= 1.8 accepts urlpatterns as a plain list of url() entries; older
# versions require wrapping them in the patterns() helper instead.
if django.VERSION >= (1, 8):
    urlpatterns = [
        url(r'^', include('blog.urls', namespace="blog")),
        url(r'^admin/', include(admin.site.urls)),
        url(r'^ckeditor/', include('libs.ckeditor_uploader.urls')),
    ]
else:
    from django.conf.urls import patterns
    # On the pre-1.8 code path the admin modules are registered explicitly.
    admin.autodiscover()
    urlpatterns = patterns(
        '',
        url(r'^', include('blog.urls', namespace="blog")),
        url(r'^admin/', include(admin.site.urls)),
        url(r'^ckeditor/', include('libs.ckeditor_uploader.urls')),
    )
| 28.272727 | 67 | 0.617363 | from django.conf.urls import include, url
from django.contrib import admin
import django
if django.VERSION >= (1, 8):
urlpatterns = [
url(r'^', include('blog.urls', namespace="blog")),
url(r'^admin/', include(admin.site.urls)),
url(r'^ckeditor/', include('libs.ckeditor_uploader.urls')),
]
else:
from django.conf.urls import patterns
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^', include('blog.urls', namespace="blog")),
url(r'^admin/', include(admin.site.urls)),
url(r'^ckeditor/', include('libs.ckeditor_uploader.urls')),
)
| 0 | 0 | 0 |
8cbe3aea0d8868ad0de011142284d254291c3184 | 683 | py | Python | test/test_users.py | hichris1234/ChatExchange | d7c84e99221795653d1476a8498a08ceb891e11c | [
"Apache-2.0",
"MIT"
] | null | null | null | test/test_users.py | hichris1234/ChatExchange | d7c84e99221795653d1476a8498a08ceb891e11c | [
"Apache-2.0",
"MIT"
] | null | null | null | test/test_users.py | hichris1234/ChatExchange | d7c84e99221795653d1476a8498a08ceb891e11c | [
"Apache-2.0",
"MIT"
] | null | null | null | import chatexchange
import live_testing
if live_testing.enabled:
| 27.32 | 57 | 0.635432 | import chatexchange
import live_testing
if live_testing.enabled:
def test_user_info():
client = chatexchange.Client('stackexchange.com')
user = client.get_user(-2)
assert user.id == -2
assert not user.is_moderator
assert user.name == "StackExchange"
assert user.room_count >= 18
assert user.message_count >= 129810
assert user.reputation == -1
user = client.get_user(31768)
assert user.id == 31768
assert user.is_moderator
assert user.name == "ManishEarth"
assert user.room_count >= 222
assert user.message_count >= 89093
assert user.reputation > 115000
| 589 | 0 | 26 |
a387a5f5fcb08b149cb3a599dbc0c4b65d4795e9 | 481 | py | Python | Experiments/catherine_note.py | snoop2head/catherine_crawling | 0a4fa108c07bbe5241c19e000f9f5a3fd9e90f99 | [
"MIT"
] | null | null | null | Experiments/catherine_note.py | snoop2head/catherine_crawling | 0a4fa108c07bbe5241c19e000f9f5a3fd9e90f99 | [
"MIT"
] | 2 | 2021-04-30T21:06:26.000Z | 2021-06-02T00:57:47.000Z | Experiments/catherine_note.py | snoop2head/danawa.com_crawling | 0a4fa108c07bbe5241c19e000f9f5a3fd9e90f99 | [
"MIT"
] | null | null | null |
'''
for i in all_title:
individual_title = i.text
print(individual_title)
x +=1
sheet1.write(x+1,1,individual_title)
wb.save('냉장고2.xls')
'''
'''
for j in all_price:
individual_price = re.sub("[^0-9]", "", j.text)
print(individual_price)
x +=1
sheet1.write(x+1,2,individual_title)
wb.save('냉장고.xls')
b =[]
def remove_tag(content):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', content)
return cleantext
'''
| 15.516129 | 51 | 0.598753 |
'''
for i in all_title:
individual_title = i.text
print(individual_title)
x +=1
sheet1.write(x+1,1,individual_title)
wb.save('냉장고2.xls')
'''
'''
for j in all_price:
individual_price = re.sub("[^0-9]", "", j.text)
print(individual_price)
x +=1
sheet1.write(x+1,2,individual_title)
wb.save('냉장고.xls')
b =[]
def remove_tag(content):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', content)
return cleantext
'''
| 0 | 0 | 0 |
402929118183075de86df707320a63b02dcc341a | 25 | py | Python | test/fixtures/many_files/module2.py | kynikos/report-todo | 2810a1c84f4ae81c0eccf8480d180ea4c60e4f36 | [
"MIT"
] | 2 | 2020-05-29T21:17:49.000Z | 2021-08-12T15:03:07.000Z | test/fixtures/many_files/module2.py | kynikos/report-todo | 2810a1c84f4ae81c0eccf8480d180ea4c60e4f36 | [
"MIT"
] | 4 | 2021-03-10T10:23:45.000Z | 2021-09-21T07:58:31.000Z | test/fixtures/many_files/module2.py | kynikos/report-todo | 2810a1c84f4ae81c0eccf8480d180ea4c60e4f36 | [
"MIT"
] | null | null | null | sth = 0
# BUG: solve me
| 6.25 | 15 | 0.56 | sth = 0
# BUG: solve me
| 0 | 0 | 0 |
40a94dcc64cf288fe47b1c37866ff55ff468b72e | 5,080 | py | Python | activecampaign/ActiveCampaign.py | drewjaja/active-campaign-python | 07b07485776ceb893d95ff4bac67572f3c9b21a5 | [
"MIT"
] | 4 | 2018-03-20T15:21:49.000Z | 2018-10-10T20:52:47.000Z | activecampaign/ActiveCampaign.py | drewjaja/active-campaign-python | 07b07485776ceb893d95ff4bac67572f3c9b21a5 | [
"MIT"
] | 1 | 2018-03-20T17:54:22.000Z | 2018-03-20T19:58:01.000Z | activecampaign/ActiveCampaign.py | drewjaja/active-campaign-python | 07b07485776ceb893d95ff4bac67572f3c9b21a5 | [
"MIT"
] | 3 | 2018-03-20T14:47:43.000Z | 2018-07-17T06:56:27.000Z | from importlib import import_module
from .Connector import Connector
# formatters for making life easier, don't you want it that way?
fmt_params = '{}&api_action={}&api_output={}&{}'.format
fmt_noparams = '{}&api_action={}&api_output={}'.format
| 29.364162 | 78 | 0.556693 | from importlib import import_module
from .Connector import Connector
# Pre-bound str.format helpers that build API request URLs of the form
# "<base>&api_action=<action>&api_output=<format>[&<params>]".
# NOTE(review): neither helper is referenced inside this module -- presumably
# used by the component classes; verify before removing.
fmt_params = '{}&api_action={}&api_output={}&{}'.format
fmt_noparams = '{}&api_action={}&api_output={}'.format
def get_mod(cls, parent):
    """Import submodule ``parent.<cls>`` and return its class named ``cls``.

    E.g. ``get_mod("Contact", "activecampaign")`` imports
    ``activecampaign.Contact`` and returns its ``Contact`` class.
    """
    source = import_module(".{}".format(cls), parent)
    return getattr(source, cls)
class ActiveCampaign(Connector):
    """Thin dispatcher for the ActiveCampaign API.

    ``api("component/method")`` routes to the matching component class of
    this package (e.g. ``"contact/view"`` -> ``Contact.view``); the
    ``_``-prefixed methods are shorthand helpers for common contact
    operations.
    """

    def __init__(self, url, api_key, api_user='', api_pass=''):
        """Store the account endpoint and key and initialise the connector."""
        self.url = url
        self.api_key = api_key
        # Upper-case aliases are what api() hands to the component classes.
        self.URL = url
        self.APIKEY = api_key
        super(ActiveCampaign, self).__init__(url, api_key, api_user, api_pass)

    def api(self, path, post_data=None):
        """Dispatch an API call described by a ``component/method`` path.

        Arguments:
            path:str e.g. ``"contact/view"``; query parameters may be
                appended to the method, e.g. ``"contact/edit?overwrite=0"``.
            post_data:dict optional POST fields forwarded to the method.

        Returns the component method's result, ``'Invalid method.'`` for a
        malformed path, or ``None`` when the method does not exist.
        """
        # BUGFIX: post_data previously used a mutable default ({}), and a
        # path without "/" crashed with IndexError on components[1] before
        # the intended 'Invalid method.' return could trigger.
        if post_data is None:
            post_data = {}
        components = path.split('/')
        if len(components) < 2 or not components[1]:
            return 'Invalid method.'
        component = components[0]
        if '?' in components[1]:
            # Query params appended to the method, IE: contact/edit?overwrite=0
            method_arr = components[1].split('?')
            method = method_arr[0]
            params = method_arr[1]
        else:
            # Just a method provided, IE: "contact/view"
            method = components[1]
            params = ''
        # Adjustments for reserved words and aliased components.
        if component == 'branding':
            # reserved word
            component = 'design'
        elif component == 'sync':
            component = 'contact'
            method = 'sync'
        elif component == 'singlesignon':
            component = 'auth'
        # "contact" becomes the Contact class of this package.
        handler_cls = get_mod(component.capitalize(), 'activecampaign')
        handler = handler_cls(self.URL, self.APIKEY)
        if method == 'list':
            # "list" is reserved; component classes expose "list_" instead.
            method = 'list_'
        if method in dir(handler):
            if post_data:
                return getattr(handler, method)(params, post_data)
            else:
                return getattr(handler, method)(params)
        return None

    # --- shorthand helpers for common contact operations -------------------

    def _get_contact_by_id(self, cid=None):
        """Get a contact by ID

        Arguments:
            cid:str contact/subscriber ID
        """
        return self.api('contact/view?id={}'.format(cid))

    def _get_contact_by_email(self, email=None):
        """Get a contact by email

        Arguments:
            email:str contact/company Email
        """
        return self.api('contact/view?email={}'.format(email))

    def _delete_contact_by_id(self, cid=None):
        """Delete a contact/company

        Arguments:
            cid:str contact/subscriber ID
        """
        return self.api('contact/delete?id={}'.format(cid))

    def _create_contact(self, data=None):
        """Create a contact/company

        Arguments:
            data:dict proper definition of a contact dict
        """
        # BUGFIX: the action was 'contact/ad', which matches no component
        # method (the ActiveCampaign API action is contact_add), so contact
        # creation always returned None.
        return self.api('contact/add', post_data=data)

    def _add_tags_by_id(self, cid=None, tag=None):
        """Add tags by id for company/contact

        Arguments:
            cid:str contact/subscriber ID
            tag:list(str) list of tags as strings
        """
        return self.api('contact/tag_add',
                        post_data={'id': cid, 'tags': tag})

    def _delete_tags_by_id(self, cid=None, tag=None):
        """Delete a tag by a contact/subscriber ID

        Arguments:
            cid:str contact/subscriber ID
            tag:list(str) list of tags as strings
        """
        return self.api('contact/tag_remove',
                        post_data={'id': cid, 'tags': tag})

    def _add_note_by_id(self, cid=None, note=""):
        """Add a Note for a contact/company

        Arguments:
            cid:str contact/subscriber ID
            note:str a note
        """
        data = {"id": cid, "note": note}
        return self.api('contact/note_add?id={}'.format(cid),
                        post_data=data)

    def _delete_note_by_id(self, nid=None):
        """Delete a note by a note ID

        Arguments:
            nid:str note ID to delete
        """
        return self.api('contact/note_delete?noteid={}'.format(nid))

    def _list_contacts(self):
        """List all contacts"""
        return self.api('contact/list?ids=all')

    def _list_orgs(self):
        """List all organizations"""
        return self.api('organization/list')
| 1,763 | 3,023 | 46 |
4b3ca263799e0ba9ec9077cf644928083178fdfc | 2,067 | py | Python | 3. Least squares/least_squares_GD.py | haitaozhao/PRSL | c81d64d1d2968af8ba5f34ce0ecfed32007822f1 | [
"MIT"
] | 5 | 2022-02-27T08:35:44.000Z | 2022-03-12T07:53:53.000Z | 3. Least squares/least_squares_GD.py | haitaozhao/PRSL | c81d64d1d2968af8ba5f34ce0ecfed32007822f1 | [
"MIT"
] | null | null | null | 3. Least squares/least_squares_GD.py | haitaozhao/PRSL | c81d64d1d2968af8ba5f34ce0ecfed32007822f1 | [
"MIT"
] | null | null | null |
# 读入数据
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
# 划分数据集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# 直接解法
a = np.ones(len(X_train))
XX_train = np.c_[a,X_train]
y_mytest = np.c_[np.ones(len(X_test)),X_test].dot(np.linalg.pinv(XX_train).dot(y_train.reshape(-1,1)))
# 计算直接解法 RMSE
rmse = np.sqrt(1/len(X_test)*np.sum((y_test.reshape(-1,1)-y_mytest)**2))
print('the rmse of least squares is %f \n' %rmse )
## -------------------梯度下降法----------------------------
# 使用 RMSProp
# 初始化参数 Beta 和 学习率 alpha,mu, vt
row,col = XX_train.shape
Beta = np.random.random([col,1])
vt = np.ones([col,1])
alpha = 0.5
mu = 0.9
# 梯度的负方向
delta_Beta = 2/row *( XX_train.T.dot(y_train.reshape(row,1)) - XX_train.T.dot(XX_train).dot(Beta))
# 更新Beta 运用RMSProp
new_vt = mu*vt + (1-mu)* delta_Beta**2
new_Beta = Beta + alpha * delta_Beta/(np.sqrt(new_vt)+np.spacing(1))
loss = []
for idx in range(100000):
tmp_loss = 1/row * np.linalg.norm(y_train.reshape(row,1)-XX_train.dot(Beta))**2
loss.append(tmp_loss)
if tmp_loss < 50:
print(idx)
break
else:
Beta = new_Beta
vt = new_vt
delta_Beta = 2/row *( XX_train.T.dot(y_train.reshape(row,1)) - XX_train.T.dot(XX_train).dot(Beta))
new_vt = mu*vt + (1-mu)* delta_Beta**2
new_Beta = Beta + alpha * delta_Beta/(np.sqrt(new_vt)+np.spacing(1))
## -------------------End----------------------------
# 打印 直接解法和迭代解法的解
print('The direct solution of Beta is: \n')
print(Beta)
print('\n\n')
print('The solution of Gradient Descent is: \n')
print(np.linalg.pinv(XX_train).dot(y_train).reshape(col,1))
print('\n')
y_mytest_GD = np.c_[np.ones(len(X_test)),X_test].dot(Beta)
rmse_GD = np.sqrt(1/len(X_test)*np.sum((y_test.reshape(-1,1)-y_mytest_GD.reshape(-1,1))**2))
print('the rmse of least squares with Gradient Descent is %f' %rmse_GD)
| 28.315068 | 106 | 0.651185 |
# 读入数据
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
# 划分数据集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# 直接解法
a = np.ones(len(X_train))
XX_train = np.c_[a,X_train]
y_mytest = np.c_[np.ones(len(X_test)),X_test].dot(np.linalg.pinv(XX_train).dot(y_train.reshape(-1,1)))
# 计算直接解法 RMSE
rmse = np.sqrt(1/len(X_test)*np.sum((y_test.reshape(-1,1)-y_mytest)**2))
print('the rmse of least squares is %f \n' %rmse )
## -------------------梯度下降法----------------------------
# 使用 RMSProp
# 初始化参数 Beta 和 学习率 alpha,mu, vt
row,col = XX_train.shape
Beta = np.random.random([col,1])
vt = np.ones([col,1])
alpha = 0.5
mu = 0.9
# 梯度的负方向
delta_Beta = 2/row *( XX_train.T.dot(y_train.reshape(row,1)) - XX_train.T.dot(XX_train).dot(Beta))
# 更新Beta 运用RMSProp
new_vt = mu*vt + (1-mu)* delta_Beta**2
new_Beta = Beta + alpha * delta_Beta/(np.sqrt(new_vt)+np.spacing(1))
loss = []
for idx in range(100000):
tmp_loss = 1/row * np.linalg.norm(y_train.reshape(row,1)-XX_train.dot(Beta))**2
loss.append(tmp_loss)
if tmp_loss < 50:
print(idx)
break
else:
Beta = new_Beta
vt = new_vt
delta_Beta = 2/row *( XX_train.T.dot(y_train.reshape(row,1)) - XX_train.T.dot(XX_train).dot(Beta))
new_vt = mu*vt + (1-mu)* delta_Beta**2
new_Beta = Beta + alpha * delta_Beta/(np.sqrt(new_vt)+np.spacing(1))
## -------------------End----------------------------
# 打印 直接解法和迭代解法的解
print('The direct solution of Beta is: \n')
print(Beta)
print('\n\n')
print('The solution of Gradient Descent is: \n')
print(np.linalg.pinv(XX_train).dot(y_train).reshape(col,1))
print('\n')
y_mytest_GD = np.c_[np.ones(len(X_test)),X_test].dot(Beta)
rmse_GD = np.sqrt(1/len(X_test)*np.sum((y_test.reshape(-1,1)-y_mytest_GD.reshape(-1,1))**2))
print('the rmse of least squares with Gradient Descent is %f' %rmse_GD)
| 0 | 0 | 0 |
3f919b8542ac0c22e1b88fed90caadc99ba43fde | 15,016 | py | Python | solutions/3rd-place/tasks/landmark_detector.py | henriquesimoes/humpback | ba687a71f95ef9c9c30426eefae11a69efd6f942 | [
"BSD-3-Clause"
] | 167 | 2019-03-08T03:34:48.000Z | 2022-03-24T06:01:17.000Z | tasks/landmark_detector.py | zhenlan0426/kaggle-humpback | c975332a99bec9c2485fea17c831f52f9a77736f | [
"BSD-2-Clause"
] | 2 | 2019-03-17T07:38:26.000Z | 2020-08-24T11:20:46.000Z | tasks/landmark_detector.py | zhenlan0426/kaggle-humpback | c975332a99bec9c2485fea17c831f52f9a77736f | [
"BSD-2-Clause"
] | 50 | 2019-03-09T00:16:56.000Z | 2022-03-27T11:38:48.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import cv2
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import get_model
from losses import get_loss
from transforms import from_norm_bgr
if __name__ == '__main__':
import cProfile
| 41.027322 | 102 | 0.561068 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import cv2
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import get_model
from losses import get_loss
from transforms import from_norm_bgr
class LandmarkDetector(object):
    """Single-object landmark detector built on a CNN backbone.

    The backbone's pooling/head are replaced so the model predicts, per
    anchor and per feature-map cell, NUM_OUTPUTS values: 1 objectness
    score, 4 box offsets (x, y, w, h) and 4 landmark points (x, y each).
    All coordinates are normalized to [0, 1] relative to the image size,
    as evidenced by the [W,H,...] rescaling in annotate_to_images.
    Labels are assumed to be (B, 12): box (x, y, w, h) followed by four
    landmark points -- consistent with the slicing labels[:,:4] /
    labels[:,4:] below (TODO confirm against the dataset loader).
    """

    # Anchor boxes are (X, Y, W, H); the (W, H) pairs come from RATIO
    # scaled by each entry of SCALE.
    SCALE = [0.5, 1.0, 2.0]
    RATIO = [[0.30, 0.30],
             [0.60, 0.15],
             [0.15, 0.60]]
    # 1 objectness + 4 box offsets + 2 coords x 4 landmarks
    NUM_OUTPUTS = 1+4+2*4

    def __init__(self, config):
        """Build the backbone and replace its head with an anchor head.

        Args:
            config: experiment config providing model/loss parameters
                (notably config.model.params.feature_size).
        """
        # (num_anchors, 2) array of anchor (W, H) sizes.
        self.anchors = [np.array(LandmarkDetector.RATIO) * s for s in LandmarkDetector.SCALE]
        self.anchors = np.concatenate(self.anchors, axis=0)
        assert self.anchors.shape == (len(LandmarkDetector.SCALE) * len(LandmarkDetector.RATIO), 2)

        self.feature_size = config.model.params.feature_size
        self.num_anchors = len(LandmarkDetector.SCALE) * len(LandmarkDetector.RATIO)

        num_outputs = LandmarkDetector.NUM_OUTPUTS
        self.model = get_model(config, num_outputs=num_outputs)
        # Force a fixed spatial output so every backbone yields a
        # feature_size x feature_size grid for the anchor head.
        self.model.avgpool = nn.AdaptiveAvgPool2d(self.feature_size)
        in_features = self.model.last_linear.in_features
        self.model.last_linear = nn.Conv2d(in_channels=in_features,
                                           out_channels=len(self.anchors)*num_outputs,
                                           kernel_size=1)

        # Rebind logits so the fully-convolutional head is used instead of
        # the classification head the backbone originally shipped with.
        def logits(self, features):
            x = self.avgpool(features)
            x = self.last_linear(x)
            return x
        self.model.logits = types.MethodType(logits, self.model)

        if torch.cuda.device_count() > 1:
            self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.cuda()

        # Normalization parameters consumed by the input pipeline.
        self.preprocess_opt = {'mean': self.model.mean,
                               'std': self.model.std,
                               'input_range': self.model.input_range,
                               'input_space': self.model.input_space}
        self.criterion = get_loss(config)
        self.cls_criterion = F.binary_cross_entropy_with_logits

    def get_model(self):
        """Return the underlying (possibly DataParallel-wrapped) model."""
        return self.model

    def get_preprocess_opt(self):
        """Return the preprocessing options dict for the input pipeline."""
        return self.preprocess_opt

    def forward(self, images, labels=None, **_):
        """Run the raw model forward pass; labels are ignored here."""
        return self.model(images)

    def inference(self, images=None, outputs=None, labels=None, **_):
        """Decode raw outputs into the single best box/landmarks per image.

        Either images or precomputed outputs must be given. Picks, per
        image, the anchor cell with the highest objectness score and
        decodes its box and landmarks.

        Returns:
            dict with 'boxes' (B, 4), 'landmarks' (B, 8) and
            'probabilities' (B,) tensors.
        """
        if outputs is None:
            assert images is not None
            outputs = self.model(images)
        num_outputs = LandmarkDetector.NUM_OUTPUTS
        outputs = outputs.view(-1,num_outputs,self.num_anchors,self.feature_size,self.feature_size)
        anchors = self._get_anchors()
        B,C,A,H,W = outputs.size()
        # Flatten anchor x spatial dims so argmax ranges over all candidates.
        outputs = outputs.view(B,C,A*H*W)
        anchors = torch.stack([anchors]*B, dim=0)
        anchors = anchors.view(B,-1,A*H*W)
        scores, indices = torch.max(outputs[:,0], dim=1)
        # Select the winning candidate per batch element.
        outputs = outputs[torch.arange(B), :, indices]
        anchors = anchors[torch.arange(B), :, indices]
        boxes = self._targets_to_boxes(outputs[:,1:5], anchors)
        landmarks = self._targets_to_landmarks(outputs[:,5:], anchors)
        # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        probabilities = torch.sigmoid(scores)
        return {'boxes': boxes, 'landmarks': landmarks, 'probabilities': probabilities}

    def _get_anchors(self):
        """Build the (4, num_anchors, H, W) anchor tensor on the GPU.

        Anchor centers are placed at cell centers of the feature grid in
        normalized coordinates; sizes come from self.anchors.
        """
        anchors = []
        denom = self.feature_size*2
        for y in np.arange(1/denom, 1.0, 2/denom):
            for x in np.arange(1/denom, 1.0, 2/denom):
                for w, h in self.anchors:
                    anchors.append([x, y, w, h])
        # row x column x num_anchors x 4
        # 5 x 5 x 9 x 4
        anchors = np.array(anchors).reshape((self.feature_size,self.feature_size,self.num_anchors,4))
        # row x column x num_anchors x 4 => 4 x num_anchors x row x col
        anchors = np.transpose(anchors, (3,2,0,1))
        anchors = torch.FloatTensor(anchors).cuda()
        assert anchors.size() == (4,self.num_anchors,self.feature_size,self.feature_size)
        return anchors

    def loss(self, outputs, labels, **_):
        """Combined regression + classification loss.

        Regression terms are masked by target_on_off so only confidently
        positive/negative anchors contribute; weights 5 / 0.5 balance the
        two objectives (tuning rationale not documented here).
        """
        num_outputs = LandmarkDetector.NUM_OUTPUTS
        outputs = outputs.view(-1,num_outputs,self.num_anchors,self.feature_size,self.feature_size)
        anchors = self._get_anchors()
        output_boxes = self._targets_to_boxes(outputs[:,1:5], anchors.unsqueeze(0))
        output_landmarks = self._targets_to_landmarks(outputs[:,5:], anchors.unsqueeze(0))
        box_targets = self._boxes_to_targets(labels[:,:4], anchors)
        landmark_targets = self._landmarks_to_targets(labels[:,4:], anchors)
        cls_targets, target_on_off = self._get_cls_targets(labels, anchors.unsqueeze(0))
        assert cls_targets.size() == target_on_off.size()
        assert cls_targets.size() == outputs[:,:1].size()
        outputs = outputs * target_on_off
        loss_box = self.criterion(outputs[:,1:5], box_targets)
        loss_landmark = self.criterion(outputs[:,5:], landmark_targets)
        loss_cls = self.cls_criterion(outputs[:,:1], cls_targets)
        return (loss_box + loss_landmark) * 5 + loss_cls * 0.5

    def metrics(self, boxes, landmarks, probabilities, labels, **_):
        """Return mean IoU (also used as 'score') and mean landmark L2 error."""
        iou = torch.mean(self._get_iou(boxes, labels[:,:4])).item()
        l2 = torch.mean(torch.sqrt(torch.sum(torch.pow(landmarks - labels[:,4:], 2), dim=1)))
        return {'score': iou, 'iou': iou, 'l2': l2}

    def annotate_to_images(self, images, labels, predicts, **_):
        """Draw GT (red) and predicted (blue) boxes/landmarks on a sample.

        Picks the two worst and two best IoU examples from the batch and
        returns them as CHW uint8 arrays with the IoU/probability printed
        in the corner.
        """
        assert images.dim() == 4
        assert labels.dim() == 2
        boxes = predicts['boxes']
        landmarks = predicts['landmarks']
        probabilities = predicts['probabilities']
        ious = self._get_iou(boxes, labels[:,:4])
        # Two lowest- and two highest-IoU samples.
        iou_1, indices_1 = torch.topk(ious, 2, largest=False)
        iou_2, indices_2 = torch.topk(ious, 2, largest=True)
        indices = torch.cat([indices_1, indices_2], dim=0)

        images = images.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
        boxes = boxes.detach().cpu().numpy()
        landmarks = landmarks.detach().cpu().numpy()
        probabilities = probabilities.detach().cpu().numpy()
        ious = ious.detach().cpu().numpy()
        indices = indices.detach().cpu().numpy()

        images = images[indices]
        labels = labels[indices]
        boxes = boxes[indices]
        landmarks = landmarks[indices]
        probabilities = probabilities[indices]
        ious = ious[indices]

        annotated_images = []
        for item in zip(images, labels, boxes, landmarks, probabilities, ious):
            image, label, box, landmark, probability, iou = item
            if image.shape[0] == 3:
                # CHW -> HWC for OpenCV drawing.
                image = np.transpose(image, [1,2,0])
            H, W, _ = image.shape
            # Denormalize from [0, 1] into pixel coordinates.
            label = label * [W,H,W,H, W,H,W,H,W,H,W,H]
            label = label.astype(np.int32)
            box = box * [W,H,W,H]
            box = box.astype(np.int32)
            landmark = landmark * [W,H,W,H,W,H,W,H]
            landmark = landmark.astype(np.int32)
            # Convert center/size boxes into corner coordinates.
            label_box_x1 = int(label[0] - label[2] / 2)
            label_box_y1 = int(label[1] - label[3] / 2)
            label_box_x2 = int(label[0] + label[2] / 2)
            label_box_y2 = int(label[1] + label[3] / 2)
            predict_box_x1 = int(box[0] - box[2] / 2)
            predict_box_y1 = int(box[1] - box[3] / 2)
            predict_box_x2 = int(box[0] + box[2] / 2)
            predict_box_y2 = int(box[1] + box[3] / 2)
            label_landmarks = [(int(label[4]), int(label[5])),
                               (int(label[6]), int(label[7])),
                               (int(label[8]), int(label[9])),
                               (int(label[10]), int(label[11]))]
            predict_landmarks = [(int(landmark[0]), int(landmark[1])),
                                 (int(landmark[2]), int(landmark[3])),
                                 (int(landmark[4]), int(landmark[5])),
                                 (int(landmark[6]), int(landmark[7]))]
            image = from_norm_bgr(image, **self.preprocess_opt)
            image = image.astype('uint8')
            image = image.copy()
            # Ground truth in red, prediction in blue (BGR order).
            cv2.rectangle(image,
                          (label_box_x1, label_box_y1), (label_box_x2, label_box_y2),
                          (0,0,255), thickness=3)
            cv2.rectangle(image,
                          (predict_box_x1, predict_box_y1), (predict_box_x2, predict_box_y2),
                          (255,0,0), thickness=3)
            for i, (x, y) in enumerate(label_landmarks):
                # Color-code the first/third GT landmarks to show ordering.
                if i == 0:
                    cv2.circle(image, (x,y), 4, (0,255,0), thickness=-1)
                elif i == 2:
                    cv2.circle(image, (x,y), 4, (0,0,255), thickness=-1)
                else:
                    cv2.circle(image, (x,y), 4, (0,255,255), thickness=-1)
            for x, y in predict_landmarks:
                cv2.circle(image, (x,y), 4, (255,0,0), thickness=-1)
            image = image.copy()
            cv2.putText(image, '{:.04f}, {:.04f}'.format(iou, probability),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 0, 0), lineType=cv2.LINE_AA)
            image = np.array(image)
            # Back to CHW for downstream consumers (e.g. tensorboard).
            image = np.transpose(image, [2,0,1])
            annotated_images.append(image)
        return annotated_images

    def to_dataframe(self, key_list, boxes, probabilities):
        """Assemble predictions into a DataFrame indexed by key.

        Columns: x, y, w, h, probability. The prints are debug output left
        in by the author; kept for parity with existing logs.
        """
        print(len(key_list), len(boxes), len(probabilities))
        print(key_list[0])
        print(boxes[0])
        print(probabilities[0])
        records = []
        for key, box, probability in zip(key_list, boxes, probabilities):
            x, y, w, h = box
            records.append((key, x, y, w, h, probability))
        df = pd.DataFrame.from_records(
            records, columns=['key', 'x', 'y', 'w', 'h', 'probability'])
        df = df.set_index('key')
        return df

    def _get_cls_targets(self, labels, anchors):
        """Build objectness targets and the loss on/off mask per anchor.

        Anchors with IoU > 0.75 against the label box are positive;
        anchors with IoU < 0.40 are negative; anchors in between are
        excluded from the loss via the on_off mask.
        """
        # assert labels.size() == anchors.size()[:2], '{} vs {}'.format(labels.size(), anchors.size())
        B, _ = labels.size()
        ious = torch.zeros((labels.size(0), anchors.size(2), anchors.size(3), anchors.size(4))).cuda()
        for i in range(anchors.size(2)):
            for y in range(anchors.size(3)):
                for x in range(anchors.size(4)):
                    ious[:,i,y,x] = self._get_iou(labels, anchors[:,:,i,y,x])
        # ious = (B,9,9,9)
        ious_max, _ = torch.max(ious, dim=1, keepdim=False)
        # ious_max: (B,9,9)
        ious_max = ious_max.view(B, -1)
        _, ious_max_indices = torch.max(ious_max, dim=1, keepdim=False)
        # targets: 1 if ious > 0.75 else 0
        # ious Bx1x9x9
        targets = torch.zeros_like(ious)
        on_off = torch.zeros_like(ious)
        thres_pos = 0.75
        thres_neg = 0.40
        targets[ious > thres_pos] = 1.0
        on_off[ious > thres_pos] = 1.0
        on_off[ious < thres_neg] = 1.0
        targets = targets.float()
        on_off = on_off.float()
        return (targets.view(labels.size(0),1,anchors.size(2),anchors.size(3),anchors.size(4)),
                on_off.view(labels.size(0),1,anchors.size(2),anchors.size(3),anchors.size(4)))

    def _boxes_to_targets(self, boxes, anchors):
        """Encode (x, y, w, h) boxes as anchor-relative regression targets."""
        if len(boxes.size()) == 2:
            assert boxes.size(1) == anchors.size(0)
            boxes = boxes.view(boxes.size(0), boxes.size(1), 1, 1, 1)
        tx = (boxes[:,0,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:]
        ty = (boxes[:,1,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:]
        tw = torch.log(boxes[:,2,:,:,:] / anchors[2,:,:,:])
        th = torch.log(boxes[:,3,:,:,:] / anchors[3,:,:,:])
        return torch.stack([tx,ty,tw,th], dim=1)

    def _targets_to_boxes(self, targets, anchors):
        """Decode anchor-relative regression targets back to (x, y, w, h)."""
        x = anchors[:,2] * targets[:,0] + anchors[:,0]
        y = anchors[:,3] * targets[:,1] + anchors[:,1]
        w = anchors[:,2] * torch.exp(targets[:,2])
        h = anchors[:,3] * torch.exp(targets[:,3])
        return torch.stack([x,y,w,h], dim=1)

    def _landmarks_to_targets(self, landmarks, anchors):
        """Encode 4 (x, y) landmark points as anchor-relative offsets."""
        if len(landmarks.size()) == 2:
            assert landmarks.size(1) == 8
            landmarks = landmarks.view(landmarks.size(0), landmarks.size(1), 1, 1, 1)
        points = [
            (landmarks[:,0,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,1,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,2,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,3,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,4,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,5,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,6,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,7,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:]]
        return torch.stack(points, dim=1)

    def _targets_to_landmarks(self, targets, anchors):
        """Decode anchor-relative landmark offsets back to coordinates."""
        points = [
            anchors[:,2] * targets[:,0] + anchors[:,0],
            anchors[:,3] * targets[:,1] + anchors[:,1],
            anchors[:,2] * targets[:,2] + anchors[:,0],
            anchors[:,3] * targets[:,3] + anchors[:,1],
            anchors[:,2] * targets[:,4] + anchors[:,0],
            anchors[:,3] * targets[:,5] + anchors[:,1],
            anchors[:,2] * targets[:,6] + anchors[:,0],
            anchors[:,3] * targets[:,7] + anchors[:,1]]
        return torch.stack(points, dim=1)

    def _get_iou(self, coords_a, coords_b):
        """IoU between batches of (x, y, w, h) boxes in [0, 1] coordinates."""
        def clamp(v):
            return torch.clamp(v, min=0.0, max=1.0)
        # BUG FIX: area_a previously mixed widths and heights from the two
        # boxes (coords_a[:,2] * coords_b[:,3]), corrupting the union term
        # whenever the boxes had different aspect ratios.
        area_a = coords_a[:,2] * coords_a[:,3]
        area_b = coords_b[:,2] * coords_b[:,3]
        left_tops_x_a = clamp(coords_a[:,0] - coords_a[:,2] / 2)
        left_tops_y_a = clamp(coords_a[:,1] - coords_a[:,3] / 2)
        right_bottoms_x_a = clamp(coords_a[:,0] + coords_a[:,2] / 2)
        right_bottoms_y_a = clamp(coords_a[:,1] + coords_a[:,3] / 2)
        left_tops_x_b = clamp(coords_b[:,0] - coords_b[:,2] / 2)
        left_tops_y_b = clamp(coords_b[:,1] - coords_b[:,3] / 2)
        right_bottoms_x_b = clamp(coords_b[:,0] + coords_b[:,2] / 2)
        right_bottoms_y_b = clamp(coords_b[:,1] + coords_b[:,3] / 2)
        left_tops_x = torch.max(left_tops_x_a, left_tops_x_b)
        left_tops_y = torch.max(left_tops_y_a, left_tops_y_b)
        right_bottoms_x = torch.min(right_bottoms_x_a, right_bottoms_x_b)
        right_bottoms_y = torch.min(right_bottoms_y_a, right_bottoms_y_b)
        width = clamp(right_bottoms_x - left_tops_x)
        height = clamp(right_bottoms_y - left_tops_y)
        intersection = width * height
        return intersection / (area_a + area_b - intersection)
def main():
    """Smoke test: build a LandmarkDetector from the default config."""
    print('main')
    from utils.config import _get_default_config
    config = _get_default_config()
    config.model.params.num_outputs = 4
    config.loss.name = 'mse_loss'
    # Construction itself is the check; the instance is not used further.
    detector = LandmarkDetector(config)
if __name__ == '__main__':
import cProfile
| 13,984 | 598 | 46 |
28de5bd400219bc8561a85b91908150136994bc2 | 1,702 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Attributes/Propellants/Jet_A.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Attributes/Propellants/Jet_A.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Attributes/Propellants/Jet_A.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Attributes-Propellants
#Jet A
#
# Created: Unk 2013, SUAVE TEAM
# Modified: Feb 2016, M. Vegh
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from .Propellant import Propellant
from SUAVE.Core import Data
# ----------------------------------------------------------------------
# Jet_A Propellant Class
# ----------------------------------------------------------------------
## @ingroup Attributes-Propellants
class Jet_A(Propellant):
    """Physical property constants for Jet A aviation fuel.

    Assumptions:
    None

    Source:
    None
    """

    def __defaults__(self):
        """Set the default property values for Jet A.

        Assumptions:
        None

        Source:
        Values commonly available

        Inputs:
        None

        Outputs:
        None

        Properties Used:
        None
        """
        self.tag = 'Jet_A'
        self.reactant = 'O2'

        # Bulk properties at 15 C and 1 atm
        self.density = 820.0              # kg/m^3
        self.specific_energy = 43.02e6    # J/kg
        self.energy_density = 35276.4e6   # J/m^3

        # kg propellant / kg oxidizer
        self.max_mass_fraction = Data({'Air': 0.0633, 'O2': 0.3022})

        # Critical temperatures, all in K
        self.temperatures.flash = 311.15
        self.temperatures.autoignition = 483.15
        self.temperatures.freeze = 233.15
self.temperatures.boiling = 0.0 # K | 29.859649 | 109 | 0.40423 | ## @ingroup Attributes-Propellants
#Jet A
#
# Created: Unk 2013, SUAVE TEAM
# Modified: Feb 2016, M. Vegh
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from .Propellant import Propellant
from SUAVE.Core import Data
# ----------------------------------------------------------------------
# Jet_A Propellant Class
# ----------------------------------------------------------------------
## @ingroup Attributes-Propellants
class Jet_A(Propellant):
    """Physical property constants for Jet A aviation fuel.

    Assumptions:
    None

    Source:
    None
    """

    def __defaults__(self):
        """Set the default property values for Jet A.

        Assumptions:
        None

        Source:
        Values commonly available

        Inputs:
        None

        Outputs:
        None

        Properties Used:
        None
        """
        self.tag = 'Jet_A'
        self.reactant = 'O2'

        # Bulk properties at 15 C and 1 atm
        self.density = 820.0              # kg/m^3
        self.specific_energy = 43.02e6    # J/kg
        self.energy_density = 35276.4e6   # J/m^3

        # kg propellant / kg oxidizer
        self.max_mass_fraction = Data({'Air': 0.0633, 'O2': 0.3022})

        # Critical temperatures, all in K
        self.temperatures.flash = 311.15
        self.temperatures.autoignition = 483.15
        self.temperatures.freeze = 233.15
self.temperatures.boiling = 0.0 # K | 0 | 0 | 0 |
16debd4d29462d434b807ba86b378c378d06005d | 4,253 | py | Python | classo/tests/test_stability_selection.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 20 | 2020-10-01T08:18:08.000Z | 2021-07-30T09:21:23.000Z | classo/tests/test_stability_selection.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 14 | 2020-11-12T14:39:20.000Z | 2021-01-06T15:59:14.000Z | classo/tests/test_stability_selection.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 5 | 2020-09-27T20:22:01.000Z | 2021-01-17T18:41:50.000Z | """
done
"""
import numpy as np
from ..misc_functions import random_data
from ..stability_selection import (
stability,
biggest_indexes,
selected_param,
build_submatrix,
build_subset,
)
| 21.265 | 87 | 0.565013 | """
done
"""
import numpy as np
from ..misc_functions import random_data
from ..stability_selection import (
stability,
biggest_indexes,
selected_param,
build_submatrix,
build_subset,
)
def _assert_same_members(expected, actual):
    """Order-insensitive comparison used by the biggest_indexes tests."""
    assert set(expected) == set(actual)
    assert len(expected) == len(actual)


def test_biggest_indexes_empty():
    values = np.array([], dtype=float)
    _assert_same_members([], biggest_indexes(values, 0))


def test_biggest_indexes_empty_big_q():
    values = np.array([], dtype=float)
    _assert_same_members([], biggest_indexes(values, 4))


def test_biggest_indexes_len_less_than_q():
    values = np.array([4, -1, 2], dtype=float)
    _assert_same_members([0, 1, 2], biggest_indexes(values, 4))


def test_biggest_indexes_len_more_than_q():
    values = np.array([4, -1, 2, 0, -6, 10], dtype=float)
    _assert_same_members([5, 4, 0], biggest_indexes(values, 3))


def test_biggest_indexes_negative_q():
    values = np.array([1, 2, 3], dtype=float)
    _assert_same_members([], biggest_indexes(values, -5))
def test_build_subset():
    n, n_sub = 20, 5
    chosen = build_subset(n, n_sub)
    # Exactly n_sub indices, all within [0, n).
    assert len(chosen) == n_sub
    for idx in chosen:
        assert idx in range(n)


def test_build_submatrix():
    full = (np.array([[1, 3, 2], [5, 6, -2]]), np.array([3, 6, 0]), np.array([1, 2]))
    sub = build_submatrix(full, np.array([1]))
    expected = (np.array([[5, 6, -2]]), np.array([3, 6, 0]), np.array([2]))
    for got, want in zip(sub, expected):
        assert np.all(got == want)


def test_selected_param():
    distribution = np.array([2.0, 0.1, 7.0, 14])
    selected, labels = selected_param(distribution, 10.0, 0.3)
    assert np.all(selected == np.array([False, False, False, True]))
    assert np.all(labels == np.array([True, False, True, True]))
def test_stability_lam_R1_parameters_independance_and_seed_dependance():
    """With a fixed seed, rho/rho_classification/e must not affect 'lam' results."""
    A = np.ones((10, 30)) + np.arange(-15, 15) + np.arange(-5, 5)[:, np.newaxis]
    C = np.zeros((2, 30))
    y = np.arange(10)
    matrix = (A, C, y)
    shared = dict(
        StabSelmethod="lam",
        q=3,
        B=20,
        lam=0.01,
        percent_nS=0.2,
        formulation="R1",
        seed=14,
        true_lam=False,
    )
    result1 = stability(matrix, rho=6.7, rho_classification=-26.0, e=24.0, **shared)
    result2 = stability(matrix, rho=1.2345, rho_classification=-3.0, e=3.0, **shared)
    print(result1)
    print(result2)
    assert np.all(result1 == result2)
def test_stability_max_R2_between_0_and_1():
    """'max' stability scores are selection frequencies and must lie in [0, 1]."""
    A = np.ones((10, 30)) + np.arange(-15, 15) + np.arange(-5, 5)[:, np.newaxis]
    matrix = (A, np.zeros((2, 30)), np.arange(10))
    scores = stability(
        matrix,
        StabSelmethod="max",
        numerical_method="DR",
        q=3,
        B=20,
        percent_nS=0.2,
        formulation="R2",
        seed=24,
        rho=1.5,
        rho_classification=-26.0,
        true_lam=True,
        e=24.0,
    )
    assert np.all((0.0 <= scores) & (scores <= 1.0))
def test_stability_first_C1_not_too_high_distribution():
    """The 'first' selection distribution cannot sum to more than q."""
    A = np.ones((25, 30)) + np.arange(-15, 15) + np.arange(-10, 15)[:, np.newaxis]
    C = np.zeros((2, 30))
    y = np.array([1, -1, 1, 1, -1] * 5)
    q = 5
    distribution, _, _ = stability(
        (A, C, y),
        StabSelmethod="first",
        numerical_method="P-PDS",
        q=q,
        B=10,
        percent_nS=0.4,
        formulation="C1",
        seed=24,
        rho=6.7,
        rho_classification=-26.0,
        true_lam=True,
        e=24.0,
    )
    assert np.sum(distribution) <= q
| 3,780 | 0 | 253 |
414b8a5e4144f82f23e5fd6e14cc032c37be389b | 1,226 | py | Python | plantuml2freemind/generators/plantuml.py | teners/plantuml2freemind | 9b2235315f68830aa06c96913b33729e3876878a | [
"MIT"
] | 6 | 2019-11-03T09:08:47.000Z | 2021-02-17T18:23:40.000Z | plantuml2freemind/generators/plantuml.py | teners/plantuml2freemind | 9b2235315f68830aa06c96913b33729e3876878a | [
"MIT"
] | 20 | 2019-10-26T10:02:38.000Z | 2021-01-18T19:00:39.000Z | plantuml2freemind/generators/plantuml.py | teners/plantuml2freemind | 9b2235315f68830aa06c96913b33729e3876878a | [
"MIT"
] | 4 | 2019-11-03T15:15:22.000Z | 2021-08-13T09:49:52.000Z | from typing import Union
from plantuml2freemind.custom_types import ChildNode, RootNode
AnyNode = Union[RootNode, ChildNode]
def convert_tree_into_puml(root: RootNode) -> str:
    """Generate PlantUML mindmap from tree."""
    body = generate_puml_node(root, 1)
    if root.right:
        body = concat_pumls(body, generate_branch(root.right, 2))
    # The "left side" marker is always emitted, even with no left branch.
    body += "\nleft side"
    if root.left:
        body = concat_pumls(body, generate_branch(root.left, 2))
    return "\n".join(("@startmindmap", body, "@endmindmap"))
| 26.652174 | 68 | 0.676998 | from typing import Union
from plantuml2freemind.custom_types import ChildNode, RootNode
AnyNode = Union[RootNode, ChildNode]
def entry(tree: RootNode) -> str:
    """Public entry point: render a mindmap tree as PlantUML text."""
    return convert_tree_into_puml(root=tree)


def concat_pumls(lhs: str, rhs: str):
    """Join two PlantUML fragments with a newline."""
    return "\n".join((lhs, rhs))


def generate_markdown(node: AnyNode) -> str:
    """Render a node's link and text as a PlantUML hyperlink."""
    return f"[[{node.link} {node.text}]]"


def generate_puml_node(node: AnyNode, level: int) -> str:
    """Render one node as a single mindmap line at the given depth."""
    if node.link:
        label = generate_markdown(node)
    else:
        label = node.text
    marker = "_" if node.style == "fork" else ""
    return f"{'*' * level}{marker} {label}"


def generate_branch(subtree: ChildNode, level) -> str:
    """Recursively render a subtree, one level deeper per generation."""
    rendered = generate_puml_node(subtree, level)
    for child in subtree.children:
        rendered = concat_pumls(rendered, generate_branch(child, level + 1))
    return rendered


def convert_tree_into_puml(root: RootNode) -> str:
    """Generate PlantUML mindmap from tree."""
    body = generate_puml_node(root, 1)
    if root.right:
        body = concat_pumls(body, generate_branch(root.right, 2))
    # The "left side" marker is always emitted, even with no left branch.
    body += "\nleft side"
    if root.left:
        body = concat_pumls(body, generate_branch(root.left, 2))
    return "\n".join(("@startmindmap", body, "@endmindmap"))
| 577 | 0 | 115 |
a6e309d759882ae214533e71454929bcf33ff790 | 1,308 | py | Python | scregmin/market/base.py | IBM/supply-chain-regret-minimization | 2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97 | [
"Apache-2.0"
] | 1 | 2021-09-23T10:14:37.000Z | 2021-09-23T10:14:37.000Z | scregmin/market/base.py | IBM/supply-chain-regret-minimization | 2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97 | [
"Apache-2.0"
] | null | null | null | scregmin/market/base.py | IBM/supply-chain-regret-minimization | 2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97 | [
"Apache-2.0"
] | 1 | 2022-02-16T17:58:49.000Z | 2022-02-16T17:58:49.000Z | #
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
from typing import Optional, Callable
import numpy as np
from abc import ABC, abstractmethod
| 23.781818 | 66 | 0.647554 | #
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
from typing import Optional, Callable
import numpy as np
from abc import ABC, abstractmethod
class BaseMarket(ABC):
    """Abstract market that maps a retail price to a realized demand."""

    def __init__(self, seed: Optional[int] = None):
        # Default seed is derived deterministically from the word "market".
        self.seed = sum(map(ord, "market")) if seed is None else seed
        self.reset_rng()

    def reset_rng(self, seed: Optional[int] = None):
        """Re-initialize the RNG, defaulting to the stored seed."""
        effective_seed = self.seed if seed is None else seed
        self.rng = np.random.RandomState(effective_seed)

    @abstractmethod
    def act(self, retail_price: float) -> float:
        raise NotImplementedError


class RandomMarket(BaseMarket):
    """Market whose demand is uniform random in [0, 1), price-independent."""

    def act(self, retail_price: float) -> float:
        return self.rng.random()


class ConstantMarket(BaseMarket):
    """Market that always returns the same fixed demand."""

    def __init__(self, demand: float):
        self.demand = demand
        super().__init__()

    def act(self, retail_price: float) -> float:
        return self.demand


class DeterministicMarket(BaseMarket):
    """Market whose demand is a deterministic function of the price."""

    def __init__(self, demand_function: Callable[[float], float]):
        self.demand_function = demand_function
        super().__init__()

    def act(self, retail_price: float) -> float:
        return self.demand_function(retail_price)
| 747 | 149 | 227 |
5d24f0ed413a689ffc32f747c1458d00713d4d16 | 1,645 | py | Python | name_normalization.py | datatemplar/name_normalization | d5e30fda092ecb23d8e50f8f19ac794d583b6ffb | [
"MIT"
] | null | null | null | name_normalization.py | datatemplar/name_normalization | d5e30fda092ecb23d8e50f8f19ac794d583b6ffb | [
"MIT"
] | null | null | null | name_normalization.py | datatemplar/name_normalization | d5e30fda092ecb23d8e50f8f19ac794d583b6ffb | [
"MIT"
] | null | null | null | ## Created the 05/03/19
## By Data Templar
##
## The purpose of this script is to remove all space in all files
## and folders' name. So we can share the "server url" directly
try:
from tqdm import tqdm
tqdmimport = True
except ImportError as e:
tqdmimport = False
print("enable to import the progress bar value. Please install tqdm if you want a progress bar")
import argparse, sys, re, os, time
# Argument creation
parser = argparse.ArgumentParser()
parser.add_argument("--begin", help="The folder you want to make a modification by default its the current folder", default=os.getcwd())
args = parser.parse_args()
root_dir = args.begin
print(root_dir)
with open(root_dir+"\log.csv",'w',encoding='utf-8') as target:
scanRecurse(root_dir,target)
| 19.583333 | 136 | 0.64924 | ## Created the 05/03/19
## By Data Templar
##
## The purpose of this script is to remove all space in all files
## and folders' name. So we can share the "server url" directly
try:
from tqdm import tqdm
tqdmimport = True
except ImportError as e:
tqdmimport = False
print("enable to import the progress bar value. Please install tqdm if you want a progress bar")
import argparse, sys, re, os, time
# Argument creation: --begin selects the root folder to process
# (defaults to the current working directory).
parser = argparse.ArgumentParser()
parser.add_argument("--begin", help="The folder you want to make a modification by default its the current folder", default=os.getcwd())
args = parser.parse_args()
root_dir = args.begin
# Echo the chosen root so the user can confirm the target before renames begin.
print(root_dir)
def removeescape(dir,file,log):
filename = file
filename = filename.replace(" -","-")
filename = filename.replace("- ","-")
filename = filename.replace(" _","_")
filename = filename.replace("_ ","_")
filename = filename.replace(" ","_")
#print("Transformation of %s in %s" % (file,filename))
try:
os.rename(os.path.join(dir,file),os.path.join(dir,filename))
except Exception as e:
print("The file %s is open and can't be modified" % (file))
print("The %s is open and can't be modified,%s,%s" % (file,os.path.join(dir,filename),e),file=log)
def scanRecurse(dir,log):
for entry in os.scandir(dir):
if entry.is_file():
removeescape(dir,entry.name,log)
else:
scanRecurse(entry.path,log)
removeescape(dir,entry.name,log)
# BUG FIX: root_dir + "\log.csv" embedded a literal backslash, producing a
# broken path on non-Windows systems; os.path.join is portable.
with open(os.path.join(root_dir, "log.csv"), 'w', encoding='utf-8') as target:
    scanRecurse(root_dir, target)
| 806 | 0 | 46 |
a717e02a010955c24e30086ae9f1b8f5f03bb753 | 3,780 | py | Python | turbo-codes/src/channelcoding/interleavers.py | tripods-xai/isit-2022 | 024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd | [
"MIT"
] | 1 | 2022-02-23T14:59:14.000Z | 2022-02-23T14:59:14.000Z | turbo-codes/src/channelcoding/interleavers.py | tripods-xai/isit-2022 | 024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd | [
"MIT"
] | null | null | null | turbo-codes/src/channelcoding/interleavers.py | tripods-xai/isit-2022 | 024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd | [
"MIT"
] | null | null | null | import abc
import tensorflow as tf
from tensor_annotations import tensorflow as ttf
from tensor_annotations import axes
from src.channelcoding.dataclasses import FixedPermuteInterleaverSettings, RandomPermuteInterleaverSettings
from .codes import Code
from .types import Batch, Time, Channels
| 36.346154 | 132 | 0.680159 | import abc
import tensorflow as tf
from tensor_annotations import tensorflow as ttf
from tensor_annotations import axes
from src.channelcoding.dataclasses import FixedPermuteInterleaverSettings, RandomPermuteInterleaverSettings
from .codes import Code
from .types import Batch, Time, Channels
class Interleaver(Code):
    """Abstract base for interleavers: codes that must also expose an
    inverse (deinterleave) operation and an optional per-batch state reset."""
    @abc.abstractmethod
    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        """Invert the interleaving applied by this code's forward call."""
        pass
    def reset(self):
        """Clear any per-batch state; no-op for stateless interleavers."""
        pass
class FixedPermuteInterleaver(Interleaver):
    """Interleaver that applies one fixed permutation along the time axis."""

    def __init__(self, block_len: int, permutation=None, depermutation=None, name: str = 'FixedPermuteInterleaver'):
        """Create a fixed interleaver of length block_len.

        Args:
            block_len: length of the time axis being permuted.
            permutation: optional explicit permutation; random if omitted.
            depermutation: optional explicit inverse permutation; computed
                from `permutation` if omitted. Not validated against it.
        """
        super().__init__(name)
        self.block_len = block_len
        if permutation is None:
            self.permutation = tf.random.shuffle(tf.range(block_len))
        else:
            self.permutation = permutation
        if depermutation is None:
            self.depermutation = tf.math.invert_permutation(self.permutation)
        else:
            # No validation is done.
            # BUG FIX: this branch previously assigned `permutation`, so an
            # explicitly supplied depermutation was silently ignored and
            # deinterleave() re-applied the forward permutation instead of
            # inverting it.
            self.depermutation = depermutation

    @property
    def num_input_channels(self):
        # Channel-count agnostic: only the time axis is touched.
        return None

    @property
    def num_output_channels(self):
        return None

    def __len__(self):
        return self.block_len

    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        """Permute the time axis (axis 1) of msg."""
        return tf.gather(msg, self.permutation, axis=1)

    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        """Invert call() by gathering with the inverse permutation."""
        return tf.gather(msg, self.depermutation, axis=1)

    def settings(self) -> FixedPermuteInterleaverSettings:
        return FixedPermuteInterleaverSettings(permutation=self.permutation, block_len=self.block_len, name=self.name)
class RandomPermuteInterleaver(Interleaver):
    """Interleaver drawing a fresh, independent permutation per batch element.

    Permutations are generated lazily on the first call() and cached until
    reset(), so call() and deinterleave() within one batch stay consistent.
    """
    def __init__(self, block_len: int, name: str = 'RandomPermuteInterleaver'):
        super().__init__(name)
        self.block_len = block_len
        # Lazily populated per-batch permutations; None means "not yet set".
        self._permutation = None
        self._depermutation = None
    @property
    def num_input_channels(self):
        # Channel-count agnostic: only the time axis is touched.
        return None
    @property
    def num_output_channels(self):
        return None
    def __len__(self):
        return self.block_len
    def generate_permutations(self, batch_size):
        """Build (batch_size, block_len) permutation/inverse tensors."""
        ta_perm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        ta_deperm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        for i in tf.range(batch_size):
            permutation = tf.random.shuffle(tf.range(self.block_len))
            ta_perm = ta_perm.write(i, permutation)
            ta_deperm = ta_deperm.write(i, tf.math.invert_permutation(permutation))
        return ta_perm.stack(), ta_deperm.stack()
    def set(self, msg: ttf.Tensor3[Batch, Time, Channels]):
        # Generate permutations once per batch, sized from the message.
        if self._permutation is None:
            batch_size = tf.shape(msg)[0]
            self._permutation, self._depermutation = self.generate_permutations(batch_size)
    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        """Permute the time axis, using a distinct permutation per batch row."""
        self.set(msg)
        return tf.gather(msg, self._permutation, axis=1, batch_dims=1)
    def reset(self):
        """Drop cached permutations so the next batch gets fresh ones."""
        self._permutation = None
        self._depermutation = None
    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        """Invert call() using the cached per-row inverse permutations."""
        return tf.gather(msg, self._depermutation, axis=1, batch_dims=1)
    def settings(self) -> RandomPermuteInterleaverSettings:
        return RandomPermuteInterleaverSettings(block_len=self.block_len, name=self.name)
| 2,696 | 704 | 68 |
423effbf1de80387bb260a7df69b4773bc08c050 | 719 | py | Python | 07_algorithm/search_algorithm.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | 1 | 2022-03-06T13:03:56.000Z | 2022-03-06T13:03:56.000Z | 07_algorithm/search_algorithm.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | 07_algorithm/search_algorithm.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | # 顺序查找
# Binary search (iterative); the input list must already be sorted.
# Binary search (recursive); precondition: the input list is already sorted.
| 17.975 | 52 | 0.566064 | # 顺序查找
def sequence_search(list_a, v):
    """Linear search: return the index of the first element equal to v, or -1.

    BUG FIX: the original iterated the *values* (`for i in list_a`) and then
    indexed with them (`list_a[i]`), raising IndexError or returning wrong
    positions for most inputs.
    """
    for i, item in enumerate(list_a):
        if v == item:
            return i
    return -1
# Binary search (iterative); the input list must already be sorted.
def binary_search(list_a, v):
    """Iterative binary search on a sorted list; returns the index or -1."""
    lo = 0
    hi = len(list_a) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = list_a[middle]
        if candidate == v:
            return middle
        if candidate > v:
            hi = middle - 1
        else:
            lo = middle + 1
    return -1
# Binary search (recursive); precondition: the input list is already sorted.
def binary_search2(list_a, v):
    """Recursive binary search on a sorted list; returns the index or -1.

    BUG FIXES vs. the original:
    - left recursion used list_a[start:mid - 1], dropping the element just
      before mid; now list_a[:mid].
    - right recursion used list_a[mid + 1:end], dropping the last element,
      and returned the slice-relative index instead of the absolute one;
      now list_a[mid + 1:] with the mid + 1 offset added back.
    - removed the leftover debug print of the list.
    """
    if not list_a:
        return -1
    mid = (len(list_a) - 1) // 2
    if list_a[mid] == v:
        return mid
    if list_a[mid] > v:
        return binary_search2(list_a[:mid], v)
    offset = binary_search2(list_a[mid + 1:], v)
    return -1 if offset == -1 else mid + 1 + offset
| 617 | 0 | 66 |
6dff3afdaa9e6c52c2ab0c0b36822139082ba4a9 | 1,792 | py | Python | tests/test_watcher.py | mk-fg/aetcd | ad133dba914ed12e1c35ce4318de80b13626f4f6 | [
"Apache-2.0"
] | 2 | 2022-02-02T15:18:04.000Z | 2022-03-16T06:39:26.000Z | tests/test_watcher.py | mk-fg/aetcd | ad133dba914ed12e1c35ce4318de80b13626f4f6 | [
"Apache-2.0"
] | 6 | 2022-01-12T05:29:15.000Z | 2022-01-18T00:29:57.000Z | tests/test_watcher.py | mk-fg/aetcd | ad133dba914ed12e1c35ce4318de80b13626f4f6 | [
"Apache-2.0"
] | 2 | 2022-01-11T14:34:18.000Z | 2022-01-28T15:53:20.000Z | import asyncio
import pytest
import aetcd
import aetcd.exceptions
import aetcd.watcher
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| 28.903226 | 89 | 0.709821 | import asyncio
import pytest
import aetcd
import aetcd.exceptions
import aetcd.watcher
@pytest.mark.asyncio
async def test_watch_with_exception_during_watch(mocker, etcd, rpc_error):
    """A gRPC UNAVAILABLE error arriving mid-watch must surface as ConnectionFailedError."""
    # Delivers an UNAVAILABLE error to the watcher callback shortly after
    # the watch is established, simulating a server failure mid-stream.
    async def pass_exception_to_callback(callback):
        await asyncio.sleep(1)
        e = rpc_error(aetcd.rpc.StatusCode.UNAVAILABLE)
        await callback(e)
    task = None
    # Replacement for Watcher.add_callback: schedules the error delivery and
    # returns a minimal WatcherCallback so the watch call itself succeeds.
    async def add_callback_mock(*args, **kwargs):
        nonlocal task
        callback = args[1]
        task = asyncio.get_event_loop().create_task(
            pass_exception_to_callback(callback))
        watcher_callback = aetcd.watcher.WatcherCallback(callback)
        watcher_callback.watch_id = 1
        return watcher_callback
    watcher_mock = mocker.AsyncMock()
    watcher_mock.add_callback = add_callback_mock
    etcd._watcher = watcher_mock
    # Iterating the watch should raise once the injected error is delivered.
    with pytest.raises(aetcd.exceptions.ConnectionFailedError):
        async for _ in await etcd.watch(b'key'):
            pass
    # Await the helper so no task is left dangling in the event loop.
    await task
@pytest.mark.asyncio
async def test_watch_with_timeout_on_connect(mocker, rpc_error):
    """Opening a watch whose stream fails with DEADLINE_EXCEEDED should
    raise ``ConnectionTimeoutError``."""
    deadline_error = rpc_error(aetcd.rpc.StatusCode.DEADLINE_EXCEEDED)
    failing_stream = mocker.AsyncMock()
    failing_stream.__aiter__.side_effect = deadline_error
    stub_watch = mocker.Mock(return_value=failing_stream)
    async with aetcd.Client(timeout=3) as etcd:
        # Replace the gRPC Watch stub so iteration fails immediately.
        etcd._watcher._watchstub.Watch = stub_watch
        with pytest.raises(aetcd.exceptions.ConnectionTimeoutError):
            async for _ in await etcd.watch(b'key'):
                pass
@pytest.mark.asyncio
async def test_watcher_with_wrong_kind(mocker):
    """``add_callback`` must reject a ``kind`` that is not an EventKind."""
    expected_message = 'an instance of EventKind should be provided'
    watcher_under_test = aetcd.watcher.Watcher(mocker.Mock())
    with pytest.raises(TypeError, match=expected_message):
        # Passing a plain string instead of an EventKind member.
        await watcher_under_test.add_callback(b'key', None, kind='put')
| 1,568 | 0 | 66 |
968b3c5b9e66befae4f442bcc76628041d1f37f0 | 7,036 | py | Python | app/utmbill/utm_pay_statistic.py | ds-vologdin/utm_sugarcrm_report | f2f1afba6ecdf0ec3817d1aef7b3c5a2776e249f | [
"MIT"
] | null | null | null | app/utmbill/utm_pay_statistic.py | ds-vologdin/utm_sugarcrm_report | f2f1afba6ecdf0ec3817d1aef7b3c5a2776e249f | [
"MIT"
] | null | null | null | app/utmbill/utm_pay_statistic.py | ds-vologdin/utm_sugarcrm_report | f2f1afba6ecdf0ec3817d1aef7b3c5a2776e249f | [
"MIT"
] | null | null | null | from itertools import groupby
from datetime import date, timedelta
from operator import itemgetter
from sqlalchemy import func
from .utm_db import session_utm
from .models import PaymentTransaction, BalanceHistory, User
from .helpers import get_timestamp_from_date
def fetch_pays_from_utm(date_begin, date_end):
    '''Fetch payment transactions from the UTM database, grouped per day.

    Selects ``(payment_enter_date, payment_absolute)`` rows with
    ``method == 5`` whose timestamp lies in ``[date_begin, date_end]``
    (both dates inclusive) and aggregates them via ``group_pays_by_date``.
    '''
    # The upper bound is compared with "<", so extend by one day to make
    # date_end itself inclusive.
    date_begin_timestamp = get_timestamp_from_date(date_begin)
    date_end_timestamp = get_timestamp_from_date(date_end + timedelta(days=1))
    pays_raw = session_utm.query(
        PaymentTransaction.payment_enter_date,
        PaymentTransaction.payment_absolute
    ).filter(
        # method == 5 — meaning of this payment-method code is not visible
        # here; presumably a particular payment channel — TODO confirm.
        PaymentTransaction.method == 5,
        PaymentTransaction.payment_enter_date >= date_begin_timestamp,
        PaymentTransaction.payment_enter_date < date_end_timestamp
    ).all()
    return group_pays_by_date(pays_raw)
def calculate_pays_stat_periods(pays, report_periods):
    '''Compute payment statistics for every period in ``report_periods``.

    ``pays`` is a list of per-day dicts with keys ``date``, ``summ`` and
    ``count``.  For each ``(date_begin, date_end)`` pair a dict with the
    period totals, the rounded average payment (ARPU) and the absolute and
    percentage delta versus the previous period is produced.  When monthly
    balance statistics are available they are merged into each dict.
    '''
    pays_periods_dicts = []
    # sum_tmp / count_tmp hold the previous period's totals so the delta
    # against the preceding reporting period can be computed.
    sum_tmp = 0
    count_tmp = 0
    for date_begin, date_end in report_periods:
        # Daily records falling into the half-open range [date_begin, date_end).
        pays_period = [
            pay for pay in pays
            if (pay.get('date') >= date_begin and pay.get('date') < date_end)
        ]
        # Total amount and number of payments for the period.
        summ = 0
        count = 0
        for pay in pays_period:
            summ += pay.get('summ', 0)
            count += pay.get('count', 0)
        # Average payment value (ARPU).
        avg_pay = summ/count if count > 0 else 0
        # Change relative to the previous reporting period; zero for the
        # first period (or after an empty one).
        sum_dif = 0
        sum_dif_p = 0
        count_dif = 0
        count_dif_p = 0
        if sum_tmp > 0 and count_tmp > 0:
            sum_dif = summ - sum_tmp
            sum_dif_p = sum_dif*100/sum_tmp
            count_dif = count - count_tmp
            count_dif_p = count_dif*100/count_tmp
        sum_tmp = summ
        count_tmp = count
        pays_periods_dicts.append(
            {'date': date_begin,
             'summ': summ,
             'count': count,
             'avg': round(avg_pay, 2),
             'sum_dif': sum_dif,
             'sum_dif_p': round(sum_dif_p, 2),
             'count_dif': count_dif,
             'count_dif_p': round(count_dif_p, 2),
             }
        )
    # Request monthly statistics: outgoing balance and number of active
    # subscribers at the start of each month.
    # If the report is not monthly, balances_periods is None.
    balances_periods = fetch_balances_periods(report_periods)
    # Merge pays_periods_dicts with balances_periods (pairwise by position).
    if not balances_periods:
        return pays_periods_dicts
    for pays_period_dict, balance_period in zip(pays_periods_dicts, balances_periods):
        pays_period_dict.update(
            {
                'count_active': balance_period.get('count', 0),
                'avg_balance': balance_period.get('avg', 0),
                'avg_balance_all': balance_period.get('avg_all', 0),
                'sum_balance': balance_period.get('summ', 0),
            }
        )
    return pays_periods_dicts
def calculate_summary_statistic_pays(pays_stat_periods, report_periods, last):
    '''Compute summary statistics over all payments.

    ``pays_stat_periods`` is the per-period output of
    ``calculate_pays_stat_periods``.  When ``last`` is an empty string all
    periods contribute to the averages; otherwise the final period
    (usually incomplete) is excluded from the per-period averages.
    '''
    summ_pay, count_pay = 0, 0
    for pay in pays_stat_periods:
        summ_pay += pay['summ']
        count_pay += pay['count']
    if last == '':
        count_period = len(report_periods)
        avg_summ = summ_pay/count_period if count_period > 0 else 0
        avg_count = count_pay/count_period if count_period > 0 else 0
    else:
        # Ignore the last month (it is most often incomplete).
        count_period = len(report_periods) - 1
        avg_summ = (summ_pay - pays_stat_periods[-1]['summ'])/count_period \
            if count_period > 0 else 0
        avg_count = (count_pay - pays_stat_periods[-1]['count'])/count_period \
            if count_period > 0 else 0
    avg_pays = summ_pay/count_pay if count_pay > 0 else 0
    return {
        'summ': summ_pay,
        'count': count_pay,
        'avg_summ': avg_summ,
        'avg_count': avg_count,
        'avg_pay': avg_pays,  # ARPU
    }
def fetch_balances_periods(report_periods):
    '''Compute balance statistics for the given reporting periods.

    Returns one dict per period with the number of active subscribers,
    their average/total outgoing balance and an all-subscriber figure, or
    ``None`` when the report is not monthly.
    '''
    # Only computed when the report is monthly (first period >= 28 days).
    date_begin, date_end = report_periods[0]
    if (date_end - date_begin) < timedelta(days=28):
        return
    balances_dicts = []
    for date_begin, date_end in report_periods:
        # Count subscribers with a non-negative balance carried over into
        # the current month and their average balance; balances are sampled
        # on the first day of the period.
        timestamp_begin = get_timestamp_from_date(date_begin)
        timestamp_end = get_timestamp_from_date(date_begin+timedelta(days=1))
        active_balance = session_utm.query(
            func.count(BalanceHistory.out_balance),
            func.avg(BalanceHistory.out_balance),
            func.sum(BalanceHistory.out_balance),
        ).join(
            User, BalanceHistory.account_id == User.basic_account
        ).filter(
            BalanceHistory.date >= timestamp_begin,
            BalanceHistory.date < timestamp_end,
            # Presumably only real subscribers have 5-digit logins —
            # TODO confirm against the login numbering scheme.
            User.login.op('~')('^\d\d\d\d\d$'),
            BalanceHistory.out_balance >= 0,
            BalanceHistory.out_balance < 15000
        ).all()
        # Same aggregates over all subscribers (negative balances included,
        # bounded to exclude extreme outliers).
        all_balance = session_utm.query(
            func.count(BalanceHistory.out_balance),
            func.avg(BalanceHistory.out_balance),
            func.sum(BalanceHistory.out_balance),
        ).join(
            User, BalanceHistory.account_id == User.basic_account
        ).filter(
            BalanceHistory.date >= timestamp_begin,
            BalanceHistory.date < timestamp_end,
            User.login.op('~')('^\d\d\d\d\d$'),
            BalanceHistory.out_balance > -15000,
            BalanceHistory.out_balance < 15000
        ).all()
        # Each aggregate query yields a single (count, avg, sum) row.
        count, avg, summ = active_balance[0] if len(active_balance) == 1 else (0, 0, 0)
        # NOTE(review): [0][0] is the COUNT column of the (count, avg, sum)
        # row, yet it is stored under 'avg_all' — looks like [0][1] (the
        # average) was intended; confirm before relying on 'avg_balance_all'.
        avg_all = all_balance[0][0] if len(all_balance) == 1 else 0
        balances_dicts.append(
            {'date': date_begin,
             'count': count,
             'avg': avg,
             'summ': summ,
             'avg_all': avg_all,
             }
        )
    return balances_dicts
| 34.490196 | 87 | 0.629193 | from itertools import groupby
from datetime import date, timedelta
from operator import itemgetter
from sqlalchemy import func
from .utm_db import session_utm
from .models import PaymentTransaction, BalanceHistory, User
from .helpers import get_timestamp_from_date
def group_pays_by_date(pays_raw):
    """Aggregate raw ``(timestamp, amount)`` payment rows per calendar day.

    Returns a list of dicts ``{'date', 'summ', 'count'}`` — one per
    distinct day, ordered by first occurrence in ``pays_raw``.

    Fix: the previous implementation used ``itertools.groupby``, which only
    merges *adjacent* equal keys; the feeding query has no ORDER BY, so
    same-day rows that were not adjacent produced duplicate date entries.
    A dict keyed by day merges them regardless of input order (dicts
    preserve insertion order, so the first-seen ordering is kept).
    """
    totals = {}  # day -> [running sum, running count]
    for timestamp, payment in pays_raw:
        day = date.fromtimestamp(timestamp)
        entry = totals.setdefault(day, [0, 0])
        entry[0] += payment
        entry[1] += 1
    return [
        {'date': day, 'summ': summ, 'count': count}
        for day, (summ, count) in totals.items()
    ]
def fetch_pays_from_utm(date_begin, date_end):
    '''Load payment data from the UTM database, aggregated per day.'''
    # Query the half-open timestamp interval [date_begin, date_end + 1 day)
    # so that date_end itself is included.
    ts_from = get_timestamp_from_date(date_begin)
    ts_to = get_timestamp_from_date(date_end + timedelta(days=1))
    query = session_utm.query(
        PaymentTransaction.payment_enter_date,
        PaymentTransaction.payment_absolute
    ).filter(
        PaymentTransaction.method == 5,
        PaymentTransaction.payment_enter_date >= ts_from,
        PaymentTransaction.payment_enter_date < ts_to
    )
    return group_pays_by_date(query.all())
def calculate_pays_stat_periods(pays, report_periods):
    '''Compute payment statistics for every period in ``report_periods``.

    ``pays`` is a list of per-day dicts with keys ``date``, ``summ`` and
    ``count`` (as produced by ``group_pays_by_date``).  For each
    ``(date_begin, date_end)`` pair a dict with the period totals, the
    rounded average payment (ARPU) and the absolute and percentage delta
    versus the previous period is produced.  When monthly balance
    statistics are available they are merged into each dict.
    '''
    pays_periods_dicts = []
    # sum_tmp / count_tmp hold the previous period's totals so the delta
    # against the preceding reporting period can be computed.
    sum_tmp = 0
    count_tmp = 0
    for date_begin, date_end in report_periods:
        # Daily records falling into the half-open range [date_begin, date_end).
        pays_period = [
            pay for pay in pays
            if (pay.get('date') >= date_begin and pay.get('date') < date_end)
        ]
        # Total amount and number of payments for the period.
        summ = 0
        count = 0
        for pay in pays_period:
            summ += pay.get('summ', 0)
            count += pay.get('count', 0)
        # Average payment value (ARPU).
        avg_pay = summ/count if count > 0 else 0
        # Change relative to the previous reporting period; zero for the
        # first period (or after an empty one).
        sum_dif = 0
        sum_dif_p = 0
        count_dif = 0
        count_dif_p = 0
        if sum_tmp > 0 and count_tmp > 0:
            sum_dif = summ - sum_tmp
            sum_dif_p = sum_dif*100/sum_tmp
            count_dif = count - count_tmp
            count_dif_p = count_dif*100/count_tmp
        sum_tmp = summ
        count_tmp = count
        pays_periods_dicts.append(
            {'date': date_begin,
             'summ': summ,
             'count': count,
             'avg': round(avg_pay, 2),
             'sum_dif': sum_dif,
             'sum_dif_p': round(sum_dif_p, 2),
             'count_dif': count_dif,
             'count_dif_p': round(count_dif_p, 2),
             }
        )
    # Request monthly statistics: outgoing balance and number of active
    # subscribers at the start of each month.
    # If the report is not monthly, balances_periods is None.
    balances_periods = fetch_balances_periods(report_periods)
    # Merge pays_periods_dicts with balances_periods (pairwise by position).
    if not balances_periods:
        return pays_periods_dicts
    for pays_period_dict, balance_period in zip(pays_periods_dicts, balances_periods):
        pays_period_dict.update(
            {
                'count_active': balance_period.get('count', 0),
                'avg_balance': balance_period.get('avg', 0),
                'avg_balance_all': balance_period.get('avg_all', 0),
                'sum_balance': balance_period.get('summ', 0),
            }
        )
    return pays_periods_dicts
def calculate_summary_statistic_pays(pays_stat_periods, report_periods, last):
    '''Compute summary statistics over all payments.

    When ``last`` is an empty string every period contributes to the
    averages; otherwise the final (usually incomplete) period is excluded
    from the per-period averages.
    '''
    summ_pay = sum(period['summ'] for period in pays_stat_periods)
    count_pay = sum(period['count'] for period in pays_stat_periods)
    exclude_last = last != ''
    if exclude_last:
        count_period = len(report_periods) - 1
    else:
        count_period = len(report_periods)
    if count_period > 0:
        # The last period's totals are subtracted only when it is excluded.
        tail_summ = pays_stat_periods[-1]['summ'] if exclude_last else 0
        tail_count = pays_stat_periods[-1]['count'] if exclude_last else 0
        avg_summ = (summ_pay - tail_summ) / count_period
        avg_count = (count_pay - tail_count) / count_period
    else:
        avg_summ = 0
        avg_count = 0
    avg_pays = summ_pay / count_pay if count_pay > 0 else 0
    return {
        'summ': summ_pay,
        'count': count_pay,
        'avg_summ': avg_summ,
        'avg_count': avg_count,
        'avg_pay': avg_pays,  # ARPU
    }
def fetch_balances_periods(report_periods):
    '''Compute balance statistics for the given reporting periods.

    Returns one dict per period with the number of active subscribers,
    their average/total outgoing balance and the average balance over all
    subscribers, or ``None`` when the report is not monthly.
    '''
    # Only computed when the report is monthly (first period >= 28 days).
    date_begin, date_end = report_periods[0]
    if (date_end - date_begin) < timedelta(days=28):
        return
    balances_dicts = []
    for date_begin, date_end in report_periods:
        # Count subscribers with a non-negative balance carried over into
        # the current month and their average balance; balances are sampled
        # on the first day of the period.
        timestamp_begin = get_timestamp_from_date(date_begin)
        timestamp_end = get_timestamp_from_date(date_begin+timedelta(days=1))
        active_balance = session_utm.query(
            func.count(BalanceHistory.out_balance),
            func.avg(BalanceHistory.out_balance),
            func.sum(BalanceHistory.out_balance),
        ).join(
            User, BalanceHistory.account_id == User.basic_account
        ).filter(
            BalanceHistory.date >= timestamp_begin,
            BalanceHistory.date < timestamp_end,
            # Presumably only real subscribers have 5-digit logins —
            # TODO confirm against the login numbering scheme.
            User.login.op('~')('^\d\d\d\d\d$'),
            BalanceHistory.out_balance >= 0,
            BalanceHistory.out_balance < 15000
        ).all()
        # Same aggregates over all subscribers (negative balances included,
        # bounded to exclude extreme outliers).
        all_balance = session_utm.query(
            func.count(BalanceHistory.out_balance),
            func.avg(BalanceHistory.out_balance),
            func.sum(BalanceHistory.out_balance),
        ).join(
            User, BalanceHistory.account_id == User.basic_account
        ).filter(
            BalanceHistory.date >= timestamp_begin,
            BalanceHistory.date < timestamp_end,
            User.login.op('~')('^\d\d\d\d\d$'),
            BalanceHistory.out_balance > -15000,
            BalanceHistory.out_balance < 15000
        ).all()
        # Each aggregate query yields a single (count, avg, sum) row.
        count, avg, summ = active_balance[0] if len(active_balance) == 1 else (0, 0, 0)
        # Fix: take the AVG column ([0][1]) of the (count, avg, sum) row;
        # the previous [0][0] stored the subscriber COUNT under 'avg_all'.
        avg_all = all_balance[0][1] if len(all_balance) == 1 else 0
        balances_dicts.append(
            {'date': date_begin,
             'count': count,
             'avg': avg,
             'summ': summ,
             'avg_all': avg_all,
             }
        )
    return balances_dicts
| 504 | 0 | 23 |
1a0b37b7ab331524552e7acf4a43f29f90b6ba57 | 7,022 | py | Python | examples/permutation_tree.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 116 | 2020-02-28T10:29:22.000Z | 2022-03-22T12:19:39.000Z | examples/permutation_tree.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 253 | 2020-02-17T16:18:56.000Z | 2022-03-30T16:55:02.000Z | examples/permutation_tree.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 27 | 2020-03-02T21:07:41.000Z | 2022-03-08T08:33:23.000Z | """
Block Permutations
=============================
The permutations of :class:`hyppo.independence.Dcorr` can be restricted to appropriately match known
dependencies of samples under the null distribution (i.e. multilevel and longitudinal data). Without such modifications, calculated
pvalues are invalid as the default space of permutations are misspecified.
In order to restrict the permutations, we pass in a list of group labels. Each column
is a list of labels which partitions the observations by shared label into blocks and
multiple columns repeat this process recursively. At each level, blocks are exchangeable
unless the label is a negative number, in which case it is fixed, and all elements of
the final blocks are exchangeable. This defines the space of allowable permutations
and the :math:`Y` matrix is permuted accordingly.
The block labels used in this notebook are visualized below, corresponding to data
where observations are dependent within pairs. Because of the :math:`Y` values in our
2-sample testing case, block labels of :math:`[1, 1, 2, 2, \ldots]` would also have been
allowable for both cases but would lead to unnecessary permutations being computed.
As shown in the following figures, pvalues under the default permutations are heavily
skewed and certainly not uniform, thus presenting either an inflated false positive rate or potentially incredibly low power. When the permutations are restricted, the pvalues under
the null distribution are empirically approximately uniformly distributed, as we would hope for. 95% binomial
proportion confidence interval error bars are displayed on the histogram of empirical
p-values for each bin.
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial.distance import pdist, squareform
# custom seaborn plot options to make the figures pretty
sns.set(color_codes=True, style="white", context="notebook", font_scale=1.25)
PALETTE = sns.color_palette("Set1")
sns.set_palette(PALETTE[3:])
import warnings
warnings.filterwarnings("ignore")
from hyppo.independence import Dcorr
def simulate_2sample_null(n1, n2=None, d=100, group_std=0.1, seed=None):
    """
    Simulates a set of paired observations for a 2-sample test.

    n1,n2 : size of the two groups. Are both n1 if n2 is None
    d : dimension of observations
    group_std : standard deviation of normal distribution around group mean
    seed : seed for numpy's global RNG (deterministic output when set)

    Returns an array of shape (2 * (n1 + n2), d): two noisy draws per mean.
    """
    np.random.seed(seed)
    if n2 is None:
        # Documented behavior: both groups have size n1 when n2 is omitted.
        # (Previously this path crashed with a NameError because mus2 was
        # only assigned inside "if n2 is not None".)
        n2 = n1
    # Means for each observation
    mus1 = np.random.normal(0, 1, (n1, d))
    mus2 = np.random.normal(0, 1, (n2, d))
    # Paired observations: two draws around each mean with group_std noise
    X = np.vstack(
        [np.random.normal(mu, group_std, (2, d)) for mu in mus1]
        + [np.random.normal(mu, group_std, (2, d)) for mu in mus2]
    )
    return X
# Simulation parameters
n1 = 25
n2 = 25
d = 100
group_std = 0.1
# Labels
Y_within = np.asarray([0, 1] * (n1 + n2))
Y_across = np.hstack(([0] * n1 * 2, [1] * n2 * 2))
# Permutation tree blocks
blocks_within = -1 * np.hstack([[i + 1] * 2 for i in range(n1 + n2)]).T
blocks_across = np.c_[
np.hstack([[i + 1] * 2 for i in range(n1 + n2)]), -1 * Y_within - 1
]
# Test params
test_blocks = [None, None, blocks_within, blocks_across]
test_names = [
"Unrestricted within",
"Unrestricted across",
"Restricted within",
"Restricted across",
]
test_Ys = [Y_within, Y_across, Y_within, Y_across]
# Plot permutation tree blocks figure
fig, axes = plt.subplots(1, 2, figsize=(6, 6))
for ax, data in zip(axes, (blocks_within[:, np.newaxis], blocks_across)):
ax.matshow(data[:10], cmap="Set3")
for (i, j), z in np.ndenumerate(data[:10]):
ax.text(j, i, "{:}".format(int(z)), ha="center", va="center", fontsize=20)
ax.set_xticks([])
ax.set_yticks([])
axes[0].set_title("Within", fontsize=30)
axes[1].set_title("Across", fontsize=30)
plt.suptitle("Permutation Tree Blocks", y=1.07, fontsize=30)
plt.show()
# Independence tests figures
N_DATASETS = 100
REPS = 100
test_results = defaultdict(list)
for i in range(N_DATASETS):
X = simulate_2sample_null(n1, n2, d, group_std, seed=i)
for test, block, Y in zip(test_names, test_blocks, test_Ys):
_, pval = Dcorr().test(
X,
Y,
reps=REPS,
workers=-1,
perm_blocks=block,
)
test_results[test].append(pval)
# fig, axes = plt.subplots(2,3, figsize=(4, 4*len(data_dict.keys())))
fig = plt.figure(figsize=(16, 8))
# Show data example
ax = fig.add_subplot(241)
X = simulate_2sample_null(n1, n2, d, group_std, seed=0)[:10, :]
X = squareform(pdist(X))
heatmap = ax.pcolor(X, cmap=plt.cm.Blues)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("X distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 10], orientation="horizontal")
# Plot Y matrices
ax = fig.add_subplot(242)
heatmap = ax.pcolor(squareform(pdist(Y_within[:10, np.newaxis])), cmap=plt.cm.Blues)
# ax.colorbar(heatmap)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("Y within distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 1], orientation="horizontal")
ax = fig.add_subplot(246)
heatmap = ax.pcolor(
squareform(pdist(np.hstack((Y_across[:5], Y_across[-5:]))[:, np.newaxis])),
cmap=plt.cm.Blues,
)
# ax.colorbar(heatmap)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("Y across distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 1], orientation="horizontal")
# Plot pvalue histograms and errorbars using binomial CIs
ax = None
for i, test_name in zip([3, 7, 4, 8], test_names):
ax = fig.add_subplot(int(str(f"24{i}"))) # , sharey=ax)
n = len(test_results[test_name])
entries, edges, _ = ax.hist(
test_results[test_name],
bins=np.arange(0, 1.1, 0.1),
weights=np.ones(n) / n,
color="b",
)
# entries = height of each column = proportion in that bin
# calculate bin centers
bin_centers = 0.5 * (edges[:-1] + edges[1:])
ax.axhline(y=sum(entries) / len(bin_centers), ls="--", c="#333333")
# errorbars are binomial proportion confidence intervals
ax.errorbar(
bin_centers,
entries,
yerr=1.96 * np.sqrt(entries * (1 - entries) / n),
fmt=".",
c="#333333",
)
ax.set_title(f"{test_name} pvalues")
# ax.set_xlim(0,1)
if i in [3, 4]:
ax.set_xticks([])
else:
ax.set_xticks([0, 1])
if i in [4, 8]:
ax.set_yticks([0, 0.1])
else:
ax.set_yticks([0, 0.1, 1])
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.show()
| 32.660465 | 181 | 0.687269 | """
Block Permutations
=============================
The permutations of :class:`hyppo.independence.Dcorr` can be restricted to appropriately match known
dependencies of samples under the null distribution (i.e. multilevel and longitudinal data). Without such modifications, calculated
pvalues are invalid as the default space of permutations are misspecified.
In order to restrict the permutations, we pass in a list of group labels. Each column
is a list of labels which partitions the observations by shared label into blocks and
multiple columns repeat this process recursively. At each level, blocks are exchangeable
unless the label is a negative number, in which case it is fixed, and all elements of
the final blocks are exchangeable. This defines the space of allowable permutations
and the :math:`Y` matrix is permuted accordingly.
The block labels used in this notebook are visualized below, corresponding to data
where observations are dependent within pairs. Because of the :math:`Y` values in our
2-sample testing case, block labels of :math:`[1, 1, 2, 2, \ldots]` would also have been
allowable for both cases but would lead to unnecessary permutations being computed.
As shown in the following figures, pvalues under the default permutations are heavily
skewed and certainly not uniform, thus presenting either an inflated false positive rate or potentially incredibly low power. When the permutations are restricted, the pvalues under
the null distribution are empirically approximately uniformly distributed, as we would hope for. 95% binomial
proportion confidence interval error bars are displayed on the histogram of empirical
p-values for each bin.
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial.distance import pdist, squareform
# custom seaborn plot options to make the figures pretty
sns.set(color_codes=True, style="white", context="notebook", font_scale=1.25)
PALETTE = sns.color_palette("Set1")
sns.set_palette(PALETTE[3:])
import warnings
warnings.filterwarnings("ignore")
from hyppo.independence import Dcorr
def simulate_2sample_null(n1, n2=None, d=100, group_std=0.1, seed=None):
    """
    Simulates a set of paired observations for a 2-sample test.

    n1,n2 : size of the two groups. Are both n1 if n2 is None
    d : dimension of observations
    group_std : standard deviation of normal distribution around group mean
    seed : seed for numpy's global RNG (deterministic output when set)

    Returns an array of shape (2 * (n1 + n2), d): two noisy draws per mean.
    """
    np.random.seed(seed)
    if n2 is None:
        # Documented behavior: both groups have size n1 when n2 is omitted.
        # (Previously this path crashed with a NameError because mus2 was
        # only assigned inside "if n2 is not None".)
        n2 = n1
    # Means for each observation
    mus1 = np.random.normal(0, 1, (n1, d))
    mus2 = np.random.normal(0, 1, (n2, d))
    # Paired observations: two draws around each mean with group_std noise
    X = np.vstack(
        [np.random.normal(mu, group_std, (2, d)) for mu in mus1]
        + [np.random.normal(mu, group_std, (2, d)) for mu in mus2]
    )
    return X
# Simulation parameters
n1 = 25
n2 = 25
d = 100
group_std = 0.1
# Labels
Y_within = np.asarray([0, 1] * (n1 + n2))
Y_across = np.hstack(([0] * n1 * 2, [1] * n2 * 2))
# Permutation tree blocks
blocks_within = -1 * np.hstack([[i + 1] * 2 for i in range(n1 + n2)]).T
blocks_across = np.c_[
np.hstack([[i + 1] * 2 for i in range(n1 + n2)]), -1 * Y_within - 1
]
# Test params
test_blocks = [None, None, blocks_within, blocks_across]
test_names = [
"Unrestricted within",
"Unrestricted across",
"Restricted within",
"Restricted across",
]
test_Ys = [Y_within, Y_across, Y_within, Y_across]
# Plot permutation tree blocks figure
fig, axes = plt.subplots(1, 2, figsize=(6, 6))
for ax, data in zip(axes, (blocks_within[:, np.newaxis], blocks_across)):
ax.matshow(data[:10], cmap="Set3")
for (i, j), z in np.ndenumerate(data[:10]):
ax.text(j, i, "{:}".format(int(z)), ha="center", va="center", fontsize=20)
ax.set_xticks([])
ax.set_yticks([])
axes[0].set_title("Within", fontsize=30)
axes[1].set_title("Across", fontsize=30)
plt.suptitle("Permutation Tree Blocks", y=1.07, fontsize=30)
plt.show()
# Independence tests figures
N_DATASETS = 100
REPS = 100
test_results = defaultdict(list)
for i in range(N_DATASETS):
X = simulate_2sample_null(n1, n2, d, group_std, seed=i)
for test, block, Y in zip(test_names, test_blocks, test_Ys):
_, pval = Dcorr().test(
X,
Y,
reps=REPS,
workers=-1,
perm_blocks=block,
)
test_results[test].append(pval)
# fig, axes = plt.subplots(2,3, figsize=(4, 4*len(data_dict.keys())))
fig = plt.figure(figsize=(16, 8))
# Show data example
ax = fig.add_subplot(241)
X = simulate_2sample_null(n1, n2, d, group_std, seed=0)[:10, :]
X = squareform(pdist(X))
heatmap = ax.pcolor(X, cmap=plt.cm.Blues)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("X distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 10], orientation="horizontal")
# Plot Y matrices
ax = fig.add_subplot(242)
heatmap = ax.pcolor(squareform(pdist(Y_within[:10, np.newaxis])), cmap=plt.cm.Blues)
# ax.colorbar(heatmap)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("Y within distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 1], orientation="horizontal")
ax = fig.add_subplot(246)
heatmap = ax.pcolor(
squareform(pdist(np.hstack((Y_across[:5], Y_across[-5:]))[:, np.newaxis])),
cmap=plt.cm.Blues,
)
# ax.colorbar(heatmap)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("Y across distance matrix")
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)
plt.colorbar(heatmap, cax=cax, ticks=[0, 1], orientation="horizontal")
# Plot pvalue histograms and errorbars using binomial CIs
ax = None
for i, test_name in zip([3, 7, 4, 8], test_names):
ax = fig.add_subplot(int(str(f"24{i}"))) # , sharey=ax)
n = len(test_results[test_name])
entries, edges, _ = ax.hist(
test_results[test_name],
bins=np.arange(0, 1.1, 0.1),
weights=np.ones(n) / n,
color="b",
)
# entries = height of each column = proportion in that bin
# calculate bin centers
bin_centers = 0.5 * (edges[:-1] + edges[1:])
ax.axhline(y=sum(entries) / len(bin_centers), ls="--", c="#333333")
# errorbars are binomial proportion confidence intervals
ax.errorbar(
bin_centers,
entries,
yerr=1.96 * np.sqrt(entries * (1 - entries) / n),
fmt=".",
c="#333333",
)
ax.set_title(f"{test_name} pvalues")
# ax.set_xlim(0,1)
if i in [3, 4]:
ax.set_xticks([])
else:
ax.set_xticks([0, 1])
if i in [4, 8]:
ax.set_yticks([0, 0.1])
else:
ax.set_yticks([0, 0.1, 1])
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.show()
| 0 | 0 | 0 |
beee0f29e4efad9e9fbd3242ac7119dc8ff3e8a8 | 5,624 | py | Python | elliot/recommender/latent_factor_models/CML/CML_model.py | swapUniba/Elliot_refactor-tesi-Ventrella | 3ddffc041696c90a6f6d3e8906c212fc4f55f842 | [
"Apache-2.0"
] | null | null | null | elliot/recommender/latent_factor_models/CML/CML_model.py | swapUniba/Elliot_refactor-tesi-Ventrella | 3ddffc041696c90a6f6d3e8906c212fc4f55f842 | [
"Apache-2.0"
] | null | null | null | elliot/recommender/latent_factor_models/CML/CML_model.py | swapUniba/Elliot_refactor-tesi-Ventrella | 3ddffc041696c90a6f6d3e8906c212fc4f55f842 | [
"Apache-2.0"
] | 1 | 2021-06-02T06:57:07.000Z | 2021-06-02T06:57:07.000Z | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Felice Antonio Merra, Vito Walter Anelli, Claudio Pomo'
__email__ = 'felice.merra@poliba.it, vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.random.set_seed(0)
| 38.520548 | 105 | 0.569701 | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Felice Antonio Merra, Vito Walter Anelli, Claudio Pomo'
__email__ = 'felice.merra@poliba.it, vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.random.set_seed(0)
class CML_model(keras.Model):
def __init__(self,
user_factors=200,
item_factors=200,
learning_rate=0.001,
l_w=0, l_b=0, margin=0.5,
num_users=100,
num_items=100,
name="CML",
**kwargs):
super().__init__(name=name, **kwargs)
tf.random.set_seed(42)
self._user_factors = user_factors
self._item_factors = item_factors
self._learning_rate = learning_rate
self.l_w = l_w
self.l_b = l_b
self._num_items = num_items
self._num_users = num_users
self.initializer = tf.initializers.GlorotUniform()
self.Gu = LatentFactor(num_instances=self._num_users,
dim=self._user_factors,
name='user_latent_factor')
self.Gi = LatentFactor(num_instances=self._num_items,
dim=self._item_factors,
name='item_latent_factor')
self.Bi = LatentFactor(num_instances=self._num_items,
dim=1,
name='item_bias')
self.margin = margin
self.optimizer = tf.optimizers.Adam(self._learning_rate)
@tf.function
def call(self, inputs, training=None):
user, item = inputs
beta_i = tf.squeeze(self.Bi(item))
gamma_u = tf.squeeze(self.Gu(user))
gamma_i = tf.squeeze(self.Gi(item))
l2_user_pos = tf.math.reduce_sum(tf.math.square(gamma_u - gamma_i),
axis=-1,
keepdims=True)
score = (-l2_user_pos) + beta_i
return score, beta_i, gamma_u, gamma_i
@tf.function
def train_step(self, batch):
user, pos, neg = batch
with tf.GradientTape() as tape:
# Clean Inference
xu_pos, beta_pos, gamma_u, gamma_pos = self(inputs=(user, pos), training=True)
xu_neg, beta_neg, gamma_u, gamma_neg = self(inputs=(user, neg), training=True)
difference = tf.clip_by_value(xu_pos - xu_neg, -80.0, 1e8)
loss = tf.reduce_sum(tf.maximum(self.margin - difference, 0))
# Regularization Component
reg_loss = self.l_w * tf.reduce_sum([tf.nn.l2_loss(gamma_u),
tf.nn.l2_loss(gamma_pos),
tf.nn.l2_loss(gamma_neg)]) \
+ self.l_b * tf.nn.l2_loss(beta_pos) \
+ self.l_b * tf.nn.l2_loss(beta_neg) / 10
# Loss to be optimized
loss += reg_loss
# grads = tape.gradient(loss, [self.Bi, self.Gu, self.Gi])
# self.optimizer.apply_gradients(zip(grads, [self.Bi, self.Gu, self.Gi]))
grads = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
return loss
@tf.function
def predict(self, start, stop, **kwargs):
# return self.Bi + tf.matmul(self.Gu[start:stop], self.Gi, transpose_b=True)
user_vec = self.Gu.embeddings[start:stop]
return -tf.math.reduce_sum(
tf.math.square(tf.expand_dims(user_vec, axis=1) - self.Gi.variables[0]), axis=-1,
keepdims=False) + tf.reshape(self.Bi.variables[0], [-1])
@tf.function
def get_top_k(self, predictions, train_mask, k=100):
return tf.nn.top_k(tf.where(train_mask, predictions, -np.inf), k=k, sorted=True)
@tf.function
def get_positions(self, predictions, train_mask, items, inner_test_user_true_mask):
predictions = tf.gather(predictions, inner_test_user_true_mask)
train_mask = tf.gather(train_mask, inner_test_user_true_mask)
equal = tf.reshape(items, [len(items), 1])
i = tf.argsort(tf.where(train_mask, predictions, -np.inf), axis=-1,
direction='DESCENDING', stable=False, name=None)
positions = tf.where(tf.equal(equal, i))[:, 1]
return 1 - (positions / tf.reduce_sum(tf.cast(train_mask, tf.int64), axis=1))
def get_config(self):
raise NotImplementedError
class LatentFactor(tf.keras.layers.Embedding):
def __init__(self, num_instances, dim, zero_init=False, name=None):
if zero_init:
initializer = 'zeros'
else:
initializer = 'uniform'
super(LatentFactor, self).__init__(input_dim=num_instances,
output_dim=dim,
embeddings_initializer=initializer,
name=name)
def censor(self, censor_id):
unique_censor_id, _ = tf.unique(censor_id)
embedding_gather = tf.gather(self.variables[0], indices=unique_censor_id)
norm = tf.norm(embedding_gather, axis=1, keepdims=True)
return self.variables[0].scatter_nd_update(indices=tf.expand_dims(unique_censor_id, 1),
updates=embedding_gather / tf.math.maximum(norm, 0.1))
| 4,858 | 307 | 100 |
907c84e95c172d32d4dda5e59167ab3f5c67d431 | 27,608 | py | Python | composer/core/state.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 945 | 2021-10-13T16:24:20.000Z | 2022-03-31T21:21:54.000Z | composer/core/state.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 544 | 2021-10-13T20:23:27.000Z | 2022-03-31T02:47:54.000Z | composer/core/state.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 39 | 2021-10-13T14:33:33.000Z | 2022-03-31T11:13:19.000Z | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The state of the trainer."""
from __future__ import annotations
import collections.abc
import logging
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Sequence, Union, cast
import torch
import torch.nn.modules.utils
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from composer.core.precision import Precision
from composer.core.serializable import Serializable
from composer.core.time import Time, Timestamp, TimeUnit
from composer.utils import batch_get, batch_set, dist, ensure_tuple
if TYPE_CHECKING:
import deepspeed
import composer.core.types as types
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.evaluator import Evaluator
from composer.profiler import Profiler
__all__ = ["State"]
logger = logging.getLogger(__name__)
_STATE_DICT_SERIALIZED_ATTRIBUTES = [
    # List of attributes that are serialized with state_dict
    # Only the attributes listed in state.serialized_attributes will actually be saved.
    # Each attribute named here holds an object (or tuple of objects) that exposes its
    # own state_dict()/load_state_dict(); State serializes them keyed by class name.
    # Attributes NOT in this list (e.g. rank_zero_seed) are stored as plain values.
    "model",
    "optimizers",
    "schedulers",
    "algorithms",
    "callbacks",
    "scaler",
    "timestamp",
]
class State(Serializable):
"""The state of the trainer.
Contains variables that the trainer tracks throughout the training loop. Note that all the necessary parts (i.e.,
:attr:`serialized_attributes`) of state are serialized when the trainer is checkpointed so that it can be used
restore the trainer and continue training from a checkpoint. :mod:`~composer.algorithms` are able to modify an
instance of this class in-place.
.. note::
An instance of this class is automatically constructed by the :class:`~.Trainer` constructor. A user need
not instantiate this class.
Args:
model (torch.nn.Module): The model, typically as a subclass of :class:`~.ComposerModel`.
rank_zero_seed (int): The seed used on the rank zero process. It is assumed that each rank's seed is
``rank_zero_seed + dist.get_global_rank()``.
grad_accum (int, optional): The number of gradient accumulation steps to use. With this argument, micro batch
size for each device becomes ``microbatch_size = train_batch_size / (num_devices * grad_accum)``.
train_dataloader (types.DataLoader, optional): Dataloader used for training
evaluators (Evalutor | Evaluators, optional): :class:`.Evaluator` used for evaluation.
dataloader (types.DataLoader, optional): The active DataLoader.
dataloader_len (int | Time[int], optional): The number of batches per dataloader iteration (e.g. epoch).
The trainer will yield the first ``dataloader_len`` batches per iteration. If ``-1`` (the default),
the entire dataloader will be iterated over.
dataloader_label (str, optional): The name for the dataloader. Required if ``dataloader`` is specified.
(default: ``None``)
By convention, the training dataloader is called ``'train'``. The evaluator dataloader is called
``'eval'``, or when multiple evaluators are used, the name of the evaluator.
max_duration (str | Time, optional): The maximum duration to train for. (default: ``None``)
precision (str | Precision): The numerical precision to use for training. See :class:`~.Precision` for
the supported precisions.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): The optimizer being used to
train the model. Multiple optimizers are not currently supported.
schedulers (types.PyTorchScheduler | Sequence[types.PyTorchScheduler], optional):
The learning rate scheduler (can also be a list or tuple of schedulers).
scaler (torch.cuda.amp.GradScaler, optional): The gradient scaler in use for mixed precision training.
algorithms (Algorithm | Sequence[Algorithm], optional): The algorithms used for training.
callbacks (Callback | Sequence[Callback], optional): The callbacks used for training.
profiler (Optional[Profiler]): The Composer profiler.
Attributes:
batch (types.Batch): The batch. This will be the entire batch during the :attr:`.Event.AFTER_DATALOADER`, or a
microbatch between :attr:`.Event.BATCH_START` and :attr:`.Event.BATCH_END`.
current_metrics (Dict[str, Dict[str, Any]]): The current computed metrics, organized by dataloader label
and then by metric name. The train dataloader is labeled ``'train'``. If not using an :class:`.Evaluator`,
the eval dataloader is labeled ``'eval'``. Otherwise, the evaluator label is used.
For example:
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval': {'Accuracy': tensor(...)}}
Or, when using an :class:`.Evaluator`:
.. testsetup::
eval_1_dl = eval_dataloader
eval_2_dl = eval_dataloader
>>> from torchmetrics import Accuracy
>>> from composer.core import Evaluator
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=[
... Evaluator(label='eval1', dataloader=eval_1_dl, metrics=Accuracy()),
... Evaluator(label='eval2', dataloader=eval_2_dl, metrics=Accuracy()),
... ],
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval1': {'Accuracy': tensor(...)}, 'eval2': {'Accuracy': tensor(...)}}
eval_timestamp (Timestamp): The timestamp for the current evaluation dataloader. This timestamp is reset
before the dataloader is evaluated. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
grad_accum (int): The number of gradient accumulation steps per batch.
loss (torch.Tensor | Sequence[torch.Tensor]): The most recently computed loss.
model (torch.nn.Module): The training model.
.. note::
When using DeepSpeed or multi-rank training, the model will be wrapped with
:class:`~deepspeed.DeepSpeedEngine` or :class:`~torch.nn.parallel.DistributedDataParallel`,
respectively.
outputs (torch.Tensor | Sequence[torch.Tensor]): The most recently computed output from the model's forward
pass.
predict_timestamp (Timestamp): The timestamp for the current prediction dataloader. This timestamp is reset
before the dataloader is used. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
profiler (Profiler): The profiler (if profiling is enabled), or ``None`` if not profiling.
rank_zero_seed (int): The seed of the rank zero process.
scaler (torch.cuda.amp.GradScaler): The gradient scaler if using mixed-precision training, or
``None`` if not using mixed-precision training.
serialized_attributes (List[str]): The names of the attribute which are serialized in a checkpoint.
By default, the following attributes are serialized:
+-----------------------+-------------------------------------------------------------+
| Attribute | Description |
+=======================+=============================================================+
| model | The model under training. |
+-----------------------+-------------------------------------------------------------+
| optimizers | The optimizers being used to train the model. |
+-----------------------+-------------------------------------------------------------+
| schedulers | The learning rate schedulers. |
+-----------------------+-------------------------------------------------------------+
| algorithms | The algorithms used for training. |
+-----------------------+-------------------------------------------------------------+
| callbacks | The callbacks used for training. |
+-----------------------+-------------------------------------------------------------+
| scaler | The gradient scaler in use for mixed precision training. |
+-----------------------+-------------------------------------------------------------+
| timestamp | The timestamp that tracks training loop progress. |
+-----------------------+-------------------------------------------------------------+
| rank_zero_seed | The seed of the rank zero process. |
+-----------------------+-------------------------------------------------------------+
| current_metrics | The current metrics. |
+-----------------------+-------------------------------------------------------------+
timestamp (Timestamp): The current training timestamp.
train_dataloader (Iterable): The training dataloader. (May be ``None`` if not training.)
"""
@property
def seed(self):
"""The seed for the current rank."""
return self.rank_zero_seed + dist.get_global_rank()
@property
def max_duration(self):
"""The maximum training duration."""
return self._max_duration
@max_duration.setter
def get_elapsed_duration(self) -> Optional[Time[float]]:
"""Get the elapsed training duration.
Returns:
Optional[Time[float]]: The elapsed duration, in :attr:`TimeUnit.DURATION`.
``Time(0.0, TimeUnit.DURATION)`` represents the beginning of training and ``Time(1.0, TimeUnit.DURATION)``
represents a completed training process. Returns ``None`` if ``max_duration`` is None.
"""
if self.max_duration is None:
return None
return self.timestamp.get(self.max_duration.unit) / self.max_duration
@property
def optimizers(self):
"""The optimizers."""
return self._optimizers
@optimizers.setter
@property
def schedulers(self):
"""The schedulers."""
return self._schedulers
@schedulers.setter
def batch_get_item(self, key: Union[str, int, Callable, Any]) -> Any:
"""Gets element from batch either specified by key or user-specified function.
See batch_get in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a
user-specified function to do the extracting. A pair of callables is also
supported for cases where a get and set function pair are both passed
(like in Algorithms). The getter is assumed to be the first of the pair.
Returns:
The part of the batch specified by the key. This could be any type
depending on what the batch is composed of.
"""
return batch_get(self.batch, key)
def batch_set_item(self, key: Union[str, int, Callable, Any], value: Any):
"""Sets the element specified by the key of the set_fn to the specified value.
This is not an in-place operation, as for tuple-typed batches, a new batch object
must be created to modify them.
See batch_set in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a user-specified
function to do the setting. A pair of callables is also supported for
cases where a get and set function pair are both passed (like in
Algorithms). The setter is assumed to be the second of the pair.
value (Any): The value that batch[key] or batch.key gets set to or that the
user-defined set function sets a part of the batch to.
Returns:
batch (Any): The updated batch with value set at key.
"""
self.batch = batch_set(self.batch, key=key, value=value)
@property
def callbacks(self):
"""The callbacks."""
return self._callbacks
@callbacks.setter
@property
def algorithms(self):
"""The algorithms."""
return self._algorithms
@algorithms.setter
@property
def evaluators(self):
"""The evaluators."""
return self._evaluators
@evaluators.setter
def load_model_state(self, state_dict: Dict[str, Any], strict: bool):
"""Loads the model's state from a ``state_dict``.
Args:
state_dict (Dict[str, Any]): The state dict, generated from a previous call to :meth:`state_dict`.
strict (bool): Whether the keys (i.e., model parameter names) in the model state dict should
perfectly match the keys in the model instance.
"""
if state_dict.get("is_model_ddp", False) and not self.is_model_ddp:
# This check is for backwards compatibility, as pre-v0.6.0 checkpoints serialized the state
# with the `module.` prefix
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict['model'], "module.")
missing_keys, unexpected_keys = self.model.load_state_dict(state_dict['model'], strict=strict)
if len(missing_keys) > 0:
logger.warning(f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}")
if len(unexpected_keys) > 0:
logger.warning(f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}")
def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
"""Loads the state.
Args:
state (Dict[str, Any]): object returned from call to :meth:`state_dict`.
strict (bool): whether the keys in the ``state["model"]`` should perfectly match the keys in the
``self.model``. Defaults to False.
"""
state = _ensure_backwards_compatible_checkpointing(state)
for attribute_name, serialized_value in state.items():
if attribute_name not in self.serialized_attributes:
# it's possible some attributes we removed
continue
if attribute_name == "model":
self.load_model_state(state, strict=strict)
continue
state_field_value = getattr(self, attribute_name)
if attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
for target in ensure_tuple(state_field_value):
if type(target).__qualname__ not in serialized_value:
warnings.warn(
f"{type(target).__qualname__} is not in the state_dict. Its state will not be restored.",
category=UserWarning)
continue
source = serialized_value[type(target).__qualname__]
target.load_state_dict(source)
else:
# direct serialization
try:
setattr(self, attribute_name, serialized_value)
except AttributeError:
# ignore AttributeError for properties that have getters but not setters.
pass
@property
def dataloader(self):
"""The active dataloader."""
return self._dataloader
@property
def dataloader_label(self):
"""The dataloader label for the active dataloader.
By default, the training dataloader is called ``'train'``. The evaluator dataloader
is called ``'eval'``, or when multiple evaluators are used, the name of the evaluator.
However, the dataloader label can be explicitely specified in :meth:`.Trainer.fit`
and :meth:`.Trainer.eval`.
Returns:
Optional[str]: The dataloader label, or None if no dataloader is set.
"""
return self._dataloader_label
def set_dataloader(
self,
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
):
"""Update the active dataloader and dataloader label.
Args:
dataloader (Iterable, optional): The dataloader. Defaults to None.
dataloader_label (str, optional): The dataloader label. Must be ``None`` if and only if
``dataloader`` is None. Defaults to None.
dataloader_len (int, int): The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
Set to ``-1`` to iterate over the entire dataset. (Default: ``-1``.)
"""
if dataloader is None:
dataloader_label = None
else:
if dataloader_label is None:
raise ValueError("If the `dataloader` is specified, then `dataloader_label` must not be None.")
self._dataloader = dataloader
self._dataloader_label = dataloader_label
if dataloader is not None:
self.dataloader_len = dataloader_len # setting it to -1 will do a failsafe read of len(dataloader)
else:
self._dataloader_len = None
@property
def dataloader_len(self):
"""The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
.. note::
If not explicitely specified, this value is an approximation, as it depends on ``len(self.dataloader)``.
See the :doc:`PyTorch DataLoader Documentation <torch:data>` for more information.
Returns:
Optional[Time[int]]: The number of batches per dataloader iteration (e.g. epoch), or None if no dataloader
is defined or if the dataloader has an unknown length (e.g. streaming dataloaders).
"""
return self._dataloader_len
@dataloader_len.setter
@property
def precision(self):
"""The numerical precision to use for training.
See :class:`~.Precision` for the supported precisions.
"""
return self._precision
@precision.setter
@property
def is_model_deepspeed(self) -> bool:
"""Whether :attr:`model` is an instance of a :class:`~deepspeed.DeepSpeedEngine`."""
try:
import deepspeed
except ImportError:
return False
else:
return isinstance(self.model, deepspeed.DeepSpeedEngine)
@property
def is_model_ddp(self):
"""Whether :attr:`model` is an instance of a :class:`.DistributedDataParallel`."""
return isinstance(self.model, DistributedDataParallel)
@property
def deepspeed_model(self) -> deepspeed.DeepSpeedEngine:
"""Cast :attr:`model` to :class:`~deepspeed.DeepSpeedEngine`."""
if self.is_model_deepspeed:
return cast("deepspeed.DeepSpeedEngine", self.model)
raise TypeError("state.model is not a DeepSpeed model")
| 45.333333 | 123 | 0.603412 | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The state of the trainer."""
from __future__ import annotations
import collections.abc
import logging
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Sequence, Union, cast
import torch
import torch.nn.modules.utils
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from composer.core.precision import Precision
from composer.core.serializable import Serializable
from composer.core.time import Time, Timestamp, TimeUnit
from composer.utils import batch_get, batch_set, dist, ensure_tuple
if TYPE_CHECKING:
import deepspeed
import composer.core.types as types
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.evaluator import Evaluator
from composer.profiler import Profiler
__all__ = ["State"]
logger = logging.getLogger(__name__)
def _ensure_backwards_compatible_checkpointing(state_dict: Dict[str, Any]):
# v0.4.1 removed the leading underscores for the keys in the state_dict
# It also renamed _is_model_ddp_wrapped to is_model_ddp
state = {}
for k, v in state_dict.items():
if k == "_is_model_ddp_wrapped":
k = "is_model_ddp"
if k.startswith("_"):
k = k[1:]
state[k] = v
return state
_STATE_DICT_SERIALIZED_ATTRIBUTES = [
    # List of attributes that are serialized with state_dict
    # Only the attributes listed in state.serialized_attributes will actually be saved.
    # Attributes named here hold objects (or tuples of objects) with their own
    # state_dict()/load_state_dict(); anything not listed is stored as a plain value.
    "model",
    "optimizers",
    "schedulers",
    "algorithms",
    "callbacks",
    "scaler",
    "timestamp",
]
class State(Serializable):
"""The state of the trainer.
Contains variables that the trainer tracks throughout the training loop. Note that all the necessary parts (i.e.,
:attr:`serialized_attributes`) of state are serialized when the trainer is checkpointed so that it can be used
restore the trainer and continue training from a checkpoint. :mod:`~composer.algorithms` are able to modify an
instance of this class in-place.
.. note::
An instance of this class is automatically constructed by the :class:`~.Trainer` constructor. A user need
not instantiate this class.
Args:
model (torch.nn.Module): The model, typically as a subclass of :class:`~.ComposerModel`.
rank_zero_seed (int): The seed used on the rank zero process. It is assumed that each rank's seed is
``rank_zero_seed + dist.get_global_rank()``.
grad_accum (int, optional): The number of gradient accumulation steps to use. With this argument, micro batch
size for each device becomes ``microbatch_size = train_batch_size / (num_devices * grad_accum)``.
train_dataloader (types.DataLoader, optional): Dataloader used for training
evaluators (Evalutor | Evaluators, optional): :class:`.Evaluator` used for evaluation.
dataloader (types.DataLoader, optional): The active DataLoader.
dataloader_len (int | Time[int], optional): The number of batches per dataloader iteration (e.g. epoch).
The trainer will yield the first ``dataloader_len`` batches per iteration. If ``-1`` (the default),
the entire dataloader will be iterated over.
dataloader_label (str, optional): The name for the dataloader. Required if ``dataloader`` is specified.
(default: ``None``)
By convention, the training dataloader is called ``'train'``. The evaluator dataloader is called
``'eval'``, or when multiple evaluators are used, the name of the evaluator.
max_duration (str | Time, optional): The maximum duration to train for. (default: ``None``)
precision (str | Precision): The numerical precision to use for training. See :class:`~.Precision` for
the supported precisions.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): The optimizer being used to
train the model. Multiple optimizers are not currently supported.
schedulers (types.PyTorchScheduler | Sequence[types.PyTorchScheduler], optional):
The learning rate scheduler (can also be a list or tuple of schedulers).
scaler (torch.cuda.amp.GradScaler, optional): The gradient scaler in use for mixed precision training.
algorithms (Algorithm | Sequence[Algorithm], optional): The algorithms used for training.
callbacks (Callback | Sequence[Callback], optional): The callbacks used for training.
profiler (Optional[Profiler]): The Composer profiler.
Attributes:
batch (types.Batch): The batch. This will be the entire batch during the :attr:`.Event.AFTER_DATALOADER`, or a
microbatch between :attr:`.Event.BATCH_START` and :attr:`.Event.BATCH_END`.
current_metrics (Dict[str, Dict[str, Any]]): The current computed metrics, organized by dataloader label
and then by metric name. The train dataloader is labeled ``'train'``. If not using an :class:`.Evaluator`,
the eval dataloader is labeled ``'eval'``. Otherwise, the evaluator label is used.
For example:
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval': {'Accuracy': tensor(...)}}
Or, when using an :class:`.Evaluator`:
.. testsetup::
eval_1_dl = eval_dataloader
eval_2_dl = eval_dataloader
>>> from torchmetrics import Accuracy
>>> from composer.core import Evaluator
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=[
... Evaluator(label='eval1', dataloader=eval_1_dl, metrics=Accuracy()),
... Evaluator(label='eval2', dataloader=eval_2_dl, metrics=Accuracy()),
... ],
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval1': {'Accuracy': tensor(...)}, 'eval2': {'Accuracy': tensor(...)}}
eval_timestamp (Timestamp): The timestamp for the current evaluation dataloader. This timestamp is reset
before the dataloader is evaluated. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
grad_accum (int): The number of gradient accumulation steps per batch.
loss (torch.Tensor | Sequence[torch.Tensor]): The most recently computed loss.
model (torch.nn.Module): The training model.
.. note::
When using DeepSpeed or multi-rank training, the model will be wrapped with
:class:`~deepspeed.DeepSpeedEngine` or :class:`~torch.nn.parallel.DistributedDataParallel`,
respectively.
outputs (torch.Tensor | Sequence[torch.Tensor]): The most recently computed output from the model's forward
pass.
predict_timestamp (Timestamp): The timestamp for the current prediction dataloader. This timestamp is reset
before the dataloader is used. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
profiler (Profiler): The profiler (if profiling is enabled), or ``None`` if not profiling.
rank_zero_seed (int): The seed of the rank zero process.
scaler (torch.cuda.amp.GradScaler): The gradient scaler if using mixed-precision training, or
``None`` if not using mixed-precision training.
serialized_attributes (List[str]): The names of the attribute which are serialized in a checkpoint.
By default, the following attributes are serialized:
+-----------------------+-------------------------------------------------------------+
| Attribute | Description |
+=======================+=============================================================+
| model | The model under training. |
+-----------------------+-------------------------------------------------------------+
| optimizers | The optimizers being used to train the model. |
+-----------------------+-------------------------------------------------------------+
| schedulers | The learning rate schedulers. |
+-----------------------+-------------------------------------------------------------+
| algorithms | The algorithms used for training. |
+-----------------------+-------------------------------------------------------------+
| callbacks | The callbacks used for training. |
+-----------------------+-------------------------------------------------------------+
| scaler | The gradient scaler in use for mixed precision training. |
+-----------------------+-------------------------------------------------------------+
| timestamp | The timestamp that tracks training loop progress. |
+-----------------------+-------------------------------------------------------------+
| rank_zero_seed | The seed of the rank zero process. |
+-----------------------+-------------------------------------------------------------+
| current_metrics | The current metrics. |
+-----------------------+-------------------------------------------------------------+
timestamp (Timestamp): The current training timestamp.
train_dataloader (Iterable): The training dataloader. (May be ``None`` if not training.)
"""
def __init__(
self,
# model
model: torch.nn.Module,
# determinism
rank_zero_seed: int,
# stopping conditions
max_duration: Optional[Union[str, Time[int]]] = None,
# data configurations
grad_accum: int = 1,
# dataloaders
train_dataloader: Optional[Iterable] = None,
evaluators: Optional[Union[Evaluator, Sequence[Evaluator]]] = None,
# these track the current 'active' dataloader
# depending on train, eval, or others
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
# precision
precision: Union[str, Precision] = Precision.FP32,
# optimizers
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
# scaler
scaler: Optional[torch.cuda.amp.grad_scaler.GradScaler] = None,
# algorithms and callbacks
algorithms: Optional[Union[Algorithm, Sequence[Algorithm]]] = None,
callbacks: Optional[Union[Callback, Sequence[Callback]]] = None,
):
self.rank_zero_seed = rank_zero_seed
self.model = model
self.grad_accum = grad_accum
self._dataloader_len = None
self._dataloader = None
self._dataloader_label = None
self.set_dataloader(dataloader, dataloader_label, dataloader_len)
self._max_duration = None
self.max_duration = max_duration
self.train_dataloader = train_dataloader
self._evaluators = list(ensure_tuple(evaluators))
self.timestamp = Timestamp()
self.eval_timestamp = Timestamp()
self.predict_timestamp = Timestamp()
self._precision = Precision(precision)
if optimizers is None:
self._optimizers = []
else:
self._optimizers = list(ensure_tuple(optimizers))
self._schedulers = []
self.scaler = scaler
self._algorithms = list(ensure_tuple(algorithms))
self._callbacks = list(ensure_tuple(callbacks))
self.profiler: Optional[Profiler] = None
# Set defaults for transient variables (to make pyright happy)
self.batch: Any = None
self.loss: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
self.outputs: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
# These attributes will be serialized using .state_dict(), and loaded with .load_state_dict()
# All other attributes will not be serialized.
# For simplicity, omit the leading underscore for private attributes.
# For example, even though the optimizers are stored on the state
# as the "_optimizers" attribute, here we specify just "optimizers"
self.serialized_attributes = [
"model",
"optimizers",
"schedulers",
"algorithms",
"callbacks",
"scaler",
"timestamp",
"rank_zero_seed",
"current_metrics",
]
self.current_metrics: Dict[str, Dict[str, Any]] = {}
    @property
    def seed(self):
        """The seed for the current rank: ``rank_zero_seed`` offset by the global rank."""
        return self.rank_zero_seed + dist.get_global_rank()
@property
def max_duration(self):
"""The maximum training duration."""
return self._max_duration
@max_duration.setter
def max_duration(self, max_duration: Optional[Union[str, Time[int]]]):
if max_duration is None:
self._max_duration = None
return
if isinstance(max_duration, str):
max_duration = cast(Time[int], Time.from_timestring(max_duration))
if max_duration.unit == TimeUnit.DURATION:
raise ValueError("TimeUnit.DURATION is not allowed as a unit for max_duration")
self._max_duration = max_duration
def get_elapsed_duration(self) -> Optional[Time[float]]:
"""Get the elapsed training duration.
Returns:
Optional[Time[float]]: The elapsed duration, in :attr:`TimeUnit.DURATION`.
``Time(0.0, TimeUnit.DURATION)`` represents the beginning of training and ``Time(1.0, TimeUnit.DURATION)``
represents a completed training process. Returns ``None`` if ``max_duration`` is None.
"""
if self.max_duration is None:
return None
return self.timestamp.get(self.max_duration.unit) / self.max_duration
@property
def optimizers(self):
"""The optimizers."""
return self._optimizers
@optimizers.setter
def optimizers(self, optimizers: Union[Optimizer, Sequence[Optimizer]]):
self._optimizers[:] = ensure_tuple(optimizers)
@property
def schedulers(self):
"""The schedulers."""
return self._schedulers
@schedulers.setter
def schedulers(self, schedulers: Union[types.PyTorchScheduler, Sequence[types.PyTorchScheduler]]):
self._schedulers[:] = ensure_tuple(schedulers)
def batch_get_item(self, key: Union[str, int, Callable, Any]) -> Any:
"""Gets element from batch either specified by key or user-specified function.
See batch_get in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a
user-specified function to do the extracting. A pair of callables is also
supported for cases where a get and set function pair are both passed
(like in Algorithms). The getter is assumed to be the first of the pair.
Returns:
The part of the batch specified by the key. This could be any type
depending on what the batch is composed of.
"""
return batch_get(self.batch, key)
def batch_set_item(self, key: Union[str, int, Callable, Any], value: Any):
"""Sets the element specified by the key of the set_fn to the specified value.
This is not an in-place operation, as for tuple-typed batches, a new batch object
must be created to modify them.
See batch_set in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a user-specified
function to do the setting. A pair of callables is also supported for
cases where a get and set function pair are both passed (like in
Algorithms). The setter is assumed to be the second of the pair.
value (Any): The value that batch[key] or batch.key gets set to or that the
user-defined set function sets a part of the batch to.
Returns:
batch (Any): The updated batch with value set at key.
"""
self.batch = batch_set(self.batch, key=key, value=value)
@property
def callbacks(self):
"""The callbacks."""
return self._callbacks
@callbacks.setter
def callbacks(self, callbacks: Sequence[Callback]):
self._callbacks[:] = callbacks
@property
def algorithms(self):
"""The algorithms."""
return self._algorithms
@algorithms.setter
def algorithms(self, algorithms: Sequence[Algorithm]):
self._algorithms[:] = algorithms
@property
def evaluators(self):
"""The evaluators."""
return self._evaluators
@evaluators.setter
def evaluators(self, evaluators: Union[Evaluator, Sequence[Evaluator]]):
self._evaluators[:] = list(ensure_tuple(evaluators))
    def state_dict(self) -> Dict[str, Any]:
        """Collect every attribute in :attr:`serialized_attributes` into a checkpointable dict."""
        state_dict = {}
        for attribute_name in self.serialized_attributes:
            attribute_value = getattr(self, attribute_name)
            if attribute_name == "model":
                # Save model directly instead of by class name, since model may be wrapped by DistributedDataParallel
                # If it is DDP wrapped, do not save the `module.` prefix, as that is an implementation detail
                model_state = attribute_value.state_dict()
                if self.is_model_ddp:
                    torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(model_state, "module.")
                serialized_value = model_state
            else:
                if attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
                    # Objects (or tuples of objects) exposing state_dict(): key each
                    # sub-state by class name so load_state_dict can match it back up.
                    serialized_value = {
                        type(obj).__qualname__: obj.state_dict() for obj in ensure_tuple(attribute_value)
                    }
                else:
                    # Plain values (e.g. rank_zero_seed, current_metrics) are stored directly.
                    serialized_value = attribute_value
            state_dict[attribute_name] = serialized_value
        return state_dict
    def load_model_state(self, state_dict: Dict[str, Any], strict: bool):
        """Loads the model's state from a ``state_dict``.
        Args:
            state_dict (Dict[str, Any]): The state dict, generated from a previous call to :meth:`state_dict`.
            strict (bool): Whether the keys (i.e., model parameter names) in the model state dict should
                perfectly match the keys in the model instance.
        """
        if state_dict.get("is_model_ddp", False) and not self.is_model_ddp:
            # This check is for backwards compatibility, as pre-v0.6.0 checkpoints serialized the state
            # with the `module.` prefix
            torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict['model'], "module.")
        missing_keys, unexpected_keys = self.model.load_state_dict(state_dict['model'], strict=strict)
        # Surface (but do not fail on) key mismatches; with strict=False these can
        # legitimately occur and are reported for the user to review.
        if len(missing_keys) > 0:
            logger.warning(f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}")
        if len(unexpected_keys) > 0:
            logger.warning(f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}")
    def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
        """Loads the state.
        Args:
            state (Dict[str, Any]): object returned from call to :meth:`state_dict`.
            strict (bool): whether the keys in the ``state["model"]`` should perfectly match the keys in the
                ``self.model``. Defaults to False.
        """
        state = _ensure_backwards_compatible_checkpointing(state)
        for attribute_name, serialized_value in state.items():
            if attribute_name not in self.serialized_attributes:
                # it's possible some attributes we removed
                continue
            if attribute_name == "model":
                # The model has dedicated handling (DDP prefix stripping, strictness).
                self.load_model_state(state, strict=strict)
                continue
            state_field_value = getattr(self, attribute_name)
            if attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
                # Restore each stateful object by matching its qualified class name
                # against the keys written by state_dict().
                for target in ensure_tuple(state_field_value):
                    if type(target).__qualname__ not in serialized_value:
                        # Missing entry: warn and leave the live object untouched.
                        warnings.warn(
                            f"{type(target).__qualname__} is not in the state_dict. Its state will not be restored.",
                            category=UserWarning)
                        continue
                    source = serialized_value[type(target).__qualname__]
                    target.load_state_dict(source)
            else:
                # direct serialization
                try:
                    setattr(self, attribute_name, serialized_value)
                except AttributeError:
                    # ignore AttributeError for properties that have getters but not setters.
                    pass
    @property
    def dataloader(self) -> Optional[Iterable]:
        """The active dataloader, or ``None`` when no dataloader has been set via :meth:`set_dataloader`."""
        return self._dataloader
    @property
    def dataloader_label(self) -> Optional[str]:
        """The dataloader label for the active dataloader.
        By default, the training dataloader is called ``'train'``. The evaluator dataloader
        is called ``'eval'``, or when multiple evaluators are used, the name of the evaluator.
        However, the dataloader label can be explicitly specified in :meth:`.Trainer.fit`
        and :meth:`.Trainer.eval`.
        Returns:
            Optional[str]: The dataloader label, or None if no dataloader is set.
        """
        return self._dataloader_label
    def set_dataloader(
        self,
        dataloader: Optional[Iterable] = None,
        dataloader_label: Optional[str] = None,
        dataloader_len: Union[int, Time[int]] = -1,
    ):
        """Update the active dataloader and dataloader label.
        Args:
            dataloader (Iterable, optional): The dataloader. Defaults to None.
            dataloader_label (str, optional): The dataloader label. Must be ``None`` if and only if
                ``dataloader`` is None. Defaults to None.
            dataloader_len (int | Time[int]): The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
                Set to ``-1`` to iterate over the entire dataset. (Default: ``-1``.)
        """
        if dataloader is None:
            # Clearing the dataloader also clears the label; the two must stay in sync.
            dataloader_label = None
        else:
            if dataloader_label is None:
                raise ValueError("If the `dataloader` is specified, then `dataloader_label` must not be None.")
        self._dataloader = dataloader
        self._dataloader_label = dataloader_label
        if dataloader is not None:
            self.dataloader_len = dataloader_len  # setting it to -1 will do a failsafe read of len(dataloader)
        else:
            # No dataloader -> no meaningful length; bypass the setter, which would raise.
            self._dataloader_len = None
    @property
    def dataloader_len(self) -> Optional[Time[int]]:
        """The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
        .. note::
            If not explicitly specified, this value is an approximation, as it depends on ``len(self.dataloader)``.
            See the :doc:`PyTorch DataLoader Documentation <torch:data>` for more information.
        Returns:
            Optional[Time[int]]: The number of batches per dataloader iteration (e.g. epoch), or None if no dataloader
            is defined or if the dataloader has an unknown length (e.g. streaming dataloaders).
        """
        return self._dataloader_len
    @dataloader_len.setter
    def dataloader_len(self, num_batches: Union[int, Time[int]]):
        if isinstance(num_batches, int):
            # Normalize plain ints to Time in units of batches.
            num_batches = Time(num_batches, TimeUnit.BATCH)
        if self._dataloader is None:
            raise RuntimeError("`State.dataloader_len` cannot be set if the dataloader is not defined.")
        try:
            # Not every dataloader knows its length (e.g. streaming datasets), and
            # some Sized implementations still raise from __len__.
            if isinstance(self._dataloader, collections.abc.Sized):
                dataloader_len = len(self._dataloader)
            else:
                dataloader_len = None
        except (TypeError, NotImplementedError):
            dataloader_len = None
        if dataloader_len is not None and num_batches >= 0 and int(num_batches) > dataloader_len:
            # A user-provided length larger than the actual dataloader length is
            # clamped (with a warning) rather than trusted.
            warnings.warn((f"DataloaderNumBatchesWarning: The dataloader_len ({int(num_batches)}) "
                           f"is greater than the length (i.e. number of batches) of the dataloader, which is "
                           f"{dataloader_len}. State.dataloader_len is thus being set to {dataloader_len}."))
            self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
            return
        if num_batches < 0:
            # -1 (or any negative) means "use the full dataloader length if known".
            if dataloader_len is not None:
                # len(dataloader) is an approximation -- see https://pytorch.org/docs/stable/data.html.
                # However, in the worst case where additional last batches are dropped, this calculation should be
                # an over-estimate, leading to the entire dataloader still being iterated over.
                self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
            else:
                # The dataloader length is unknown.
                self._dataloader_len = None
            return
        self._dataloader_len = num_batches
    @property
    def precision(self):
        """The numerical precision to use for training.
        See :class:`~.Precision` for the supported precisions.
        """
        # Always a Precision enum member; the setter coerces strings.
        return self._precision
    @precision.setter
    def precision(self, precision: Union[str, Precision]):
        # Accept either a string name or a Precision member; normalize to Precision.
        self._precision = Precision(precision)
    @property
    def is_model_deepspeed(self) -> bool:
        """Whether :attr:`model` is an instance of a :class:`~deepspeed.DeepSpeedEngine`."""
        try:
            # deepspeed is an optional dependency; if it is not installed, the
            # model cannot possibly be a DeepSpeedEngine.
            import deepspeed
        except ImportError:
            return False
        else:
            return isinstance(self.model, deepspeed.DeepSpeedEngine)
    @property
    def is_model_ddp(self) -> bool:
        """Whether :attr:`model` is an instance of a :class:`.DistributedDataParallel`."""
        return isinstance(self.model, DistributedDataParallel)
    @property
    def deepspeed_model(self) -> deepspeed.DeepSpeedEngine:
        """Cast :attr:`model` to :class:`~deepspeed.DeepSpeedEngine`.

        Raises:
            TypeError: If the model is not a DeepSpeed model.
        """
        if self.is_model_deepspeed:
            return cast("deepspeed.DeepSpeedEngine", self.model)
        raise TypeError("state.model is not a DeepSpeed model")
| 7,355 | 0 | 285 |
7646da8211587d41de2444eceb7133af1f680277 | 2,017 | py | Python | Insurance-claim-prediction-/code.py | ANanade/ga-learner-dsmp-repo | ba5c06d039cfba6222998fccaca88e629c4bc3b8 | [
"MIT"
] | null | null | null | Insurance-claim-prediction-/code.py | ANanade/ga-learner-dsmp-repo | ba5c06d039cfba6222998fccaca88e629c4bc3b8 | [
"MIT"
] | null | null | null | Insurance-claim-prediction-/code.py | ANanade/ga-learner-dsmp-repo | ba5c06d039cfba6222998fccaca88e629c4bc3b8 | [
"MIT"
] | null | null | null | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
# Load the dataset (`path` is injected by the hosting platform -- TODO confirm)
# and split features/target: every column but the last is a feature,
# the last column is the label. 80/20 train/test split with a fixed seed.
df = pd.read_csv(path)
df.head()
X = df.iloc[:,: -1]
y = df.iloc[:,-1]
X_train, X_test , y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
# Outlier inspection: boxplot of BMI, its 95th percentile, and the class balance.
plt.boxplot(X_train['bmi'])
q_value = X_train['bmi'].quantile(.95)
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
# Pairwise feature correlations plus a scatter-matrix of the training features.
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
# Count plots of four categorical features, split by the target class,
# laid out on a 2x2 grid (cols[i*2 + j] maps grid cell -> feature).
cols = ['children','sex','region','smoker']
# create subplot
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20,20))
# create loop for plotting countplot
for i in range(0,2):
    for j in range(0,2):
        col=cols[i*2 + j]
        sns.countplot(x=X_train[col], hue=y_train, ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
# Grid-search the regularization strength C for logistic regression,
# then report hold-out accuracy on the test split.
lr = LogisticRegression()
grid = GridSearchCV(estimator = lr, param_grid= parameters)
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
# ROC analysis: AUC from hard predictions, then from predicted probabilities
# (positive-class column), and finally the ROC curve itself.
score = roc_auc_score(y_test, y_pred)
y_pred_proba = grid.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_proba)
roc_auc = roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| 18.675926 | 93 | 0.693604 | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
# Load the dataset (`path` is injected by the hosting platform -- TODO confirm)
# and split features/target: every column but the last is a feature,
# the last column is the label. 80/20 train/test split with a fixed seed.
df = pd.read_csv(path)
df.head()
X = df.iloc[:,: -1]
y = df.iloc[:,-1]
X_train, X_test , y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
# Outlier inspection: boxplot of BMI, its 95th percentile, and the class balance.
plt.boxplot(X_train['bmi'])
q_value = X_train['bmi'].quantile(.95)
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
# Pairwise feature correlations plus a scatter-matrix of the training features.
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
# Count plots of four categorical features, split by the target class,
# laid out on a 2x2 grid (cols[i*2 + j] maps grid cell -> feature).
cols = ['children','sex','region','smoker']
# create subplot
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20,20))
# create loop for plotting countplot
for i in range(0,2):
    for j in range(0,2):
        col=cols[i*2 + j]
        sns.countplot(x=X_train[col], hue=y_train, ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
# Grid-search the regularization strength C for logistic regression,
# then report hold-out accuracy on the test split.
lr = LogisticRegression()
grid = GridSearchCV(estimator = lr, param_grid= parameters)
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
# ROC analysis: AUC from hard predictions, then from predicted probabilities
# (positive-class column), and finally the ROC curve itself.
score = roc_auc_score(y_test, y_pred)
y_pred_proba = grid.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_proba)
roc_auc = roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| 0 | 0 | 0 |
068a5865811f611da85e29a92748a2b89f2e5df1 | 30 | py | Python | moneytracker/settings/__init__.py | alaasalman/moneytracker | cba9844f0abd31959e0f4b6343bc72e5235aa5b0 | [
"MIT"
] | null | null | null | moneytracker/settings/__init__.py | alaasalman/moneytracker | cba9844f0abd31959e0f4b6343bc72e5235aa5b0 | [
"MIT"
] | 10 | 2020-04-22T08:15:43.000Z | 2021-06-10T19:58:46.000Z | moneytracker/settings/__init__.py | alaasalman/moneytracker | cba9844f0abd31959e0f4b6343bc72e5235aa5b0 | [
"MIT"
] | null | null | null | from .local_settings import *
| 15 | 29 | 0.8 | from .local_settings import *
| 0 | 0 | 0 |
4350e6849654f43f7eec97135b1a5bf52c63877e | 1,148 | py | Python | applications/SwimmingDEMApplication/python_scripts/cellular_flow/make_mesh_ethier_benchmark_analysis.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | 1 | 2019-08-01T09:01:08.000Z | 2019-08-01T09:01:08.000Z | applications/SwimmingDEMApplication/python_scripts/cellular_flow/make_mesh_ethier_benchmark_analysis.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | null | null | null | applications/SwimmingDEMApplication/python_scripts/cellular_flow/make_mesh_ethier_benchmark_analysis.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | null | null | null | from KratosMultiphysics import Parameters
import ethier_benchmark_analysis
BaseAnalysis = ethier_benchmark_analysis.EthierBenchmarkAnalysis
| 49.913043 | 116 | 0.659408 | from KratosMultiphysics import Parameters
import ethier_benchmark_analysis
BaseAnalysis = ethier_benchmark_analysis.EthierBenchmarkAnalysis
class EthierBenchmarkMakeMeshAnalysis(BaseAnalysis):
    """Ethier benchmark variant that generates its own regular fluid mesh.

    Extends the base Ethier benchmark analysis by registering extra beta
    parameters and by filling the fluid model part with a generated
    parallelepiped mesh instead of reading it from disk.
    """
    def __init__(self, varying_parameters=None):
        # BUGFIX: the previous default `Parameters("{}")` was evaluated once at
        # function-definition time, so every no-argument instantiation shared
        # the same mutable Parameters object. Use a None sentinel and build a
        # fresh Parameters per call instead (backward compatible for callers).
        if varying_parameters is None:
            varying_parameters = Parameters("{}")
        BaseAnalysis.__init__(self, varying_parameters)
    def SetBetaParameters(self):
        """Register the extra beta parameters this variant needs on top of the base ones."""
        BaseAnalysis.SetBetaParameters(self)
        self.project_parameters.AddEmptyValue("pressure_grad_recovery_type")
        self.project_parameters.AddEmptyValue("size_parameter").SetInt(1)
    def ReadFluidModelParts(self):
        """Fill the fluid model part with a regular mesh of the [0, 0.1]^3 cube."""
        from meshing import meshing_utilities
        # 10 divisions per dimension over the cube with corners (0,0,0)-(0.1,0.1,0.1).
        self.mesh_generator = meshing_utilities.ParallelepipedRegularMesher(
            model_part_to_be_filled = self._GetFluidAnalysis().fluid_model_part,
            lower_corner_coordinates = [0.0, 0.0, 0.0],
            higher_corner_coordinates = [0.1, 0.1, 0.1],
            number_of_divisions_per_dimension = 10)
        self.mesh_generator.FillModelPartWithNewMesh()
| 874 | 31 | 103 |
5d2c8aedc393aa83eff5ac4eb489e80d1bc37329 | 496 | py | Python | src/pythae/models/beta_vae/beta_vae_config.py | clementchadebec/benchmark_VAE | 943e231f9e5dfa40b4eec14d4536f1c229ad9be1 | [
"Apache-2.0"
] | 143 | 2021-10-17T08:43:33.000Z | 2022-03-31T11:10:53.000Z | src/pythae/models/beta_vae/beta_vae_config.py | louis-j-vincent/benchmark_VAE | 943e231f9e5dfa40b4eec14d4536f1c229ad9be1 | [
"Apache-2.0"
] | 6 | 2022-01-21T17:40:09.000Z | 2022-03-16T13:09:22.000Z | src/pythae/models/beta_vae/beta_vae_config.py | louis-j-vincent/benchmark_VAE | 943e231f9e5dfa40b4eec14d4536f1c229ad9be1 | [
"Apache-2.0"
] | 18 | 2021-12-16T15:17:08.000Z | 2022-03-15T01:30:13.000Z | from pydantic.dataclasses import dataclass
from ..vae import VAEConfig
@dataclass
class BetaVAEConfig(VAEConfig):
    r"""
    :math:`\beta`-VAE model config class
    Parameters:
        input_dim (tuple): The input_data dimension.
        latent_dim (int): The latent space dimension. Default: None.
        reconstruction_loss (str): The reconstruction loss to use ['bce', 'mse']. Default: 'mse'
        beta (float): The balancing factor. Default: 1
    """
    # Balancing factor used by the BetaVAE model; defaults to 1.0
    # (presumably weighting the KL term of the loss -- confirm against the model).
    beta: float = 1.0
| 26.105263 | 96 | 0.671371 | from pydantic.dataclasses import dataclass
from ..vae import VAEConfig
@dataclass
class BetaVAEConfig(VAEConfig):
    r"""
    :math:`\beta`-VAE model config class
    Parameters:
        input_dim (tuple): The input_data dimension.
        latent_dim (int): The latent space dimension. Default: None.
        reconstruction_loss (str): The reconstruction loss to use ['bce', 'mse']. Default: 'mse'
        beta (float): The balancing factor. Default: 1
    """
    # Balancing factor used by the BetaVAE model; defaults to 1.0
    # (presumably weighting the KL term of the loss -- confirm against the model).
    beta: float = 1.0
| 0 | 0 | 0 |
0b064943d83f84d379f42996881c57015423c893 | 792 | py | Python | verify/checker/abc213/d.py | naskya/testcase-generator | 02765184a275152e1d8c177f2028ca8db315cfee | [
"MIT"
] | 4 | 2020-09-23T07:11:41.000Z | 2022-02-02T09:08:21.000Z | verify/checker/abc213/d.py | naskya/testcase-generator | 02765184a275152e1d8c177f2028ca8db315cfee | [
"MIT"
] | 5 | 2021-08-29T18:23:01.000Z | 2021-11-20T03:53:19.000Z | verify/checker/abc213/d.py | naskya/testcase-generator | 02765184a275152e1d8c177f2028ca8db315cfee | [
"MIT"
] | null | null | null | import queue
# Script entry point: run the checker only when executed directly.
if __name__ == '__main__':
    main()
| 18 | 50 | 0.508838 | import queue
def main() -> None:
    """Read a graph from stdin and assert that it is a valid tree.

    Input format: a node count ``N`` on the first line, followed by ``N - 1``
    edges ``a b`` (1-indexed). Asserts fire when an endpoint is out of range,
    when ``N`` is outside [2, 100], or when the BFS from node 0 revisits a
    node (i.e. the edges do not form a tree reaching every node).
    """
    node_count = int(input())
    neighbors = [[] for _ in range(node_count)]
    for _ in range(node_count - 1):
        u, v = map(int, input().split())
        assert 1 <= u <= node_count
        assert 1 <= v <= node_count
        u -= 1
        v -= 1
        neighbors[u].append(v)
        neighbors[v].append(u)
    assert 2 <= node_count <= 100
    pending = queue.SimpleQueue()
    pending.put((0, -1))
    seen = [False] * node_count
    while not pending.empty():
        node, parent = pending.get()
        seen[node] = True
        for neighbor in neighbors[node]:
            if neighbor == parent:
                continue
            assert not seen[neighbor]
            pending.put((neighbor, node))
    assert all(seen)
# Script entry point: run the checker only when executed directly.
if __name__ == '__main__':
    main()
| 715 | 0 | 23 |
10c82b231ac8964e241f873af59eb78a8abd5377 | 553 | py | Python | python/playground/Types/words.py | manojbhargavan/exercismPractice | c7ff1ea4647e78117d2bf93ef12c1cf8809805b0 | [
"MIT"
] | null | null | null | python/playground/Types/words.py | manojbhargavan/exercismPractice | c7ff1ea4647e78117d2bf93ef12c1cf8809805b0 | [
"MIT"
] | 6 | 2020-09-07T22:37:04.000Z | 2021-09-02T17:09:57.000Z | python/playground/Types/words.py | manojbhargavan/exercismPractice | c7ff1ea4647e78117d2bf93ef12c1cf8809805b0 | [
"MIT"
] | null | null | null | """This program is to fetch the words from an url online.
Example:
$python words.py "http://sixty-north.com/c/t.txt"
"""
import sys
from urllib.request import urlopen
# Do Something nice:
# Entry point: fetch the document named on the command line and print its words.
if __name__ == "__main__":
    print_items(fetch_words(sys.argv[1]))
| 18.433333 | 57 | 0.654611 | """This program is to fetch the words from an url online.
Example:
$python words.py "http://sixty-north.com/c/t.txt"
"""
import sys
from urllib.request import urlopen
# Do Something nice:
def fetch_words(uri):
    """Fetch the resource at *uri* and return its whitespace-separated words.

    Args:
        uri: URL of a text resource (anything ``urlopen`` accepts).

    Returns:
        list[str]: every word of the response body, decoded as UTF-8, in order.
    """
    response = urlopen(uri)
    collected = [token.decode("utf-8") for line in response for token in line.split()]
    response.close()
    return collected
def print_items(items):
    """Write each element of *items* to stdout on its own line."""
    for element in items:
        print(element)
# Entry point: fetch the document named on the command line and print its words.
if __name__ == "__main__":
    print_items(fetch_words(sys.argv[1]))
| 244 | 0 | 45 |
457be35d43522dd6a7b7be740a04ce2932d48a0b | 9,223 | py | Python | kubernetes/client/models/com_coreos_monitoring_v1_prometheus_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/com_coreos_monitoring_v1_prometheus_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/com_coreos_monitoring_v1_prometheus_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ComCoreosMonitoringV1PrometheusStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type for each python attribute; consumed by to_dict().
    openapi_types = {
        'available_replicas': 'int',
        'paused': 'bool',
        'replicas': 'int',
        'unavailable_replicas': 'int',
        'updated_replicas': 'int'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'available_replicas': 'availableReplicas',
        'paused': 'paused',
        'replicas': 'replicas',
        'unavailable_replicas': 'unavailableReplicas',
        'updated_replicas': 'updatedReplicas'
    }
    def __init__(self, available_replicas=None, paused=None, replicas=None, unavailable_replicas=None, updated_replicas=None, local_vars_configuration=None):  # noqa: E501
        """ComCoreosMonitoringV1PrometheusStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._available_replicas = None
        self._paused = None
        self._replicas = None
        self._unavailable_replicas = None
        self._updated_replicas = None
        self.discriminator = None
        # The assignments below go through the property setters, which raise
        # ValueError on None while client-side validation is enabled -- so all
        # five fields are effectively required despite the None defaults.
        self.available_replicas = available_replicas
        self.paused = paused
        self.replicas = replicas
        self.unavailable_replicas = unavailable_replicas
        self.updated_replicas = updated_replicas
    @property
    def available_replicas(self):
        """Gets the available_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment.  # noqa: E501
        :return: The available_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :rtype: int
        """
        return self._available_replicas
    @available_replicas.setter
    def available_replicas(self, available_replicas):
        """Sets the available_replicas of this ComCoreosMonitoringV1PrometheusStatus.
        Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment.  # noqa: E501
        :param available_replicas: The available_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and available_replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `available_replicas`, must not be `None`")  # noqa: E501
        self._available_replicas = available_replicas
    @property
    def paused(self):
        """Gets the paused of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed.  # noqa: E501
        :return: The paused of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :rtype: bool
        """
        return self._paused
    @paused.setter
    def paused(self, paused):
        """Sets the paused of this ComCoreosMonitoringV1PrometheusStatus.
        Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed.  # noqa: E501
        :param paused: The paused of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and paused is None:  # noqa: E501
            raise ValueError("Invalid value for `paused`, must not be `None`")  # noqa: E501
        self._paused = paused
    @property
    def replicas(self):
        """Gets the replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector).  # noqa: E501
        :return: The replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :rtype: int
        """
        return self._replicas
    @replicas.setter
    def replicas(self, replicas):
        """Sets the replicas of this ComCoreosMonitoringV1PrometheusStatus.
        Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector).  # noqa: E501
        :param replicas: The replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `replicas`, must not be `None`")  # noqa: E501
        self._replicas = replicas
    @property
    def unavailable_replicas(self):
        """Gets the unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        Total number of unavailable pods targeted by this Prometheus deployment.  # noqa: E501
        :return: The unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :rtype: int
        """
        return self._unavailable_replicas
    @unavailable_replicas.setter
    def unavailable_replicas(self, unavailable_replicas):
        """Sets the unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus.
        Total number of unavailable pods targeted by this Prometheus deployment.  # noqa: E501
        :param unavailable_replicas: The unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and unavailable_replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `unavailable_replicas`, must not be `None`")  # noqa: E501
        self._unavailable_replicas = unavailable_replicas
    @property
    def updated_replicas(self):
        """Gets the updated_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec.  # noqa: E501
        :return: The updated_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :rtype: int
        """
        return self._updated_replicas
    @updated_replicas.setter
    def updated_replicas(self, updated_replicas):
        """Sets the updated_replicas of this ComCoreosMonitoringV1PrometheusStatus.
        Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec.  # noqa: E501
        :param updated_replicas: The updated_replicas of this ComCoreosMonitoringV1PrometheusStatus.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and updated_replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `updated_replicas`, must not be `None`")  # noqa: E501
        self._updated_replicas = updated_replicas
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Recursively serialize nested models, lists of models, and dicts of
            # models; plain values are copied through unchanged.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ComCoreosMonitoringV1PrometheusStatus):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ComCoreosMonitoringV1PrometheusStatus):
            return True
        return self.to_dict() != other.to_dict()
| 38.429167 | 171 | 0.669522 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ComCoreosMonitoringV1PrometheusStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'paused': 'bool',
'replicas': 'int',
'unavailable_replicas': 'int',
'updated_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'paused': 'paused',
'replicas': 'replicas',
'unavailable_replicas': 'unavailableReplicas',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, available_replicas=None, paused=None, replicas=None, unavailable_replicas=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
"""ComCoreosMonitoringV1PrometheusStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._paused = None
self._replicas = None
self._unavailable_replicas = None
self._updated_replicas = None
self.discriminator = None
self.available_replicas = available_replicas
self.paused = paused
self.replicas = replicas
self.unavailable_replicas = unavailable_replicas
self.updated_replicas = updated_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment. # noqa: E501
:return: The available_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this ComCoreosMonitoringV1PrometheusStatus.
Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment. # noqa: E501
:param available_replicas: The available_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and available_replicas is None: # noqa: E501
raise ValueError("Invalid value for `available_replicas`, must not be `None`") # noqa: E501
self._available_replicas = available_replicas
@property
def paused(self):
"""Gets the paused of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. # noqa: E501
:return: The paused of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:rtype: bool
"""
return self._paused
@paused.setter
def paused(self, paused):
"""Sets the paused of this ComCoreosMonitoringV1PrometheusStatus.
Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. # noqa: E501
:param paused: The paused of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and paused is None: # noqa: E501
raise ValueError("Invalid value for `paused`, must not be `None`") # noqa: E501
self._paused = paused
@property
def replicas(self):
"""Gets the replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector). # noqa: E501
:return: The replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this ComCoreosMonitoringV1PrometheusStatus.
Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector). # noqa: E501
:param replicas: The replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def unavailable_replicas(self):
"""Gets the unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
Total number of unavailable pods targeted by this Prometheus deployment. # noqa: E501
:return: The unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:rtype: int
"""
return self._unavailable_replicas
@unavailable_replicas.setter
def unavailable_replicas(self, unavailable_replicas):
"""Sets the unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus.
Total number of unavailable pods targeted by this Prometheus deployment. # noqa: E501
:param unavailable_replicas: The unavailable_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and unavailable_replicas is None: # noqa: E501
raise ValueError("Invalid value for `unavailable_replicas`, must not be `None`") # noqa: E501
self._unavailable_replicas = unavailable_replicas
@property
def updated_replicas(self):
"""Gets the updated_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec. # noqa: E501
:return: The updated_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this ComCoreosMonitoringV1PrometheusStatus.
Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec. # noqa: E501
:param updated_replicas: The updated_replicas of this ComCoreosMonitoringV1PrometheusStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and updated_replicas is None: # noqa: E501
raise ValueError("Invalid value for `updated_replicas`, must not be `None`") # noqa: E501
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr shows the pretty-printed model dict.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComCoreosMonitoringV1PrometheusStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComCoreosMonitoringV1PrometheusStatus):
return True
return self.to_dict() != other.to_dict()
| 0 | 0 | 0 |
1202175b7bc05994d58b825c7983f9c5213b8a54 | 3,485 | py | Python | sast_controller/drivers/cx/CxManager.py | dovbiyi/reapsaw | 9b86c51e73de6d540468d6ed88e964c2811ba666 | [
"Apache-2.0"
] | 41 | 2019-08-07T12:45:42.000Z | 2021-08-31T18:22:00.000Z | sast_controller/drivers/cx/CxManager.py | dovbiyi/reapsaw | 9b86c51e73de6d540468d6ed88e964c2811ba666 | [
"Apache-2.0"
] | null | null | null | sast_controller/drivers/cx/CxManager.py | dovbiyi/reapsaw | 9b86c51e73de6d540468d6ed88e964c2811ba666 | [
"Apache-2.0"
] | 13 | 2019-08-06T23:08:22.000Z | 2022-02-16T12:55:39.000Z | # Copyright (c) 2018 Dow Jones & Company, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sast_controller.drivers.cx.Checkmarx import Checkmarx
# Absolute path of the directory containing this module.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Substrings searched for in Checkmarx status/stage messages to classify failures.
INCREMENTAL_SCAN_ERROR = 'full scan should be submitted for accurate results'
NO_SOURCES_ERROR = 'worker failed to retrieve scan'
# User-facing message raised when the uploaded archive has no scannable files.
NO_SOURCES = "No supported files to scan in Checkmarx. \n Please find details by the link:\n " \
             "https://checkmarx.atlassian.net/wiki" \
             "/spaces/KC/pages/141328390/8.5.0+Supported+Code+Languages+and+Frameworks "
class CxIncrementalScanException(Exception):
    """Raised when Checkmarx cannot start an incremental scan."""
class CxNoSourceScanException(Exception):
    """Raised when the uploaded archive contains no scannable sources."""
def scan_project(local_path=None, project=None, incremental_scan=False):
    """
    Scan project using Checkmarx and return the generated scan report.
    :param local_path:
        path to folder with project sources to upload
    :param project:
        name of Checkmarx project
    :param incremental_scan:
        request an incremental scan instead of a full one when True
    :return:
        scan report object, or None when the connection/scan fails
    :raise: CxIncrementalScanException
        if unable to start incremental scan
    :raise: CxNoSourceScanException
        if the archive contains no files Checkmarx can scan
    """
    cxClient = Checkmarx(project)
    report = None
    if not cxClient.valid:
        # Connection/login to the Checkmarx server failed.
        cxClient.logger.critical("Invalid connection")
        return report
    response = cxClient.run_scan(local_path=local_path,
                                 incremental_scan=incremental_scan)
    if not response:
        cxClient.logger.critical("No response")
        return report
    run_id = response.RunId
    if run_id:
        currently_running = None
        scan_id = None
        # Busy-poll the scan status until it reaches a terminal state.
        # NOTE(review): there is no sleep between polls, so this loop hammers
        # the Checkmarx API; consider adding a delay.
        while currently_running != 'Finished':
            scan = cxClient.get_status_of_single_run(run_id)
            status = scan.CurrentStatus
            currently_running = status
            if currently_running == 'Finished':
                cxClient.logger.info("Scan Finished")
                try:
                    scan_id = scan.ScanId
                except Exception:
                    # A status object without ScanId is unexpected; log and re-raise.
                    cxClient.logger.critical(str(scan))
                    raise
            if currently_running == 'Failed':
                cxClient.logger.critical("Scan Failed")
                # Classify the failure from server-provided messages.
                if scan.StageMessage.find(NO_SOURCES_ERROR) > -1:
                    raise CxNoSourceScanException(NO_SOURCES)
                cxClient.logger.critical(str(scan))
                if str(scan).find(INCREMENTAL_SCAN_ERROR) > -1:
                    raise CxIncrementalScanException(str(scan))
                break
        if currently_running != "Failed":
            # Request a report for the finished scan and poll until it is ready.
            report_id = cxClient.create_scan_report(scan_id).ID
            while not cxClient.get_scan_report_status(report_id).IsReady:
                cxClient.logger.info("Report generation in progress")
            report = cxClient.get_scan_report(report_id)
    return report
| 37.074468 | 96 | 0.657389 | # Copyright (c) 2018 Dow Jones & Company, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sast_controller.drivers.cx.Checkmarx import Checkmarx
# Absolute path of the directory containing this module.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Substrings searched for in Checkmarx status/stage messages to classify failures.
INCREMENTAL_SCAN_ERROR = 'full scan should be submitted for accurate results'
NO_SOURCES_ERROR = 'worker failed to retrieve scan'
# User-facing message raised when the uploaded archive has no scannable files.
NO_SOURCES = "No supported files to scan in Checkmarx. \n Please find details by the link:\n " \
             "https://checkmarx.atlassian.net/wiki" \
             "/spaces/KC/pages/141328390/8.5.0+Supported+Code+Languages+and+Frameworks "
class CxIncrementalScanException(Exception):
    """Use when unable to start Checkmarx incremental scan"""
    def __init__(self, message):
        # Fix: forward the message to Exception so str(exc) and tracebacks
        # show it (previously str(exc) was empty).
        super().__init__(message)
        self.message = message
class CxNoSourceScanException(Exception):
    """Use when no supported files in zip"""
    def __init__(self, message):
        # Fix: forward the message to Exception so str(exc) and tracebacks
        # show it (previously str(exc) was empty).
        super().__init__(message)
        self.message = message
def scan_project(local_path=None, project=None, incremental_scan=False):
    """
    Scan project using Checkmarx and return the generated scan report.
    :param local_path:
        path to folder with project sources to upload
    :param project:
        name of Checkmarx project
    :param incremental_scan:
        request an incremental scan instead of a full one when True
    :return:
        scan report object, or None when the connection/scan fails
    :raise: CxIncrementalScanException
        if unable to start incremental scan
    :raise: CxNoSourceScanException
        if the archive contains no files Checkmarx can scan
    """
    cxClient = Checkmarx(project)
    report = None
    if not cxClient.valid:
        # Connection/login to the Checkmarx server failed.
        cxClient.logger.critical("Invalid connection")
        return report
    response = cxClient.run_scan(local_path=local_path,
                                 incremental_scan=incremental_scan)
    if not response:
        cxClient.logger.critical("No response")
        return report
    run_id = response.RunId
    if run_id:
        currently_running = None
        scan_id = None
        # Busy-poll the scan status until it reaches a terminal state.
        # NOTE(review): there is no sleep between polls, so this loop hammers
        # the Checkmarx API; consider adding a delay.
        while currently_running != 'Finished':
            scan = cxClient.get_status_of_single_run(run_id)
            status = scan.CurrentStatus
            currently_running = status
            if currently_running == 'Finished':
                cxClient.logger.info("Scan Finished")
                try:
                    scan_id = scan.ScanId
                except Exception:
                    # A status object without ScanId is unexpected; log and re-raise.
                    cxClient.logger.critical(str(scan))
                    raise
            if currently_running == 'Failed':
                cxClient.logger.critical("Scan Failed")
                # Classify the failure from server-provided messages.
                if scan.StageMessage.find(NO_SOURCES_ERROR) > -1:
                    raise CxNoSourceScanException(NO_SOURCES)
                cxClient.logger.critical(str(scan))
                if str(scan).find(INCREMENTAL_SCAN_ERROR) > -1:
                    raise CxIncrementalScanException(str(scan))
                break
        if currently_running != "Failed":
            # Request a report for the finished scan and poll until it is ready.
            report_id = cxClient.create_scan_report(scan_id).ID
            while not cxClient.get_scan_report_status(report_id).IsReady:
                cxClient.logger.info("Report generation in progress")
            report = cxClient.get_scan_report(report_id)
    return report
| 76 | 0 | 54 |
e1e51ce2618e62a2255d91a50390de24e140d42d | 537 | py | Python | app/urls.py | Hassanzadeh-sd/bookshop | 17b2efcc9a595f526628b5f31549214b02e300ab | [
"MIT"
] | 5 | 2019-06-15T16:27:30.000Z | 2021-12-23T00:07:21.000Z | app/urls.py | Hassanzadeh-sd/bookshop | 17b2efcc9a595f526628b5f31549214b02e300ab | [
"MIT"
] | null | null | null | app/urls.py | Hassanzadeh-sd/bookshop | 17b2efcc9a595f526628b5f31549214b02e300ab | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
app_name= "app"
urlpatterns = [
# ex : /book/
url(r'^$', views.Index, name="Index"),
url(r'^test$', views.test, name="test"),
# ex : /book/list
url(r'^list/$', views.Book_list, name="book-list"),
# ex : /book/1/
url(r'^(?P<pk>[0-9]+)/$', views.Book_detail, name="book-detail"),
# ex : /author/
url(r'^author/$', views.Author, name="author"),
# ex : /author/1/
url(r'^author/(?P<pk>[0-9]+)/$', views.Author_detail, name="author-detail"),
] | 29.833333 | 80 | 0.56797 | from django.conf.urls import url
from . import views
# Namespace used when reversing URLs, e.g. "app:book-list".
app_name= "app"
# Route table mapping URL regexes to the book/author views.
urlpatterns = [
    # ex : /book/
    url(r'^$', views.Index, name="Index"),
    url(r'^test$', views.test, name="test"),
    # ex : /book/list
    url(r'^list/$', views.Book_list, name="book-list"),
    # ex : /book/1/
    url(r'^(?P<pk>[0-9]+)/$', views.Book_detail, name="book-detail"),
    # ex : /author/
    url(r'^author/$', views.Author, name="author"),
    # ex : /author/1/
    url(r'^author/(?P<pk>[0-9]+)/$', views.Author_detail, name="author-detail"),
] | 0 | 0 | 0 |
707a9a052f8ef6d57574c8a527e90e00122f4d9f | 3,408 | py | Python | app/views.py | ArRosid/learn-def | bd1bfaf32e12f758a0095b2ac259c5c5e5abf3f8 | [
"MIT"
] | 1 | 2021-10-01T11:13:19.000Z | 2021-10-01T11:13:19.000Z | app/views.py | ArRosid/learn-def | bd1bfaf32e12f758a0095b2ac259c5c5e5abf3f8 | [
"MIT"
] | null | null | null | app/views.py | ArRosid/learn-def | bd1bfaf32e12f758a0095b2ac259c5c5e5abf3f8 | [
"MIT"
] | 1 | 2022-02-13T16:49:50.000Z | 2022-02-13T16:49:50.000Z | from django.shortcuts import render
from rest_framework import status
from rest_framework import generics
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import (IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly)
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from . import models
from . import serializers
from .permissions import IsOwnArticleOrReadOnly
# @api_view(["GET"])
# def home(request):
# return Response({"message":"Welcome Home!"},
# status=status.HTTP_200_OK)
# class ArticleListCreateAPIView(APIView):
# def get(self, request):
# articles = models.Article.objects.all()
# serializer = serializers.ArticleSerializer(articles, many=True)
# return Response(serializer.data)
# def post(self, request):
# serializer = serializers.ArticleSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class ArticleDetailAPIView(APIView):
# def get(self, request, pk):
# article = models.Article.objects.get(id=pk)
# serializer = serializers.ArticleSerializer(article)
# return Response(serializer.data)
# def put(self, request, pk):
# article = models.Article.objects.get(id=pk)
# serializer = serializers.ArticleSerializer(article, request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# def delete(self, request, pk):
# article = models.Article.objects.get(id=pk)
# article.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
# class ArticleListCreateView(generics.ListCreateAPIView):
# queryset = models.Article.objects.all()
# serializer_class = serializers.ArticleSerializer
# class ArticleDetailView(generics.RetrieveUpdateDestroyAPIView):
# queryset = models.Article.objects.all()
# serializer_class = serializers.ArticleSerializer
@csrf_exempt
@api_view(['POST']) | 38.292135 | 80 | 0.694836 | from django.shortcuts import render
from rest_framework import status
from rest_framework import generics
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import (IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly)
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from . import models
from . import serializers
from .permissions import IsOwnArticleOrReadOnly
# @api_view(["GET"])
# def home(request):
# return Response({"message":"Welcome Home!"},
# status=status.HTTP_200_OK)
# class ArticleListCreateAPIView(APIView):
# def get(self, request):
# articles = models.Article.objects.all()
# serializer = serializers.ArticleSerializer(articles, many=True)
# return Response(serializer.data)
# def post(self, request):
# serializer = serializers.ArticleSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class ArticleDetailAPIView(APIView):
# def get(self, request, pk):
# article = models.Article.objects.get(id=pk)
# serializer = serializers.ArticleSerializer(article)
# return Response(serializer.data)
# def put(self, request, pk):
# article = models.Article.objects.get(id=pk)
# serializer = serializers.ArticleSerializer(article, request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# def delete(self, request, pk):
# article = models.Article.objects.get(id=pk)
# article.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
# class ArticleListCreateView(generics.ListCreateAPIView):
# queryset = models.Article.objects.all()
# serializer_class = serializers.ArticleSerializer
# class ArticleDetailView(generics.RetrieveUpdateDestroyAPIView):
# queryset = models.Article.objects.all()
# serializer_class = serializers.ArticleSerializer
@csrf_exempt
@api_view(['POST'])
def login(request):
    """Issue a DRF auth token for valid username/password credentials.

    :param request: DRF request carrying ``username`` and ``password``.
    :return: 200 with ``{'token': ...}`` on success, 400 when either
        credential is missing, 404 when authentication fails.
    """
    username = request.data.get("username")
    password = request.data.get("password")
    # Fix: reject when EITHER credential is missing. The original used
    # `and`, which let a request with only one field reach authenticate().
    if username is None or password is None:
        return Response({'error':'Please provide both username & password'},
                        status=status.HTTP_400_BAD_REQUEST)
    user = authenticate(username=username, password=password)
    if not user:
        return Response({'error': 'Invalid credentials'},
                        status=status.HTTP_404_NOT_FOUND)
    # Reuse an existing token if one was already issued for this user.
    token, _ = Token.objects.get_or_create(user=user)
    return Response({'token':token.key},
                    status=status.HTTP_200_OK)
class ArticleViewSet(viewsets.ModelViewSet):
    """CRUD endpoints (list/retrieve/create/update/destroy) for Article.

    Requires an authenticated user; per-object write access is further
    restricted by IsOwnArticleOrReadOnly.
    """
    serializer_class = serializers.ArticleSerializer
    queryset = models.Article.objects.all()
    permission_classes = [IsAuthenticated, IsOwnArticleOrReadOnly]
700c5078dc1131d8f41a18f860a42ec567e75b09 | 2,273 | py | Python | oshino/agents/__init__.py | CodersOfTheNight/oshino | 08e35d004aa16a378d87d5e548649a1bc1f5dc17 | [
"MIT"
] | 6 | 2016-11-06T17:47:57.000Z | 2020-04-08T12:20:59.000Z | oshino/agents/__init__.py | CodersOfTheNight/oshino | 08e35d004aa16a378d87d5e548649a1bc1f5dc17 | [
"MIT"
] | 24 | 2016-11-15T06:20:50.000Z | 2019-02-08T18:54:57.000Z | oshino/agents/__init__.py | CodersOfTheNight/oshino | 08e35d004aa16a378d87d5e548649a1bc1f5dc17 | [
"MIT"
] | null | null | null | import logging
from abc import ABC, abstractmethod
from oshino.util import current_ts, timer
| 25.829545 | 75 | 0.573251 | import logging
from abc import ABC, abstractmethod
from oshino.util import current_ts, timer
class Agent(ABC):
def __init__(self, cfg):
self._data = cfg
self._last_run = current_ts()
def on_start(self):
pass
def on_stop(self):
pass
@property
def name(self):
return self._data["name"].lower().replace(" ", "-")
@property
def prefix(self):
return "{0}.".format(self.name)
def get_logger(self):
return logging.getLogger(self.__class__.__name__)
@abstractmethod
async def process(self, event_fn):
"""
Each agent must implement this one to provide actual logic
"""
pass
def is_valid(self):
return "name" in self._data
async def pull_metrics(self, event_fn, loop=None):
"""
Method called by core.
Should not be overwritten.
"""
if self.lazy and not self.ready:
return None
logger = self.get_logger()
ts = timer()
# logger.trace("Waiting for process event")
result = await self.process(event_fn)
td = int(timer() - ts)
# logger.trace("It took: {}ms".format(td))
self._last_run = current_ts()
return result
@property
def lazy(self):
"""
Agents with flag `lazy` gives data when they want to,
not when they are requested for.
"""
return self._data.get("lazy", False)
@property
def ready(self):
"""
Function used when agent is `lazy`.
It is being processed only when `ready` condition is satisfied
"""
logger = self.get_logger()
now = current_ts()
# logger.trace("Current time: {0}".format(now))
# logger.trace("Last Run: {0}".format(self._last_run))
delta = (now - self._last_run)
# logger.trace("Delta: {0}, Interval: {1}"
# .format(delta, self.interval * 1000))
return delta > self.interval * 1000
@property
def interval(self):
"""
By default, lazy agents expects to have some kind of time interval.
`ready` output is calculated according to this interval.
"""
return self._data["interval"]
| 269 | 1,885 | 23 |
fca5ef6c92a374ed8e19842cabea3af8b90e1532 | 33,691 | py | Python | tflib.py | dgathogo/SealFaceRecognition | b68762b5a0874279386d27963c44ec8ba33569eb | [
"MIT"
] | 7 | 2019-06-19T03:45:18.000Z | 2021-07-25T17:37:29.000Z | tflib.py | dgathogo/SealFaceRecognition | b68762b5a0874279386d27963c44ec8ba33569eb | [
"MIT"
] | 12 | 2019-06-19T04:01:13.000Z | 2022-03-11T23:51:46.000Z | tflib.py | dgathogo/SealFaceRecognition | b68762b5a0874279386d27963c44ec8ba33569eb | [
"MIT"
] | 5 | 2019-06-12T15:28:23.000Z | 2021-01-19T10:32:11.000Z | """Functions for building tensorflow graph
"""
# MIT License
#
# Copyright (c) 2018 Debayan Deb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
def average_grads(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of gradients. The outer list is over different
        towers. The inner list is over the gradient calculation in each tower.
    Returns:
      List of gradients where the gradient has been averaged across all towers.
    """
    if len(tower_grads) == 1:
        return tower_grads[0]
    # Regroup so gradients of the same variable across towers sit together,
    # then average each group; a None gradient stays None.
    return [None if per_var[0] is None else average_tensors(per_var)
            for per_var in zip(*tower_grads)]
def collect_watch_list():
    '''Collect the variables in watch list.
    Tensors or Variables can be added to the 'watch_list' collection as
    ('name', var/tensor) tuples. This gathers them into a dict that can
    be handed to session.run for evaluation.
    '''
    pairs = tf.get_collection('watch_list')
    return {name: value for name, value in pairs}
def restore_model(sess, var_list, model_dir, restore_scopes=None):
    ''' Load the variable values from a checkpoint file into pre-defined graph.
    Filter the variables so that they contain at least one of the given keywords.'''
    with sess.graph.as_default():
        if restore_scopes is not None:
            # Keep only variables whose name contains at least one scope keyword.
            var_list = [var for var in var_list if any([scope in var.name for scope in restore_scopes])]
        model_dir = os.path.expanduser(model_dir)
        # Use the most recent checkpoint found in the directory.
        ckpt_file = tf.train.latest_checkpoint(model_dir)
        print('Restoring variables from %s ...' % ckpt_file)
        saver = tf.train.Saver(var_list)
        saver.restore(sess, ckpt_file)
def load_model(sess, model_path, scope=None):
    '''Load a graph and its variable values from a model path.
    The path is either a frozen-graph file or a directory containing
    exactly one .meta file plus checkpoint files.

    :param sess: tf.Session to load the graph/variables into.
    :param model_path: frozen graph file or checkpoint directory.
    :param scope: optional name scope to import the metagraph under.
    '''
    with sess.graph.as_default():
        model_path = os.path.expanduser(model_path)
        if (os.path.isfile(model_path)):
            # Frozen graph: parse the serialized GraphDef and import it.
            print('Model filename: %s' % model_path)
            # Fix: the original referenced an un-imported `gfile` (NameError);
            # use the tf.gfile module that ships with TF 1.x instead.
            with tf.gfile.FastGFile(model_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
        else:
            # Load graph structure and variable values separately.
            meta_files = [file for file in os.listdir(model_path) if file.endswith('.meta')]
            assert len(meta_files) == 1
            meta_file = os.path.join(model_path, meta_files[0])
            ckpt_file = tf.train.latest_checkpoint(model_path)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            saver = tf.train.import_meta_graph(meta_file, clear_devices=True, import_scope=scope)
            saver.restore(sess, ckpt_file)
def euclidean_distance(X, Y, sqrt=False):
    '''Compute the distance between each X and Y.

    Args:
        X: a (m x d) tensor
        Y: a (d x n) tensor

    Returns:
        diffs: an m x n distance matrix (squared unless sqrt=True).
    '''
    with tf.name_scope('EuclideanDistance'):
        # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, computed batched.
        x_sq = tf.reduce_sum(tf.square(X), 1, keep_dims=True)
        y_sq = tf.reduce_sum(tf.square(Y), 0, keep_dims=True)
        cross = tf.matmul(X, Y)
        diffs = x_sq + y_sq - 2 * cross
        if sqrt:
            # Clamp at zero to guard against tiny negative values from
            # floating-point cancellation before taking the root.
            diffs = tf.sqrt(tf.maximum(0.0, diffs))
        return diffs
| 45.775815 | 111 | 0.650381 | """Functions for building tensorflow graph
"""
# MIT License
#
# Copyright (c) 2018 Debayan Deb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
def average_tensors(tensors, name=None):
    """Element-wise mean of a list of equally-shaped tensors."""
    if len(tensors) == 1:
        # Nothing to average; alias the single tensor under the given name.
        return tf.identity(tensors[0], name=name)
    # Stack along a new leading axis, then reduce it away with a mean.
    stacked = tf.concat(axis=0, values=[tf.expand_dims(t, 0) for t in tensors])
    return tf.reduce_mean(stacked, 0, name=name)
def average_grads(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of gradients. The outer list is over different
        towers. The inner list is over the gradient calculation in each tower.
    Returns:
      List of gradients where the gradient has been averaged across all towers.
    """
    if len(tower_grads) == 1:
        return tower_grads[0]
    # Regroup so gradients of the same variable across towers sit together,
    # then average each group; a None gradient stays None.
    return [None if per_var[0] is None else average_tensors(per_var)
            for per_var in zip(*tower_grads)]
def apply_gradient(update_gradient_vars, grads, optimizer, learning_rate, learning_rate_multipliers=None):
    """Build an op applying `grads` to `update_gradient_vars`, with optional
    per-scope learning-rate multipliers.

    :param update_gradient_vars: variables to update (parallel to `grads`).
    :param grads: gradients, one per variable (None entries are skipped).
    :param optimizer: one of 'ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP',
        'MOM', 'SGD'.
    :param learning_rate: base learning rate (scalar or tensor).
    :param learning_rate_multipliers: optional {scope_substring: multiplier};
        a variable whose name contains the scope gets lr * multiplier.
    :return: grouped apply-gradients op covering every scope.
    :raise ValueError: for an unknown optimizer string.
    """
    assert(len(grads)==len(update_gradient_vars))
    if learning_rate_multipliers is None: learning_rate_multipliers = {}
    # Build a dictionary to save multiplier config
    # format -> {scope_name: ((grads, vars), lr_multi)}
    learning_rate_dict = {}
    learning_rate_dict['__default__'] = ([], 1.0)
    for scope, multiplier in learning_rate_multipliers.items():
        assert scope != '__default__'
        learning_rate_dict[scope] = ([], multiplier)
    # Scan all the variables, insert into dict
    scopes = learning_rate_dict.keys()
    for var, grad in zip(update_gradient_vars, grads):
        count = 0
        scope_temp = ''
        for scope in scopes:
            if scope in var.name:
                scope_temp = scope
                count += 1
        # A variable may match at most one multiplier scope; ambiguity is a bug.
        assert count <= 1, "More than one multiplier scopes appear in variable: %s" % var.name
        if count == 0: scope_temp = '__default__'
        if grad is not None:
            learning_rate_dict[scope_temp][0].append((grad, var))
    # Build a optimizer for each multiplier scope
    apply_gradient_ops = []
    print('\nLearning rate multipliers:')
    for scope, scope_content in learning_rate_dict.items():
        scope_grads_vars, multiplier = scope_content
        print('%s:\n # variables: %d\n lr_multi: %f' % (scope, len(scope_grads_vars), multiplier))
        if len(scope_grads_vars) == 0:
            continue
        scope_learning_rate = multiplier * learning_rate
        if optimizer=='ADAGRAD':
            opt = tf.train.AdagradOptimizer(scope_learning_rate)
        elif optimizer=='ADADELTA':
            opt = tf.train.AdadeltaOptimizer(scope_learning_rate, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt = tf.train.AdamOptimizer(scope_learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt = tf.train.RMSPropOptimizer(scope_learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt = tf.train.MomentumOptimizer(scope_learning_rate, 0.9, use_nesterov=False)
        elif optimizer=='SGD':
            opt = tf.train.GradientDescentOptimizer(scope_learning_rate)
        else:
            raise ValueError('Invalid optimization algorithm')
        apply_gradient_ops.append(opt.apply_gradients(scope_grads_vars))
    print('')
    apply_gradient_op = tf.group(*apply_gradient_ops)
    return apply_gradient_op
def rank_accuracy(logits, label, batch_size, k=1):
    """Fraction of samples whose true label appears in the top-k logits.

    :param logits: per-sample class scores; top_k is taken over the last axis.
    :param label: true class indices (castable to int32).
    :param batch_size: number of samples in the batch.
    :param k: rank cutoff (k=1 is standard accuracy).
    :return: scalar float32 tensor in [0, 1].
    """
    _, arg_top = tf.nn.top_k(logits, k)
    label = tf.cast(label, tf.int32)
    # Broadcast each label across k columns so it can be compared against
    # every top-k index of its row.
    label = tf.reshape(label, [batch_size, 1])
    label = tf.tile(label, [1, k])
    correct = tf.reduce_any(tf.equal(label, arg_top), axis=1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return accuracy
def collect_watch_list():
    '''Collect the variables in watch list.
    Tensors or Variables can be added to the 'watch_list' collection as
    ('name', var/tensor) tuples. This gathers them into a dict that can
    be handed to session.run for evaluation.
    '''
    pairs = tf.get_collection('watch_list')
    return {name: value for name, value in pairs}
def save_model(sess, saver, model_dir, global_step):
    """Write a checkpoint (and, once, the metagraph) for the session's graph.

    :param sess: tf.Session holding the variables to save.
    :param saver: tf.train.Saver used for serialization.
    :param model_dir: directory receiving 'ckpt-*' files and 'graph.meta'.
    :param global_step: step number appended to the checkpoint name.
    """
    with sess.graph.as_default():
        checkpoint_path = os.path.join(model_dir, 'ckpt')
        metagraph_path = os.path.join(model_dir, 'graph.meta')
        print('Saving variables...')
        saver.save(sess, checkpoint_path, global_step=global_step, write_meta_graph=False)
        # The metagraph is exported only once; subsequent saves skip it
        # (write_meta_graph=False above keeps checkpoints lightweight).
        if not os.path.exists(metagraph_path):
            print('Saving metagraph...')
            saver.export_meta_graph(metagraph_path)
def restore_model(sess, var_list, model_dir, restore_scopes=None):
    ''' Load the variable values from a checkpoint file into pre-defined graph.
    Filter the variables so that they contain at least one of the given keywords.'''
    with sess.graph.as_default():
        if restore_scopes is not None:
            # Keep only variables whose name contains at least one scope keyword.
            var_list = [var for var in var_list if any([scope in var.name for scope in restore_scopes])]
        model_dir = os.path.expanduser(model_dir)
        # Use the most recent checkpoint found in the directory.
        ckpt_file = tf.train.latest_checkpoint(model_dir)
        print('Restoring variables from %s ...' % ckpt_file)
        saver = tf.train.Saver(var_list)
        saver.restore(sess, ckpt_file)
def load_model(sess, model_path, scope=None):
    '''Load a graph and its variable values from a model path.
    The path is either a frozen-graph file or a directory containing
    exactly one .meta file plus checkpoint files.

    :param sess: tf.Session to load the graph/variables into.
    :param model_path: frozen graph file or checkpoint directory.
    :param scope: optional name scope to import the metagraph under.
    '''
    with sess.graph.as_default():
        model_path = os.path.expanduser(model_path)
        if (os.path.isfile(model_path)):
            # Frozen graph: parse the serialized GraphDef and import it.
            print('Model filename: %s' % model_path)
            # Fix: the original referenced an un-imported `gfile` (NameError);
            # use the tf.gfile module that ships with TF 1.x instead.
            with tf.gfile.FastGFile(model_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
        else:
            # Load graph structure and variable values separately.
            meta_files = [file for file in os.listdir(model_path) if file.endswith('.meta')]
            assert len(meta_files) == 1
            meta_file = os.path.join(model_path, meta_files[0])
            ckpt_file = tf.train.latest_checkpoint(model_path)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            saver = tf.train.import_meta_graph(meta_file, clear_devices=True, import_scope=scope)
            saver.restore(sess, ckpt_file)
def euclidean_distance(X, Y, sqrt=False):
    '''Compute the distance between each X and Y.

    Args:
        X: a (m x d) tensor
        Y: a (d x n) tensor

    Returns:
        diffs: an m x n distance matrix (squared unless sqrt=True).
    '''
    with tf.name_scope('EuclideanDistance'):
        # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, computed batched.
        x_sq = tf.reduce_sum(tf.square(X), 1, keep_dims=True)
        y_sq = tf.reduce_sum(tf.square(Y), 0, keep_dims=True)
        cross = tf.matmul(X, Y)
        diffs = x_sq + y_sq - 2 * cross
        if sqrt:
            # Clamp at zero to guard against tiny negative values from
            # floating-point cancellation before taking the root.
            diffs = tf.sqrt(tf.maximum(0.0, diffs))
        return diffs
def cosine_softmax(prelogits, label, num_classes, weight_decay, gamma=16.0, reuse=None):
    """Scaled-cosine softmax cross-entropy head.

    Features and class-weight columns are both L2-normalized, so the logits
    are cosine similarities scaled by `gamma`.

    :param prelogits: feature tensor; shape[1] is the feature dimension.
    :param label: true class indices.
    :param num_classes: number of output classes.
    :param weight_decay: L2 regularization strength for the weight matrix.
    :param gamma: fixed float scale, or 'auto' to learn it as softplus(alpha).
    :param reuse: forwarded to tf.variable_scope for variable sharing.
    :return: (logits, cross_entropy) pair.
    """
    nrof_features = prelogits.shape[1].value
    with tf.variable_scope('Logits', reuse=reuse):
        weights = tf.get_variable('weights', shape=(nrof_features, num_classes),
                regularizer=slim.l2_regularizer(weight_decay),
                initializer=slim.xavier_initializer(),
                # initializer=tf.truncated_normal_initializer(stddev=0.1),
                dtype=tf.float32)
        # `alpha` only drives the learned scale when gamma == 'auto'.
        alpha = tf.get_variable('alpha', shape=(),
                regularizer=slim.l2_regularizer(1e-2),
                initializer=tf.constant_initializer(1.00),
                trainable=True,
                dtype=tf.float32)
        weights_normed = tf.nn.l2_normalize(weights, dim=0)
        prelogits_normed = tf.nn.l2_normalize(prelogits, dim=1)
        if gamma == 'auto':
            gamma = tf.nn.softplus(alpha)
        else:
            assert type(gamma) == float
            gamma = tf.constant(gamma)
        logits = gamma * tf.matmul(prelogits_normed, weights_normed)
    cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\
        labels=label, logits=logits), name='cross_entropy')
    tf.summary.scalar('gamma', gamma)
    tf.add_to_collection('watch_list', ('gamma', gamma))
    return logits, cross_entropy
def norm_loss(prelogits, alpha, reuse=None):
    """Penalty proportional to the squared L2 norm of the feature vectors.

    :param prelogits: feature tensor; squared norm is taken over axis 1.
    :param alpha: scalar weight applied to each sample's squared norm.
    :param reuse: forwarded to tf.variable_scope for variable sharing.
    :return: scalar `norm_loss` tensor (mean over the batch).
    """
    with tf.variable_scope('NormLoss', reuse=reuse):
        # `sigma` belongs to an earlier formulation (see the commented line
        # below); it is created but unused by the active loss.
        sigma = tf.get_variable('sigma', shape=(),
                # regularizer=slim.l2_regularizer(weight_decay),
                initializer=tf.constant_initializer(0.1),
                trainable=True,
                dtype=tf.float32)
        prelogits_norm = tf.reduce_sum(tf.square(prelogits), axis=1)
        # norm_loss = alpha * tf.square(tf.sqrt(prelogits_norm) - sigma)
        norm_loss = alpha * prelogits_norm
        norm_loss = tf.reduce_mean(norm_loss, axis=0, name='norm_loss')
    # tf.summary.scalar('sigma', sigma)
    # tf.add_to_collection('watch_list', ('sigma', sigma))
    return norm_loss
def angular_softmax(prelogits, label, num_classes, global_step,
                m, lamb_min, lamb_max, weight_decay, reuse=None):
    """A-Softmax (SphereFace-style) angular-margin cross-entropy loss.

    The target-class logit cos(theta) is replaced by a blend of cos(theta)
    and psi(theta) = (-1)^k * cos(m*theta) - 2k, with the blend weight
    controlled by lambda, which is annealed from lamb_max towards lamb_min
    as training progresses.

    Args:
        prelogits: 2-D float tensor of embeddings, shape (batch, features).
        label: 1-D int tensor of class indices.
        num_classes: number of classes.
        global_step: scalar step tensor used to anneal lambda.
        m: integer angular-margin multiplier, 0 <= m <= 5 (indexes the
            Chebyshev-style cos(m*theta) table below).
        lamb_min, lamb_max: annealing bounds for lambda.
        weight_decay: accepted for interface parity with the other losses;
            the weights here use a fixed 1e-4 regularizer.
        reuse: forwarded to tf.variable_scope.

    Returns:
        The mean sparse softmax cross-entropy scalar.
    """
    num_features = prelogits.shape[1].value
    batch_size = tf.shape(prelogits)[0]
    # No-op rebindings kept from the original experiment code.
    lamb_min = lamb_min
    lamb_max = lamb_max
    # Closed-form expansions of cos(m*theta) in terms of x = cos(theta),
    # indexed by m.
    lambda_m_theta = [
        lambda x: x**0,
        lambda x: x**1,
        lambda x: 2.0*(x**2) - 1.0,
        lambda x: 4.0*(x**3) - 3.0*x,
        lambda x: 8.0*(x**4) - 8.0*(x**2) + 1.0,
        lambda x: 16.0*(x**5) - 20.0*(x**3) + 5.0*x
    ]
    with tf.variable_scope('AngularSoftmax', reuse=reuse):
        weights = tf.get_variable('weights', shape=(num_features, num_classes),
                regularizer=slim.l2_regularizer(1e-4),
                initializer=slim.xavier_initializer(),
                # initializer=tf.truncated_normal_initializer(stddev=0.1),
                trainable=True,
                dtype=tf.float32)
        # Non-trainable state holding the current annealed lambda.
        lamb = tf.get_variable('lambda', shape=(),
                initializer=tf.constant_initializer(lamb_max),
                trainable=False,
                dtype=tf.float32)
        prelogits_norm = tf.sqrt(tf.reduce_sum(tf.square(prelogits), axis=1, keep_dims=True))
        weights_normed = tf.nn.l2_normalize(weights, dim=0)
        prelogits_normed = tf.nn.l2_normalize(prelogits, dim=1)
        # Compute cosine and phi.  Clamp to [-1, 1] before acos to avoid NaNs
        # from floating-point drift.
        cos_theta = tf.matmul(prelogits_normed, weights_normed)
        cos_theta = tf.minimum(1.0, tf.maximum(-1.0, cos_theta))
        theta = tf.acos(cos_theta)
        cos_m_theta = lambda_m_theta[m](cos_theta)
        # k counts which monotonic segment of cos(m*theta) we are on, making
        # psi(theta) monotonically decreasing on [0, pi].
        k = tf.floor(m*theta / 3.14159265)
        phi_theta = tf.pow(-1.0, k) * cos_m_theta - 2.0 * k
        # Rescale by the (un-normalized) feature norms.
        cos_theta = cos_theta * prelogits_norm
        phi_theta = phi_theta * prelogits_norm
        lamb_new = tf.maximum(lamb_min, lamb_max/(1.0+0.1*tf.cast(global_step, tf.float32)))
        update_lamb = tf.assign(lamb, lamb_new)
        # Compute loss; the control dependency forces the lambda update to
        # run on every loss evaluation.
        with tf.control_dependencies([update_lamb]):
            label_dense = tf.one_hot(label, num_classes, dtype=tf.float32)
            # For the true class, replace a 1/(1+lambda) fraction of
            # cos(theta) with phi(theta); other classes keep plain logits.
            logits = cos_theta
            logits -= label_dense * cos_theta * 1.0 / (1.0+lamb)
            logits += label_dense * phi_theta * 1.0 / (1.0+lamb)
            cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\
                labels=label, logits=logits), name='cross_entropy')
        tf.add_to_collection('watch_list', ('lamb', lamb))
        return cross_entropy
def split_softmax(prelogits, label, num_classes,
                global_step, weight_decay, gamma=16.0, reuse=None):
    """Experimental margin-based embedding loss ("split softmax").

    Normalizes embeddings and class weights, compares each embedding against
    its own class weight (positive pairs) and all other class weights
    (negative pairs), and applies a hinge on the log-sum-exp of the scaled
    negative logits versus the positive logits.  Also maintains (via graph
    UPDATE_OPS) non-trainable decision thresholds and, when the weight
    matrix itself is non-trainable, a running class-center update.

    NOTE(review): this is research code with many alternative formulations
    left commented out; the live path is documented inline below.  `sigma`
    and several intermediates (e.g. `norm`, `factor`, `bias`, `loss_glob`,
    `loss_weight`, `loss_center`) are computed but not included in the
    returned losses.

    Args:
        prelogits: 2-D float tensor of embeddings (batch, features).
        label: 1-D int tensor of class indices.
        num_classes: number of classes.
        global_step: scalar step tensor (used only by the tuple-gamma
            schedule branch).
        weight_decay: L2 regularizer coefficient for the class weights.
        gamma: logit scale: a float constant, the string 'auto' (learned via
            a softplus of alpha), or a (t_min, decay) tuple for a 1/t
            schedule.
        reuse: forwarded to tf.variable_scope.

    Returns:
        A list of loss scalars (currently just the hinge 'split_loss').
    """
    nrof_features = prelogits.shape[1].value
    batch_size = tf.shape(prelogits)[0]
    with tf.variable_scope('SplitSoftmax', reuse=reuse):
        weights = tf.get_variable('weights', shape=(num_classes, nrof_features),
                regularizer=slim.l2_regularizer(weight_decay),
                initializer=slim.xavier_initializer(),
                # initializer=tf.truncated_normal_initializer(stddev=0.1),
                # initializer=tf.constant_initializer(0),
                trainable=True,
                dtype=tf.float32)
        alpha = tf.get_variable('alpha', shape=(),
                regularizer=slim.l2_regularizer(1e-2),
                initializer=tf.constant_initializer(1.00),
                trainable=True,
                dtype=tf.float32)
        beta = tf.get_variable('beta', shape=(),
                # regularizer=slim.l2_regularizer(1e-2),
                initializer=tf.constant_initializer(0.0),
                trainable=True,
                dtype=tf.float32)
        sigma = tf.get_variable('sigma', shape=(),
                regularizer=slim.l2_regularizer(1e-1),
                initializer=tf.constant_initializer(1.0),
                trainable=True,
                dtype=tf.float32)
        # Running decision thresholds, maintained by UPDATE_OPS below.
        threshold_pos = tf.get_variable('threshold_pos', shape=(),
                initializer=tf.constant_initializer(16.0),
                trainable=False,
                dtype=tf.float32)
        threshold_neg = tf.get_variable('threshold_neg', shape=(),
                initializer=tf.constant_initializer(0.0),
                trainable=False,
                dtype=tf.float32)
        # Normalize the vectors so dot products are cosine similarities.
        weights_normed = tf.nn.l2_normalize(weights, dim=1)
        prelogits_normed = tf.nn.l2_normalize(prelogits, dim=1)
        # weights_normed = weights
        # prelogits_normed = prelogits
        # Calculate per-class centers within the batch.
        centers, label_center, center_idx, center_weight = centers_by_label(prelogits_normed, label)
        centers = tf.gather(centers, center_idx)
        centers_normed = tf.nn.l2_normalize(centers, dim=1)
        coef = 1.0
        # Labels and logits between the batch and the class exemplars
        # ("glob" = embedding vs. class-weight comparisons).
        label_mat_glob = tf.one_hot(label, num_classes, dtype=tf.float32)
        label_mask_pos_glob = tf.cast(label_mat_glob, tf.bool)
        label_mask_neg_glob = tf.logical_not(label_mask_pos_glob)
        # label_exp_batch = tf.expand_dims(label, 1)
        # label_exp_glob = tf.expand_dims(label_history, 1)
        # label_mat_glob = tf.equal(label_exp_batch, tf.transpose(label_exp_glob))
        # label_mask_pos_glob = tf.cast(label_mat_glob, tf.bool)
        # label_mask_neg_glob = tf.logical_not(label_mat_glob)
        # dist_mat_glob = euclidean_distance(prelogits_normed, tf.transpose(weights_normed), False)
        dist_mat_glob = tf.matmul(prelogits_normed, tf.transpose(weights_normed)) # + beta
        dist_pos_glob = tf.boolean_mask(dist_mat_glob, label_mask_pos_glob)
        dist_neg_glob = tf.boolean_mask(dist_mat_glob, label_mask_neg_glob)
        logits_glob = coef * dist_mat_glob
        logits_pos_glob = tf.boolean_mask(logits_glob, label_mask_pos_glob)
        logits_neg_glob = tf.boolean_mask(logits_glob, label_mask_neg_glob)
        # Labels and logits within the batch (embedding vs. embedding);
        # the diagonal (self-pairs) is excluded from the positive mask.
        label_exp_batch = tf.expand_dims(label, 1)
        label_mat_batch = tf.equal(label_exp_batch, tf.transpose(label_exp_batch))
        label_mask_pos_batch = tf.cast(label_mat_batch, tf.bool)
        label_mask_neg_batch = tf.logical_not(label_mask_pos_batch)
        mask_non_diag = tf.logical_not(tf.cast(tf.eye(batch_size), tf.bool))
        label_mask_pos_batch = tf.logical_and(label_mask_pos_batch, mask_non_diag)
        # dist_mat_batch = euclidean_distance(prelogits_normed, tf.transpose(prelogits_normed), False)
        dist_mat_batch = tf.matmul(prelogits_normed, tf.transpose(prelogits_normed))
        dist_pos_batch = tf.boolean_mask(dist_mat_batch, label_mask_pos_batch)
        dist_neg_batch = tf.boolean_mask(dist_mat_batch, label_mask_neg_batch)
        logits_batch = coef * dist_mat_batch
        logits_pos_batch = tf.boolean_mask(logits_batch, label_mask_pos_batch)
        logits_neg_batch = tf.boolean_mask(logits_batch, label_mask_neg_batch)
        # num_anchor = 32
        # prelogits_anchor = tf.reshape(prelogits_normed[:num_anchor], [num_anchor, 1, nrof_features])
        # prelogits_refer = tf.reshape(prelogits_normed[num_anchor:], [num_anchor, -1, nrof_features])
        # dist_anchor = tf.reduce_sum(tf.square(prelogits_anchor-prelogits_refer), axis=2)
        # dist_anchor = tf.reshape(dist_anchor, [-1])
        # logits_anchor = -0.5 * gamma * dist_anchor
        # The live loss below uses the embedding-vs-weight ("glob") pairs.
        logits_pos = logits_pos_glob
        logits_neg = logits_neg_glob
        dist_pos = dist_pos_glob
        dist_neg = dist_neg_glob
        # epsilon_trsd = 0.3
        t_pos = coef * (threshold_pos)
        t_neg = coef * (threshold_neg)
        # Resolve gamma: learned ('auto'), scheduled (tuple), or constant.
        if gamma == 'auto':
            # gamma = tf.nn.softplus(alpha)
            gamma = tf.log(tf.exp(1.0) + tf.exp(alpha))
        elif type(gamma) == tuple:
            t_min, decay = gamma
            epsilon = 1e-5
            t = t_min + 1.0/(epsilon + decay*tf.cast(global_step, tf.float32))
            gamma = 1.0 / t
        else:
            assert type(gamma) == float
            gamma = tf.constant(gamma)
        hinge_loss = lambda x: tf.nn.relu(1.0 + x)
        margin_func = hinge_loss
        # Losses
        losses = []
        # num_pos = tf.cast(0.95 * tf.cast(tf.size(logits_pos), tf.float32), tf.int32)
        # # num_neg = tf.cast(0.75 * tf.cast(tf.size(logits_neg), tf.float32), tf.int32)
        # q_d = tf.pow(tf.sqrt(dist_neg), 2-nrof_features)*tf.pow(1-0.25*dist_neg, (3-nrof_features)/2)
        # tf.add_to_collection('watch_list', ('q_d', tf.reduce_sum(q_d)))
        # q_d = tf.minimum(1.0, 1 * q_d / tf.reduce_sum(q_d))
        # tf.add_to_collection('watch_list', ('q_d', tf.reduce_mean(q_d)))
        # sample_mask = tf.random_uniform(shape=tf.shape(logits_neg)) <= q_d
        # sample_mask = logits_neg >= tf.reduce_min(logits_pos)
        # _logits_neg = tf.boolean_mask(logits_neg, sample_mask)
        # tf.add_to_collection('watch_list', ('sample_ratio',
        #     tf.cast(tf.size(_logits_neg),tf.float32) / tf.cast(tf.size(logits_neg),tf.float32)))
        # gamma2 = 1 / 0.01
        _logits_pos = tf.reshape(logits_pos, [batch_size, -1])
        _logits_neg = tf.reshape(logits_neg, [batch_size, -1])
        norm = tf.square(tf.reduce_sum(tf.square(prelogits), axis=1, keep_dims=True))
        norm_weights = tf.norm(tf.gather(weights, label), axis=1, keep_dims=True)
        # t_pos/t_neg rebound to the learned bias (shadows the threshold
        # values above; only used for the acc_pos/acc_neg diagnostics... no,
        # those use the earlier bindings -- these feed nothing live).
        t_pos = (beta)
        t_neg = (beta)
        _logits_pos = _logits_pos * gamma
        _logits_neg = _logits_neg * gamma
        # _logits_neg, _ = tf.nn.top_k(_logits_neg, num_neg)
        # _logits_pos, _ = tf.nn.top_k(_logits_pos, num_pos)
        # _logits_neg = tf.boolean_mask(_logits_neg, sample_mask)
        # _logits_pos = -tf.reduce_logsumexp(-_logits_pos)# , axis=1)[:,None]
        # Soft-max over each row's negatives (log-sum-exp), kept as a column.
        _logits_neg = tf.reduce_logsumexp(_logits_neg, axis=1)[:,None]
        # _logits_pos = tf.reduce_mean(_logits_pos)
        #-- Simulate Ranking
        # se_neg = tf.reduce_sum(tf.exp(_logits_neg))
        # min_pos = tf.reduce_min(_logits_pos)
        # t_pos = tf.stop_gradient(tf.log(se_neg))
        # t_neg = tf.stop_gradient(tf.log(se_neg - tf.exp(_logits_neg)))
        # norm = tf.reshape(prelogits[:,-1], [batch_size, -1])
        # norm_weighted = tf.exp(-norm)
        # norm_weighted = norm / tf.reduce_sum(norm) * tf.cast(tf.size(norm), tf.float32)
        # sigma_batch = tf.reshape(tf.gather(sigma, label), [batch_size, -1])
        # Margin of the hinge below.
        m = 5.0
        # tf.add_to_collection('watch_list', ('m',m))
        factor = 1 / tf.cast(batch_size, tf.float32)
        bias = tf.log(tf.cast(num_classes, tf.float32))
        # loss_pos and loss_neg are identical halves of the same hinge term.
        loss_pos = tf.nn.relu(m + _logits_neg - _logits_pos) * 0.5
        loss_neg = tf.nn.relu(m + _logits_neg - _logits_pos) * 0.5
        loss = tf.reduce_mean((loss_pos + loss_neg), name='split_loss')
        losses.extend([loss])
        tf.add_to_collection('watch_list', ('split_loss', loss))
        # Global loss (computed for diagnostics; not added to `losses`).
        # weights_batch = tf.gather(weights_normed, label)
        # _logits_pos_glob = tf.reduce_sum(tf.square(prelogits_normed - weights_batch), axis=1) * coef * gamma
        _logits_pos_glob = tf.reshape(logits_pos_glob, [batch_size, -1]) * gamma
        _logits_neg_glob = tf.reshape(logits_neg_glob, [batch_size, -1]) * gamma
        _logits_neg_glob = tf.reduce_logsumexp(_logits_neg_glob) # , axis=1)[:,None]
        loss_glob = tf.reduce_mean(tf.nn.relu(1 + _logits_neg_glob - _logits_pos_glob), name='loss_glob')
        # losses.append(loss_glob)
        # tf.add_to_collection('watch_list', ('loss_glob', loss_glob))
        # Weight decay (computed but not added to `losses`).
        loss_weight = tf.reduce_sum( 1e-7 * tf.square(weights_normed), name='loss_weight')
        # losses.append(loss_weight)
        # tf.add_to_collection('watch_list', ('loss_weight', loss_weight))
        # Split Softmax
        # _logits_pos_glob = tf.reshape(logits_pos_glob, [batch_size, -1]) * gamma
        # _logits_neg_glob = tf.reshape(logits_neg_glob, [batch_size, -1]) * gamma
        # _logits_pos_glob = tf.log(tf.reduce_sum(tf.exp(_logits_pos_glob) + num_classes-1, axis=1)[:,None])
        # _logits_neg_glob = tf.reduce_logsumexp(_logits_neg_glob, axis=1)[:,None]
        # _t_pos = t_pos * gamma
        # _t_neg = t_neg * gamma
        # loss_pos = tf.reduce_mean(tf.nn.softplus(_t_pos - _logits_pos_glob), name='loss_pos')
        # loss_neg = tf.reduce_mean(tf.nn.softplus(_logits_neg_glob - _t_neg), name='loss_neg')
        # losses.extend([loss_pos, loss_neg])
        # Batch center loss (computed but not added to `losses`).
        # centers_batch = tf.gather(centers, center_idx)
        centers_batch = tf.gather(weights_normed, label)
        dist_center = tf.reduce_sum(tf.square(prelogits_normed - centers_batch), axis=1)
        loss_center = tf.reduce_mean(1.0*dist_center, name='loss_center')
        # losses.append(loss_center)
        # tf.add_to_collection('watch_list', ('loss_center', loss_center))
        # Update running thresholds only when they are non-trainable
        # (i.e. tracked statistics rather than learned parameters).
        if not threshold_pos in tf.trainable_variables():
            # -- Mean threshold
            mean_pos, var_pos = tf.nn.moments(dist_pos, axes=[0])
            mean_neg, var_neg = tf.nn.moments(dist_neg, axes=[0])
            std_pos = tf.sqrt(var_pos)
            std_neg = tf.sqrt(var_neg)
            threshold_batch = std_neg*mean_pos / (std_pos+std_neg) + std_pos*mean_neg / (std_pos+std_neg)
            threshold_pos_batch = threshold_neg_batch = threshold_batch
            # -- Logits
            # threshold_pos_batch = tf.reduce_logsumexp(_logits_neg)
            # threshold_neg_batch = -tf.reduce_logsumexp(-_logits_pos)
            # -- Quantile
            # diff_pos_sorted, _ = tf.nn.top_k(logits_pos, 2)
            # diff_neg_sorted, _ = tf.nn.top_k(logits_neg, 2704237)
            # threshold_pos_batch = diff_neg_sorted[-1]
            # threshold_neg_batch = diff_pos_sorted[-1]
            threshold_neg_batch = tf.reduce_min(_logits_pos)
            threshold_pos_batch = tf.reduce_max(_logits_neg)
            # -- Update: exponential moving average with rate 0.1.
            diff_threshold_pos = threshold_pos - threshold_pos_batch
            diff_threshold_neg = threshold_neg - threshold_neg_batch
            diff_threshold_pos = 0.1 * diff_threshold_pos
            diff_threshold_neg = 0.1 * diff_threshold_neg
            threshold_pos_update_op = tf.assign_sub(threshold_pos, diff_threshold_pos)
            threshold_neg_update_op = tf.assign_sub(threshold_neg, diff_threshold_neg)
            threshold_update_op = tf.group(threshold_pos_update_op, threshold_neg_update_op)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, threshold_update_op)
        # Update centers (center-loss style running mean) when the weight
        # matrix is non-trainable.
        if not weights in tf.trainable_variables():
            weights_batch = tf.gather(weights, label)
            diff_centers = weights_batch - prelogits
            unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
            appear_times = tf.gather(unique_count, unique_idx)
            appear_times = tf.reshape(appear_times, [-1, 1])
            diff_centers = diff_centers / tf.cast((1 + appear_times), tf.float32)
            diff_centers = 0.5 * diff_centers
            centers_update_op = tf.scatter_sub(weights, label, diff_centers)
            # centers_decay_op = tf.assign_sub(weights, 2*weight_decay*weights)# weight decay
            centers_update_op = tf.group(centers_update_op)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, centers_update_op)
        # if not sigma in tf.trainable_variables():
        #     weights_batch = tf.gather(weights, label)
        #     diff_centers = weights_batch - prelogits
        #     _, var_pos = tf.nn.moments(diff_centers, axes=[0])
        #     sigma_batch = tf.reduce_mean(tf.sqrt(var_pos))
        #     diff_sigma = sigma - sigma_batch
        #     diff_sigma = 0.01 * diff_sigma
        #     sigma_update_op = tf.assign_sub(sigma, diff_sigma)
        #     tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, sigma_update_op)
        # Analysis: summaries and watch-list diagnostics.
        mean_dist_pos = tf.reduce_mean(dist_pos, name='mean_dist_pos')
        mean_dist_neg = tf.reduce_mean(dist_neg, name='mean_dist_neg')
        acc_pos = tf.reduce_mean(tf.cast(tf.greater_equal(logits_pos, t_pos), tf.float32), name='acc_pos')
        acc_neg = tf.reduce_mean(tf.cast(tf.less(logits_neg, t_neg), tf.float32), name='acc_neg')
        tf.summary.scalar('threshold_pos', threshold_pos)
        tf.summary.scalar('mean_dist_pos', mean_dist_pos)
        tf.summary.scalar('mean_dist_neg', mean_dist_neg)
        tf.summary.scalar('acc_pos', acc_pos)
        tf.summary.scalar('acc_neg', acc_neg)
        tf.summary.scalar('gamma', gamma)
        tf.summary.scalar('alpha', alpha)
        tf.summary.scalar('beta', beta)
        tf.summary.histogram('dist_pos', dist_pos)
        tf.summary.histogram('dist_neg', dist_neg)
        # tf.summary.histogram('dist_neg_min', _logits_neg / coef)
        # tf.summary.histogram('sigma', sigma)
        # tf.add_to_collection('watch_list', ('alpha', alpha))
        tf.add_to_collection('watch_list', ('gamma', gamma))
        tf.add_to_collection('watch_list', ('alpha', alpha))
        tf.add_to_collection('watch_list', ('beta', beta))
        # tf.add_to_collection('watch_list', ('t_pos', t_pos))
        # tf.add_to_collection('watch_list', ('t_neg', tf.reduce_mean(t_neg)))
        # tf.add_to_collection('watch_list', ('dpos', mean_dist_pos))
        # tf.add_to_collection('watch_list', ('dneg', mean_dist_neg))
        # tf.add_to_collection('watch_list', ('loss_pos', loss_pos))
        # tf.add_to_collection('watch_list', ('loss_neg', loss_neg))
        # tf.add_to_collection('watch_list', ('sigma', sigma))
        # tf.add_to_collection('watch_list', ('logits_pos', tf.reduce_mean(_logits_pos)))
        # tf.add_to_collection('watch_list', ('logits_neg', tf.reduce_mean(_logits_neg)))
        # tf.add_to_collection('watch_list', ('acc_pos', acc_pos))
        # tf.add_to_collection('watch_list', ('acc_neg', acc_neg))
        return losses
def centers_by_label(features, label):
    """Compute the per-class mean (center) of the rows of ``features``.

    Returns:
        A tuple ``(centers, unique_label, unique_idx, unique_count)`` where
        ``centers`` holds one mean vector per distinct label, ``unique_label``
        the distinct labels, ``unique_idx`` maps each input row to its center
        row, and ``unique_count`` the number of rows per distinct label.
    """
    uniq, row_to_center, counts = tf.unique_with_counts(label)
    # Divide each row by its class size up front, then segment-sum: the sum
    # of (row / class_size) over one class equals that class's mean.
    per_row_counts = tf.reshape(tf.gather(counts, row_to_center), [-1, 1])
    scaled_rows = features / tf.cast(per_row_counts, tf.float32)
    centers = tf.unsorted_segment_sum(scaled_rows, row_to_center, tf.size(uniq))
    return centers, uniq, row_to_center, counts
def cluster_loss(prelogits, label, num_classes,
                weight_decay, gamma=16.0, reuse=None):
    """Clustering-style embedding loss: pull instances toward their batch
    class center while pushing batch centers apart.

    NOTE(review): the ``weight_decay`` and ``gamma`` arguments and the
    ``alpha`` variable are effectively unused -- both ``alpha`` and ``gamma``
    are overwritten with hard-coded constants (1.0 and 20.0) before the loss
    is formed (see the "Losses" section below).

    Args:
        prelogits: 2-D float tensor of embeddings (batch, features).
        label: 1-D int tensor of class indices.
        num_classes: accepted for interface parity; unused here.
        weight_decay: accepted for interface parity; unused here.
        gamma: nominal logit scale; overridden to 20.0 below.
        reuse: forwarded to tf.variable_scope.

    Returns:
        A single-element list containing the instance loss scalar.
    """
    embedding_size = prelogits.shape[1].value
    batch_size = tf.shape(prelogits)[0]
    with tf.variable_scope('ClusterLoss', reuse=reuse):
        alpha = tf.get_variable('alpha', shape=(),
                # regularizer=slim.l2_regularizer(weight_decay),
                initializer=tf.constant_initializer(1.0),
                trainable=True,
                dtype=tf.float32)
        gamma = gamma
        prelogits = tf.nn.l2_normalize(prelogits, dim=1)
        centers, label_center, center_idx, center_weight = centers_by_label(prelogits, label)
        centers = tf.nn.l2_normalize(centers, dim=1)
        num_centers = tf.size(label_center)
        # Compute pairwise distances between centers; keep only the strict
        # upper triangle (each unordered pair once, no self-distances).
        dist_centers_mat = euclidean_distance(centers, tf.transpose(centers))
        mask_non_diag = tf.logical_not(tf.cast(tf.eye(num_centers), tf.bool))
        mask_triu = tf.cast(tf.matrix_band_part(tf.ones((num_centers, num_centers)), 0, -1), tf.bool)
        mask_triu = tf.logical_and(mask_non_diag, mask_triu)
        dist_centers_vec = tf.boolean_mask(dist_centers_mat, mask_triu)
        # Compute distances between each instance and every center.
        centers_batch = tf.gather(centers, center_idx)
        dist_instance = euclidean_distance(prelogits, tf.transpose(centers))
        label_dense = tf.one_hot(center_idx, num_centers, dtype=tf.float32)
        label_pos = tf.cast(label_dense, tf.bool)
        label_neg = tf.logical_not(label_pos)
        dist_instance_pos = tf.boolean_mask(dist_instance, label_pos)
        dist_instance_neg = tf.boolean_mask(dist_instance, label_neg)
        # Losses -- note alpha/gamma are overridden here with constants.
        alpha = 1.0
        gamma = 20.0
        dist_instance_pos = tf.reshape(dist_instance_pos, [batch_size, -1])
        dist_instance_neg = tf.reshape(dist_instance_neg, [batch_size, -1])
        logits_pos = - 0.5 * 2 * dist_instance_pos * gamma
        logits_neg = - 0.5 * dist_centers_vec * gamma
        # logits_pos = tf.reduce_mean(logits_pos)
        # Soft-min over center-center distances via log-sum-exp of negatives.
        logits_neg = tf.reduce_logsumexp(logits_neg)#, axis=1)[:,None]
        # min_dist_centers = -tf.reduce_logsumexp(-dist_centers_vec)
        # loss_instance = tf.identity(alpha*dist_instance_pos - min_dist_centers)
        loss_instance = tf.reduce_mean(tf.nn.softplus(logits_neg - logits_pos))
        losses = [loss_instance]
        # Analysis: summaries and watch-list diagnostics.
        tf.summary.histogram('prelogits', prelogits)
        # tf.summary.scalar('min_dist_centers', min_dist_centers)
        # tf.summary.histogram('min_dist_centers', min_dist_centers)
        tf.summary.histogram('dist_centers_vec', dist_centers_vec)
        tf.summary.histogram('dist_instances_pos', dist_instance_pos)
        # tf.add_to_collection('watch_list', ('dcenters', min_dist_centers))
        tf.add_to_collection('watch_list', ('loss', loss_instance))
        # tf.add_to_collection('watch_list', ('alpha', alpha))
        return losses
def binary_loss(prelogits, label, num_classes,
                weight_decay, gamma=16.0, reuse=None):
    """Binary-code embedding loss.

    Learns a per-class soft binary code (sigmoid of a weight row) and trains
    each embedding dimension as an independent binary classifier against its
    class's code, plus an entropy term over the average code.

    ``weight_decay`` and ``gamma`` are accepted for interface parity with the
    sibling losses but are unused here.

    Returns:
        A two-element list ``[closs, eloss]``: the summed-per-sample sigmoid
        cross-entropy (mean over the batch) and the negative Bernoulli
        entropy of the mean code (minimizing it drives each bit's average
        activation toward 0.5, i.e. balanced bit usage across classes).
    """
    feat_dim = prelogits.shape[1].value
    batch_size = tf.shape(prelogits)[0]
    with tf.variable_scope('BinaryLoss', reuse=reuse):
        code_logits = tf.get_variable(
            'weights', shape=(num_classes, feat_dim),
            initializer=tf.truncated_normal_initializer(stddev=1.0),
            trainable=True,
            dtype=tf.float32)
        class_codes = tf.nn.sigmoid(code_logits)
        target_codes = tf.gather(class_codes, label)
        # Per-dimension sigmoid cross-entropy against the class's soft code,
        # summed over dimensions and averaged over the batch.
        per_bit_ce = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=prelogits, labels=target_codes)
        closs = tf.reduce_mean(tf.reduce_sum(per_bit_ce, axis=1),
                               name='cross_entropy')
        bit_on = tf.reduce_mean(class_codes, axis=0)
        bit_off = tf.reduce_mean(1 - class_codes, axis=0)
        eloss = tf.reduce_sum(
            bit_on * tf.log(bit_on) + bit_off * tf.log(bit_off),
            name='entropy')
        losses = [closs, eloss]
        tf.add_to_collection('watch_list', ('closs', closs))
        tf.add_to_collection('watch_list', ('eloss', eloss))
        return losses
| 28,398 | 0 | 253 |
578f06ae1ebd5a3d9628ef466c4ca8f12de717ab | 299 | py | Python | corma.py | BjornArnelid/corma | f5999a5962375a6067e8eea86f6ecaf2b6fca578 | [
"MIT"
] | null | null | null | corma.py | BjornArnelid/corma | f5999a5962375a6067e8eea86f6ecaf2b6fca578 | [
"MIT"
] | null | null | null | corma.py | BjornArnelid/corma | f5999a5962375a6067e8eea86f6ecaf2b6fca578 | [
"MIT"
] | null | null | null | """Corma, one time report to rule them all!"""
import argparse
# corma report test-dev -t 7h -d 2020-10-12
# corma report vacation
# corma submit bombardier -t 2020-09
if __name__ == "__main__":
    # Minimal CLI skeleton: currently only validates that a positional
    # 'register' argument was supplied (the parsed result is discarded).
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("register")
    arg_parser.parse_args()
| 19.933333 | 46 | 0.702341 | """Corma, one time report to rule them all!"""
import argparse
# corma report test-dev -t 7h -d 2020-10-12
# corma report vacation
# corma submit bombardier -t 2020-09
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("register")
parser.parse_args()
| 0 | 0 | 0 |
e155275fca96cdfbd96ddb9a082aefb4fb4e0e36 | 3,144 | py | Python | bin/final_summary.py | salzmanlab/SpliZ | 146cd9b64b9fe718975bb6c59813e4732aac7e03 | [
"MIT"
] | 3 | 2021-09-15T06:27:06.000Z | 2022-01-13T02:51:16.000Z | bin/final_summary.py | salzmanlab/SpliZ | 146cd9b64b9fe718975bb6c59813e4732aac7e03 | [
"MIT"
] | 9 | 2021-09-12T04:25:08.000Z | 2022-03-31T18:31:45.000Z | bin/final_summary.py | salzmanlab/SpliZ | 146cd9b64b9fe718975bb6c59813e4732aac7e03 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
import logging
main() | 38.814815 | 176 | 0.674936 | #!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
import logging
def get_args():
    """Define and parse this script's command-line interface.

    All options are flag-style (optional); ``grouping_level_2`` defaults to
    "ontology" and ``grouping_level_1`` to "dummy".
    """
    p = argparse.ArgumentParser(description="Create final summary file")
    p.add_argument("--perm_pvals", help="Permutation pvalue file")
    p.add_argument("--first_evec", help="First eigenvector file")
    p.add_argument("--second_evec", help="Second eigenvector file")
    p.add_argument("--third_evec", help="Third eigenvector file")
    p.add_argument("--splizvd", help="SpliZVD file")
    p.add_argument(
        "--grouping_level_2", default="ontology",
        help="column to group the data by (e.g. ontology, compartment, tissue)")
    p.add_argument(
        "--grouping_level_1", default="dummy",
        help="subset data by this column before checking for differences (e.g. tissue, compartment)")
    p.add_argument("--outname", help="Name of output file")
    p.add_argument("--outname_log", help="Name of log file")
    return p.parse_args()
def main():
    """Build the final SpliZ summary table.

    Reads the permutation p-values, the three eigenvector (SpliZsite)
    files, and the SpliZVD table; then, for every
    (gene, grouping_level_1, grouping_level_2) combination, records the
    median of each SpliZ score, the adjusted permutation p-value, and the
    gene's SpliZsites, and writes the sorted result as a TSV.
    """
    args = get_args()
    logging.basicConfig(
        filename = args.outname_log,
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info("Starting")
    # load in data
    pval_df = pd.read_csv(args.perm_pvals, sep = "\t")
    splizsite_dfs = []
    evec_files = [args.first_evec, args.second_evec, args.third_evec]
    for evec_file in evec_files:
        splizsite_dfs.append(pd.read_csv(evec_file, sep="\t"))
    splizsite_df = pd.concat(splizsite_dfs,axis=0).drop_duplicates()
    df = pd.read_csv(args.splizvd, sep="\t")
    # NOTE(review): when grouping_level_1 == "tiss_comp" and that column is
    # absent, the right-hand side reads df["tiss_comp"] (the very column the
    # condition just established is missing), which raises KeyError -- the
    # source columns were presumably meant here (e.g. tissue + compartment);
    # confirm against the pipeline. Also `&` between Python bools works but
    # `and` would be conventional.
    if (args.grouping_level_1 == "tiss_comp") & (args.grouping_level_1 not in df.columns):
        df["tiss_comp"] = df[args.grouping_level_1] + df[args.grouping_level_2]
    elif args.grouping_level_1 == "dummy":
        # A constant column so groupby(grouping_level_1) degenerates to
        # "no subsetting".
        df["dummy"] = "dummy"
    # combine outputs: one row per (gene, level_1, level_2) combination.
    out_dict = {"gene" : [],"grouping_level_1" : [], "grouping_level_2" : [], "SpliZsites" : []}
    z_cols = ["scZ","svd_z0","svd_z1","svd_z2"]
    for z_col in z_cols:
        out_dict["{}_median".format(z_col)] = []
        out_dict["{}_pval".format(z_col)] = []
    for gene, gene_df in df.groupby("gene"):
        for tiss, tiss_df in gene_df.groupby(args.grouping_level_1):
            for ont, ont_df in tiss_df.groupby(args.grouping_level_2):
                out_dict["gene"].append(gene)
                out_dict["grouping_level_1"].append(tiss)
                out_dict["grouping_level_2"].append(ont)
                out_dict["SpliZsites"].append(",".join([str(x) for x in splizsite_df[splizsite_df["gene"] == gene]["end"]]))
                for z_col in z_cols:
                    out_dict["{}_median".format(z_col)].append(ont_df[z_col].median())
                    # NOTE(review): bare `except:` also swallows e.g.
                    # KeyboardInterrupt; the intended case is presumably an
                    # empty selection (IndexError from .iloc[0]) or a missing
                    # column (KeyError) -- narrow when possible.
                    try:
                        pval = pval_df[(pval_df["gene"] == gene) & ((pval_df["grouping_level_1"] == tiss) | (pval_df["grouping_level_1"].isna()))]["perm_pval_adj_{}".format(z_col)].iloc[0]
                    except:
                        pval = np.nan
                    out_dict["{}_pval".format(z_col)].append(pval)
    out_df = pd.DataFrame.from_dict(out_dict)
    out_df = out_df.sort_values(["gene","grouping_level_1","scZ_median"])
    out_df.to_csv(args.outname, sep="\t", index=False)
    logging.info("Completed")
# Module-level entry point; NOTE(review): no `if __name__ == "__main__":`
# guard, so importing this module runs the whole pipeline.
main()
0a6ffa4d5294fb5befb195d7d3fb8f4ad2645eb3 | 16,268 | py | Python | ironic/api/controllers/v1/volume_connector.py | inmotionhosting/ironic | 1c7b5f82592e23ab66dddca56e0b059d3cb0710b | [
"Apache-2.0"
] | 1 | 2021-02-27T02:48:59.000Z | 2021-02-27T02:48:59.000Z | ironic/api/controllers/v1/volume_connector.py | inmotionhosting/ironic | 1c7b5f82592e23ab66dddca56e0b059d3cb0710b | [
"Apache-2.0"
] | null | null | null | ironic/api/controllers/v1/volume_connector.py | inmotionhosting/ironic | 1c7b5f82592e23ab66dddca56e0b059d3cb0710b | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
from ironic import api
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
from ironic import objects
# Timing-metrics logger used by the @METRICS.timer decorators below.
METRICS = metrics_utils.get_metrics_logger(__name__)
# Fields returned by list/show requests when the caller asks for neither
# `fields` nor `detail`.
_DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'type', 'connector_id']
# JSON schema for a volume connector request body.
CONNECTOR_SCHEMA = {
    'type': 'object',
    'properties': {
        'connector_id': {'type': 'string'},
        'extra': {'type': ['object', 'null']},
        'node_uuid': {'type': 'string'},
        'type': {'type': 'string'},
        'uuid': {'type': ['string', 'null']},
    },
    'required': ['connector_id', 'node_uuid', 'type'],
    'additionalProperties': False,
}
# Extra validation: the uuid-valued fields must actually parse as UUIDs.
CONNECTOR_VALIDATOR_EXTRA = args.dict_valid(
    node_uuid=args.uuid,
    uuid=args.uuid,
)
# Combined validator: schema check first, then the UUID field checks.
CONNECTOR_VALIDATOR = args.and_valid(
    args.schema(CONNECTOR_SCHEMA),
    CONNECTOR_VALIDATOR_EXTRA
)
# Fields a JSON-PATCH request is allowed to touch ('uuid' is immutable).
PATCH_ALLOWED_FIELDS = [
    'connector_id',
    'extra',
    'node_uuid',
    'type'
]
class VolumeConnectorsController(rest.RestController):
    """REST controller for VolumeConnectors."""
    # NOTE(review): `self.parent_node_ident` (read by every handler) and
    # `self._get_volume_connectors_collection` are defined elsewhere in this
    # module (the __init__/helper are not shown in this chunk) -- confirm
    # against the full file.
    # Sort keys that cannot be used because they map to unsortable columns.
    invalid_sort_key_list = ['extra']
    @METRICS.timer('VolumeConnectorsController.get_all')
    @method.expose()
    @args.validate(node=args.uuid_or_name, marker=args.uuid,
                   limit=args.integer, sort_key=args.string,
                   sort_dir=args.string, fields=args.string_list,
                   detail=args.boolean)
    def get_all(self, node=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None):
        """Retrieve a list of volume connectors.

        :param node: UUID or name of a node, to get only volume connectors
                     for that node.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
                      This value cannot be larger than the value of max_limit
                      in the [api] section of the ironic configuration, or only
                      max_limit resources will be returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :param detail: Optional, whether to retrieve with detail.

        :returns: a list of volume connectors, or an empty list if no volume
                  connector is found.

        :raises: InvalidParameterValue if sort_key does not exist
        :raises: InvalidParameterValue if sort key is invalid for sorting.
        :raises: InvalidParameterValue if both fields and detail are specified.
        """
        api_utils.check_policy('baremetal:volume:get')
        # Fall back to the terse default field set unless the caller asked
        # for full detail or a specific field list.
        if fields is None and not detail:
            fields = _DEFAULT_RETURN_FIELDS
        if fields and detail:
            raise exception.InvalidParameterValue(
                _("Can't fetch a subset of fields with 'detail' set"))
        resource_url = 'volume/connectors'
        return self._get_volume_connectors_collection(
            node, marker, limit, sort_key, sort_dir, resource_url=resource_url,
            fields=fields, detail=detail)
    @METRICS.timer('VolumeConnectorsController.get_one')
    @method.expose()
    @args.validate(connector_uuid=args.uuid, fields=args.string_list)
    def get_one(self, connector_uuid, fields=None):
        """Retrieve information about the given volume connector.

        :param connector_uuid: UUID of a volume connector.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.

        :returns: API-serializable volume connector object.

        :raises: OperationNotPermitted if accessed with specifying a parent
                 node.
        :raises: VolumeConnectorNotFound if no volume connector exists with
                 the specified UUID.
        """
        api_utils.check_policy('baremetal:volume:get')
        # Single-connector lookup is only valid on the top-level
        # /volume/connectors resource, not nested under a node.
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        rpc_connector = objects.VolumeConnector.get_by_uuid(
            api.request.context, connector_uuid)
        return convert_with_links(rpc_connector, fields=fields)
    @METRICS.timer('VolumeConnectorsController.post')
    @method.expose(status_code=http_client.CREATED)
    @method.body('connector')
    @args.validate(connector=CONNECTOR_VALIDATOR)
    def post(self, connector):
        """Create a new volume connector.

        :param connector: a volume connector within the request body.

        :returns: API-serializable volume connector object.

        :raises: OperationNotPermitted if accessed with specifying a parent
                 node.
        :raises: VolumeConnectorTypeAndIdAlreadyExists if a volume
                 connector already exists with the same type and connector_id
        :raises: VolumeConnectorAlreadyExists if a volume connector with the
                 same UUID already exists
        """
        context = api.request.context
        api_utils.check_policy('baremetal:volume:create')
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        # NOTE(hshiina): UUID is mandatory for notification payload
        if not connector.get('uuid'):
            connector['uuid'] = uuidutils.generate_uuid()
        node = api_utils.replace_node_uuid_with_id(connector)
        new_connector = objects.VolumeConnector(context, **connector)
        # Emit start/error/end notifications around the create so observers
        # see a consistent lifecycle.
        notify.emit_start_notification(context, new_connector, 'create',
                                       node_uuid=node.uuid)
        with notify.handle_error_notification(context, new_connector,
                                              'create',
                                              node_uuid=node.uuid):
            new_connector.create()
        notify.emit_end_notification(context, new_connector, 'create',
                                     node_uuid=node.uuid)
        # Set the HTTP Location Header
        api.response.location = link.build_url('volume/connectors',
                                               new_connector.uuid)
        return convert_with_links(new_connector)
    @METRICS.timer('VolumeConnectorsController.patch')
    @method.expose()
    @method.body('patch')
    @args.validate(connector_uuid=args.uuid, patch=args.patch)
    def patch(self, connector_uuid, patch):
        """Update an existing volume connector.

        :param connector_uuid: UUID of a volume connector.
        :param patch: a json PATCH document to apply to this volume connector.

        :returns: API-serializable volume connector object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: VolumeConnectorNotFound if no volume connector exists with
                 the specified UUID.
        :raises: InvalidParameterValue if the volume connector's UUID is being
                 changed
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the connector does
                 not exist
        :raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
                 already exists with the same values for type and connector_id
                 fields
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume connector is not powered off.
        """
        context = api.request.context
        api_utils.check_policy('baremetal:volume:update')
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)
        # Pre-validate any node_uuid values in the patch so a malformed UUID
        # fails fast with InvalidUUID rather than deep in the object layer.
        for value in api_utils.get_patch_values(patch, '/node_uuid'):
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {'uuid': str(value)}
                raise exception.InvalidUUID(message=message)
        rpc_connector = objects.VolumeConnector.get_by_uuid(context,
                                                            connector_uuid)
        connector_dict = rpc_connector.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        rpc_node = api_utils.replace_node_id_with_uuid(connector_dict)
        connector_dict = api_utils.apply_jsonpatch(connector_dict, patch)
        try:
            if connector_dict['node_uuid'] != rpc_node.uuid:
                # The patch re-targets the connector at a different node;
                # resolve that node now so later steps use the right one.
                rpc_node = objects.Node.get(
                    api.request.context, connector_dict['node_uuid'])
        except exception.NodeNotFound as e:
            # Change error code because 404 (NotFound) is inappropriate
            # response for a PATCH request to change a Port
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise
        api_utils.patched_validate_with_schema(
            connector_dict, CONNECTOR_SCHEMA, CONNECTOR_VALIDATOR)
        api_utils.patch_update_changed_fields(
            connector_dict, rpc_connector,
            fields=objects.VolumeConnector.fields,
            schema=CONNECTOR_SCHEMA, id_map={'node_id': rpc_node.id}
        )
        notify.emit_start_notification(context, rpc_connector, 'update',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_connector, 'update',
                                              node_uuid=rpc_node.uuid):
            # The actual update runs on the conductor owning the node.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_connector = api.request.rpcapi.update_volume_connector(
                context, rpc_connector, topic)
        api_connector = convert_with_links(new_connector)
        notify.emit_end_notification(context, new_connector, 'update',
                                     node_uuid=rpc_node.uuid)
        return api_connector
    @METRICS.timer('VolumeConnectorsController.delete')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(connector_uuid=args.uuid)
    def delete(self, connector_uuid):
        """Delete a volume connector.

        :param connector_uuid: UUID of a volume connector.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the connector does
                 not exist
        :raises: VolumeConnectorNotFound if the volume connector cannot be
                 found
        :raises: InvalidStateRequested If a node associated with the
                 volume connector is not powered off.
        """
        context = api.request.context
        api_utils.check_policy('baremetal:volume:delete')
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        rpc_connector = objects.VolumeConnector.get_by_uuid(context,
                                                            connector_uuid)
        rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
        notify.emit_start_notification(context, rpc_connector, 'delete',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_connector,
                                              'delete',
                                              node_uuid=rpc_node.uuid):
            # Deletion is delegated to the conductor owning the node.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            api.request.rpcapi.destroy_volume_connector(context,
                                                        rpc_connector, topic)
        notify.emit_end_notification(context, rpc_connector, 'delete',
                                     node_uuid=rpc_node.uuid)
| 41.606138 | 79 | 0.622019 | # Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
from ironic import api
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
from ironic import objects
METRICS = metrics_utils.get_metrics_logger(__name__)
_DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'type', 'connector_id']
CONNECTOR_SCHEMA = {
'type': 'object',
'properties': {
'connector_id': {'type': 'string'},
'extra': {'type': ['object', 'null']},
'node_uuid': {'type': 'string'},
'type': {'type': 'string'},
'uuid': {'type': ['string', 'null']},
},
'required': ['connector_id', 'node_uuid', 'type'],
'additionalProperties': False,
}
CONNECTOR_VALIDATOR_EXTRA = args.dict_valid(
node_uuid=args.uuid,
uuid=args.uuid,
)
CONNECTOR_VALIDATOR = args.and_valid(
args.schema(CONNECTOR_SCHEMA),
CONNECTOR_VALIDATOR_EXTRA
)
PATCH_ALLOWED_FIELDS = [
'connector_id',
'extra',
'node_uuid',
'type'
]
def convert_with_links(rpc_connector, fields=None, sanitize=True):
    """Render a VolumeConnector object as an API-serializable dict.

    :param rpc_connector: VolumeConnector RPC object to convert.
    :param fields: optional list of field names the caller requested;
        when given, unknown names are rejected.
    :param sanitize: when True, strip the result down to ``fields``.
    :returns: dict representation with node_uuid and links populated.
    """
    result = api_utils.object_to_dict(
        rpc_connector,
        link_resource='volume/connectors',
        fields=('connector_id', 'extra', 'type')
    )
    api_utils.populate_node_uuid(rpc_connector, result)
    if fields is not None:
        api_utils.check_for_invalid_fields(fields, result)
    if sanitize:
        api_utils.sanitize_dict(result, fields)
    return result
def list_convert_with_links(rpc_connectors, limit, url=None, fields=None,
                            detail=None, **kwargs):
    """Render a list of VolumeConnector objects as an API collection.

    :param rpc_connectors: iterable of VolumeConnector RPC objects.
    :param limit: pagination limit used when building the "next" link.
    :param url: base resource URL for pagination links.
    :param fields: optional subset of fields to expose per item.
    :param detail: when truthy, forwarded to the collection helper.
    :param kwargs: extra query parameters echoed into the links.
    :returns: collection dict keyed by 'connectors'.
    """
    items = [convert_with_links(conn, fields=fields, sanitize=False)
             for conn in rpc_connectors]
    if detail:
        kwargs['detail'] = detail
    return collection.list_convert_with_links(
        items=items,
        item_name='connectors',
        limit=limit,
        url=url,
        fields=fields,
        sanitize_func=api_utils.sanitize_dict,
        **kwargs
    )
class VolumeConnectorsController(rest.RestController):
"""REST controller for VolumeConnectors."""
invalid_sort_key_list = ['extra']
    def __init__(self, node_ident=None):
        """Initialize the controller.

        :param node_ident: UUID or name of the parent node when this
            controller is mounted as a node sub-resource, else None.
        """
        super(VolumeConnectorsController, self).__init__()
        # A non-None value marks this instance as node-scoped; the request
        # handlers below reject single-resource and write operations in
        # that mode (OperationNotPermitted).
        self.parent_node_ident = node_ident
    def _get_volume_connectors_collection(self, node_ident, marker, limit,
                                          sort_key, sort_dir,
                                          resource_url=None,
                                          fields=None, detail=None):
        """Build a paginated collection of volume connectors.

        :param node_ident: UUID or name of a node to filter by; overridden
            by ``self.parent_node_ident`` when the controller is
            node-scoped.
        :param marker: UUID of the last connector on the previous page.
        :param limit: maximum number of items to return.
        :param sort_key: column to sort on; keys in
            ``invalid_sort_key_list`` are rejected.
        :param sort_dir: sort direction ('asc' or 'desc').
        :param resource_url: URL used when building pagination links.
        :param fields: optional subset of fields per item.
        :param detail: whether to render detailed items.
        :raises: InvalidParameterValue on a bad sort key, sort direction
            or limit.
        :returns: collection dict produced by list_convert_with_links.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.VolumeConnector.get_by_uuid(
                api.request.context, marker)
        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})
        # A node-scoped controller always filters by its parent node,
        # regardless of the node_ident query argument.
        node_ident = self.parent_node_ident or node_ident
        if node_ident:
            # FIXME(comstud): Since all we need is the node ID, we can
            #                 make this more efficient by only querying
            #                 for that column. This will get cleaned up
            #                 as we move to the object interface.
            node = api_utils.get_rpc_node(node_ident)
            connectors = objects.VolumeConnector.list_by_node_id(
                api.request.context, node.id, limit, marker_obj,
                sort_key=sort_key, sort_dir=sort_dir)
        else:
            connectors = objects.VolumeConnector.list(api.request.context,
                                                      limit,
                                                      marker_obj,
                                                      sort_key=sort_key,
                                                      sort_dir=sort_dir)
        return list_convert_with_links(connectors, limit,
                                       url=resource_url,
                                       fields=fields,
                                       sort_key=sort_key,
                                       sort_dir=sort_dir,
                                       detail=detail)
@METRICS.timer('VolumeConnectorsController.get_all')
@method.expose()
@args.validate(node=args.uuid_or_name, marker=args.uuid,
limit=args.integer, sort_key=args.string,
sort_dir=args.string, fields=args.string_list,
detail=args.boolean)
def get_all(self, node=None, marker=None, limit=None, sort_key='id',
sort_dir='asc', fields=None, detail=None):
"""Retrieve a list of volume connectors.
:param node: UUID or name of a node, to get only volume connectors
for that node.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
in the [api] section of the ironic configuration, or only
max_limit resources will be returned.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param detail: Optional, whether to retrieve with detail.
:returns: a list of volume connectors, or an empty list if no volume
connector is found.
:raises: InvalidParameterValue if sort_key does not exist
:raises: InvalidParameterValue if sort key is invalid for sorting.
:raises: InvalidParameterValue if both fields and detail are specified.
"""
api_utils.check_policy('baremetal:volume:get')
if fields is None and not detail:
fields = _DEFAULT_RETURN_FIELDS
if fields and detail:
raise exception.InvalidParameterValue(
_("Can't fetch a subset of fields with 'detail' set"))
resource_url = 'volume/connectors'
return self._get_volume_connectors_collection(
node, marker, limit, sort_key, sort_dir, resource_url=resource_url,
fields=fields, detail=detail)
@METRICS.timer('VolumeConnectorsController.get_one')
@method.expose()
@args.validate(connector_uuid=args.uuid, fields=args.string_list)
def get_one(self, connector_uuid, fields=None):
"""Retrieve information about the given volume connector.
:param connector_uuid: UUID of a volume connector.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:returns: API-serializable volume connector object.
:raises: OperationNotPermitted if accessed with specifying a parent
node.
:raises: VolumeConnectorNotFound if no volume connector exists with
the specified UUID.
"""
api_utils.check_policy('baremetal:volume:get')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
rpc_connector = objects.VolumeConnector.get_by_uuid(
api.request.context, connector_uuid)
return convert_with_links(rpc_connector, fields=fields)
@METRICS.timer('VolumeConnectorsController.post')
@method.expose(status_code=http_client.CREATED)
@method.body('connector')
@args.validate(connector=CONNECTOR_VALIDATOR)
def post(self, connector):
"""Create a new volume connector.
:param connector: a volume connector within the request body.
:returns: API-serializable volume connector object.
:raises: OperationNotPermitted if accessed with specifying a parent
node.
:raises: VolumeConnectorTypeAndIdAlreadyExists if a volume
connector already exists with the same type and connector_id
:raises: VolumeConnectorAlreadyExists if a volume connector with the
same UUID already exists
"""
context = api.request.context
api_utils.check_policy('baremetal:volume:create')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
# NOTE(hshiina): UUID is mandatory for notification payload
if not connector.get('uuid'):
connector['uuid'] = uuidutils.generate_uuid()
node = api_utils.replace_node_uuid_with_id(connector)
new_connector = objects.VolumeConnector(context, **connector)
notify.emit_start_notification(context, new_connector, 'create',
node_uuid=node.uuid)
with notify.handle_error_notification(context, new_connector,
'create',
node_uuid=node.uuid):
new_connector.create()
notify.emit_end_notification(context, new_connector, 'create',
node_uuid=node.uuid)
# Set the HTTP Location Header
api.response.location = link.build_url('volume/connectors',
new_connector.uuid)
return convert_with_links(new_connector)
@METRICS.timer('VolumeConnectorsController.patch')
@method.expose()
@method.body('patch')
@args.validate(connector_uuid=args.uuid, patch=args.patch)
def patch(self, connector_uuid, patch):
"""Update an existing volume connector.
:param connector_uuid: UUID of a volume connector.
:param patch: a json PATCH document to apply to this volume connector.
:returns: API-serializable volume connector object.
:raises: OperationNotPermitted if accessed with specifying a
parent node.
:raises: PatchError if a given patch can not be applied.
:raises: VolumeConnectorNotFound if no volume connector exists with
the specified UUID.
:raises: InvalidParameterValue if the volume connector's UUID is being
changed
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
already exists with the same values for type and connector_id
fields
:raises: InvalidUUID if invalid node UUID is passed in the patch.
:raises: InvalidStateRequested If a node associated with the
volume connector is not powered off.
"""
context = api.request.context
api_utils.check_policy('baremetal:volume:update')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)
for value in api_utils.get_patch_values(patch, '/node_uuid'):
if not uuidutils.is_uuid_like(value):
message = _("Expected a UUID for node_uuid, but received "
"%(uuid)s.") % {'uuid': str(value)}
raise exception.InvalidUUID(message=message)
rpc_connector = objects.VolumeConnector.get_by_uuid(context,
connector_uuid)
connector_dict = rpc_connector.as_dict()
# NOTE(smoriya):
# 1) Remove node_id because it's an internal value and
# not present in the API object
# 2) Add node_uuid
rpc_node = api_utils.replace_node_id_with_uuid(connector_dict)
connector_dict = api_utils.apply_jsonpatch(connector_dict, patch)
try:
if connector_dict['node_uuid'] != rpc_node.uuid:
rpc_node = objects.Node.get(
api.request.context, connector_dict['node_uuid'])
except exception.NodeNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a PATCH request to change a Port
e.code = http_client.BAD_REQUEST # BadRequest
raise
api_utils.patched_validate_with_schema(
connector_dict, CONNECTOR_SCHEMA, CONNECTOR_VALIDATOR)
api_utils.patch_update_changed_fields(
connector_dict, rpc_connector,
fields=objects.VolumeConnector.fields,
schema=CONNECTOR_SCHEMA, id_map={'node_id': rpc_node.id}
)
notify.emit_start_notification(context, rpc_connector, 'update',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_connector, 'update',
node_uuid=rpc_node.uuid):
topic = api.request.rpcapi.get_topic_for(rpc_node)
new_connector = api.request.rpcapi.update_volume_connector(
context, rpc_connector, topic)
api_connector = convert_with_links(new_connector)
notify.emit_end_notification(context, new_connector, 'update',
node_uuid=rpc_node.uuid)
return api_connector
@METRICS.timer('VolumeConnectorsController.delete')
@method.expose(status_code=http_client.NO_CONTENT)
@args.validate(connector_uuid=args.uuid)
def delete(self, connector_uuid):
"""Delete a volume connector.
:param connector_uuid: UUID of a volume connector.
:raises: OperationNotPermitted if accessed with specifying a
parent node.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorNotFound if the volume connector cannot be
found
:raises: InvalidStateRequested If a node associated with the
volume connector is not powered off.
"""
context = api.request.context
api_utils.check_policy('baremetal:volume:delete')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
rpc_connector = objects.VolumeConnector.get_by_uuid(context,
connector_uuid)
rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
notify.emit_start_notification(context, rpc_connector, 'delete',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_connector,
'delete',
node_uuid=rpc_node.uuid):
topic = api.request.rpcapi.get_topic_for(rpc_node)
api.request.rpcapi.destroy_volume_connector(context,
rpc_connector, topic)
notify.emit_end_notification(context, rpc_connector, 'delete',
node_uuid=rpc_node.uuid)
| 3,092 | 0 | 100 |
07aa52bdc7baa96863bd6296e6bcbb7c0ca19f90 | 1,448 | py | Python | cvefinder/cve_scraper.py | vipin08/cvefinder | 3f99c087c5705bce8e6b12053638cc339454d3dd | [
"MIT"
] | null | null | null | cvefinder/cve_scraper.py | vipin08/cvefinder | 3f99c087c5705bce8e6b12053638cc339454d3dd | [
"MIT"
] | null | null | null | cvefinder/cve_scraper.py | vipin08/cvefinder | 3f99c087c5705bce8e6b12053638cc339454d3dd | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
SEARCH_URL = "https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword="
CVE_URL = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=" | 28.392157 | 64 | 0.588398 | import requests
from bs4 import BeautifulSoup
SEARCH_URL = "https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword="
CVE_URL = "https://cve.mitre.org/cgi-bin/cvename.cgi?name="
def get_html(url):
    """Fetch *url* and return the raw response body.

    :param url: absolute URL to request.
    :returns: response body as bytes.
    :raises Exception: if the server answers with a non-200 status; the
        message includes the status code and URL for diagnosability.
    """
    response = requests.get(url)
    if response.status_code == 200:
        return response.content
    # Include the status code and URL so callers can tell a 404 (bad CVE
    # id) from a 5xx (mitre.org outage) without re-issuing the request.
    raise Exception(f"Bad request: HTTP {response.status_code} for {url}")
def search(s):
    """Search the CVE keyword index and return matching entries.

    :param s: keyword to search for.
    :returns: list of dicts with 'name', 'url' and 'description' keys.
    """
    soup = BeautifulSoup(get_html(f"{SEARCH_URL}{s}"), "lxml")
    matches = []
    for row in soup.select("#TableWithRules table tr"):
        link = row.select_one("td a")
        desc = row.select_one("td:nth-child(2)")
        # Rows missing either cell (e.g. the header row) are skipped.
        if link and desc:
            matches.append({
                "name": link.text,
                "url": link.get("href"),
                "description": desc.text,
            })
    return matches
def lookup_cve(name):
    """Fetch the CVE detail page for *name* and parse it into a dict.

    :param name: CVE identifier, e.g. "CVE-2021-44228".
    :returns: mapping of lower-cased section heading -> section text.
    """
    url = f"{CVE_URL}{name}"
    html = get_html(url)
    soup = BeautifulSoup(html, "lxml")
    result_rows = soup.select("#GeneratedTable table tr")
    subtitle = ""
    description = ""
    raw_results = {}
    # Rows alternate between <th> headings and <td> bodies; remember the
    # last heading seen and attach the following body row(s) to it.
    for row in result_rows:
        head = row.select_one("th")
        if head:
            subtitle = head.text
        else:
            body = row.select_one("td")
            description = body.text.strip().strip("\n")
            # NOTE(review): multiple <td> rows under one heading overwrite
            # each other here — presumably each section has a single body
            # row; confirm against the live page layout.
            raw_results[subtitle.lower()] = description
    return raw_results
1e109adda58aa84ce9bbf20102aa04ece1da32b6 | 10,940 | py | Python | test/unit/mongo_perf/run_program.py | mjpernot/mongo-perf | ba15f77cd8006aeb061ba83d6c605caedfd6015e | [
"MIT"
] | null | null | null | test/unit/mongo_perf/run_program.py | mjpernot/mongo-perf | ba15f77cd8006aeb061ba83d6c605caedfd6015e | [
"MIT"
] | null | null | null | test/unit/mongo_perf/run_program.py | mjpernot/mongo-perf | ba15f77cd8006aeb061ba83d6c605caedfd6015e | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: run_program.py
Description: Unit testing of run_program in mongo_perf.py.
Usage:
test/unit/mongo_perf/run_program.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mongo_perf
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
def mongo_stat(server, args_array, **kwargs):

    """Method:  mongo_stat

    Description:  Function stub holder for mongo_perf.mongo_stat.
        Always reports success; the parameters exist only to mirror the
        real function's signature.

    Arguments:
        (input) server
        (input) args_array
        (input) **kwargs
            class_cfg

    """

    # The original conditional assigned True over an initial True on every
    # path, so the stub unconditionally returns True regardless of input.
    return True
class Server(object):

    """Class:  Server

    Description:  Class stub holder for mongo_class.Server class.

    Methods:
        __init__
        connect

    """

    def __init__(self):

        """Method:  __init__

        Description:  Class initialization.

        Arguments:

        """

        # Successful-connection defaults; tests flip these to simulate
        # failures.
        self.err_msg = None
        self.status = True

    def connect(self):

        """Method:  connect

        Description:  Stub method holder for mongo_class.Server.connect.

        Arguments:

        """

        return (self.status, self.err_msg)
class CfgTest(object):

    """Class:  CfgTest

    Description:  Class which is a representation of a cfg module.

    Methods:
        __init__

    """

    def __init__(self):

        """Method:  __init__

        Description:  Initialization instance of the CfgTest class.

        Arguments:

        """

        # Default Mongo connection settings used by the unit tests.
        defaults = {
            "name": "Mongo",
            "user": "mongo",
            "japd": None,
            "host": "hostname",
            "port": 27017,
            "auth": True,
            "auth_db": "admin",
            "use_arg": True,
            "use_uri": False,
            "repset": None,
            "repset_hosts": None,
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)
class CfgTest2(object):

    """Class:  CfgTest2

    Description:  Class which is a representation of a cfg module.

    Methods:
        __init__

    """

    def __init__(self):

        """Method:  __init__

        Description:  Initialization instance of the CfgTest class.

        Arguments:

        """

        # Same defaults as CfgTest plus an explicit authentication
        # mechanism, for tests exercising the auth_mech code path.
        defaults = {
            "name": "Mongo",
            "user": "mongo",
            "japd": None,
            "host": "hostname",
            "port": 27017,
            "auth": True,
            "auth_db": "admin",
            "use_arg": True,
            "use_uri": False,
            "repset": None,
            "repset_hosts": None,
            "auth_mech": "SCRAM-SHA-1",
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_rep_arg
test_no_rep_arg
test_conn_fail_suppress
test_connection_fail
test_connection_success
test_auth_mech
test_no_auth_mech
test_replica_set
test_mongo
test_run_program
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.cfg = CfgTest()
self.cfg2 = CfgTest2()
self.server = Server()
self.func_dict = {"-S": mongo_stat}
self.args_array = {"-m": True, "-d": True, "-c": True, "-S": True}
self.args_array2 = {"-m": True, "-d": True, "-c": True, "-S": True,
"-e": "ToEmail", "-s": "SubjectLine"}
self.args_array3 = {"-d": True, "-c": True, "-S": True}
self.args_array4 = {"-w": True, "-d": True, "-c": True, "-S": True}
self.repset_list = ["host1:27017", "host2:27017"]
self.req_arg_list = ["--authenticationDatabase="]
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_rep_arg(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_rep_arg
Description: Test with passing rep_arg argument.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(
mongo_perf.run_program(
self.args_array, self.func_dict, req_arg=self.req_arg_list))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_no_rep_arg(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_no_rep_arg
Description: Test with not passing rep_arg argument.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_conn_fail_suppress(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_conn_fail_suppress
Description: Test with failed connection with suppression.
Arguments:
"""
self.server.status = False
self.server.err_msg = "Error Connection Message"
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array4,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_connection_fail(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_connection_fail
Description: Test with failed connection.
Arguments:
"""
self.server.status = False
self.server.err_msg = "Error Connection Message"
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
with gen_libs.no_std_out():
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_connection_success(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_connection_success
Description: Test with successful connection.
Arguments:
"""
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_auth_mech(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_auth_mech
Description: Test with authorization mechanism setting.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_no_auth_mech(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_no_auth_mech
Description: Test with no authorization mechanism setting.
Arguments:
"""
self.cfg.repset = "replicasetname"
self.cfg.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_replica_set(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_replica_set
Description: Test connecting to Mongo replica set.
Arguments:
"""
self.cfg.repset = "replicasetname"
self.cfg.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_mongo(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_mongo
Description: Test with mongo option.
Arguments:
"""
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_run_program(self, mock_inst, mock_disconn, mock_cfg):
"""Function: test_run_program
Description: Test run_program function.
Arguments:
"""
mock_inst.return_value = self.server
mock_disconn.return_value = True
mock_cfg.return_value = self.cfg
self.assertFalse(mongo_perf.run_program(self.args_array3,
self.func_dict))
if __name__ == "__main__":
unittest.main()
| 25.149425 | 76 | 0.618556 | #!/usr/bin/python
# Classification (U)
"""Program: run_program.py
Description: Unit testing of run_program in mongo_perf.py.
Usage:
test/unit/mongo_perf/run_program.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mongo_perf
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
def mongo_stat(server, args_array, **kwargs):
"""Method: mongo_stat
Description: Function stub holder for mongo_perf.mongo_stat.
Arguments:
(input) server
(input) args_array
(input) **kwargs
class_cfg
"""
status = True
if server and args_array and kwargs.get("class_cfg", True):
status = True
return status
class Server(object):
"""Class: Server
Description: Class stub holder for mongo_class.Server class.
Methods:
__init__
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.status = True
self.err_msg = None
def connect(self):
"""Method: connect
Description: Stub method holder for mongo_class.Server.connect.
Arguments:
"""
return self.status, self.err_msg
class CfgTest(object):
"""Class: CfgTest
Description: Class which is a representation of a cfg module.
Methods:
__init__
"""
def __init__(self):
"""Method: __init__
Description: Initialization instance of the CfgTest class.
Arguments:
"""
self.name = "Mongo"
self.user = "mongo"
self.japd = None
self.host = "hostname"
self.port = 27017
self.auth = True
self.auth_db = "admin"
self.use_arg = True
self.use_uri = False
self.repset = None
self.repset_hosts = None
class CfgTest2(object):
"""Class: CfgTest2
Description: Class which is a representation of a cfg module.
Methods:
__init__
"""
def __init__(self):
"""Method: __init__
Description: Initialization instance of the CfgTest class.
Arguments:
"""
self.name = "Mongo"
self.user = "mongo"
self.japd = None
self.host = "hostname"
self.port = 27017
self.auth = True
self.auth_db = "admin"
self.use_arg = True
self.use_uri = False
self.repset = None
self.repset_hosts = None
self.auth_mech = "SCRAM-SHA-1"
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_rep_arg
test_no_rep_arg
test_conn_fail_suppress
test_connection_fail
test_connection_success
test_auth_mech
test_no_auth_mech
test_replica_set
test_mongo
test_run_program
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.cfg = CfgTest()
self.cfg2 = CfgTest2()
self.server = Server()
self.func_dict = {"-S": mongo_stat}
self.args_array = {"-m": True, "-d": True, "-c": True, "-S": True}
self.args_array2 = {"-m": True, "-d": True, "-c": True, "-S": True,
"-e": "ToEmail", "-s": "SubjectLine"}
self.args_array3 = {"-d": True, "-c": True, "-S": True}
self.args_array4 = {"-w": True, "-d": True, "-c": True, "-S": True}
self.repset_list = ["host1:27017", "host2:27017"]
self.req_arg_list = ["--authenticationDatabase="]
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_rep_arg(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_rep_arg
Description: Test with passing rep_arg argument.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(
mongo_perf.run_program(
self.args_array, self.func_dict, req_arg=self.req_arg_list))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_no_rep_arg(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_no_rep_arg
Description: Test with not passing rep_arg argument.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_conn_fail_suppress(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_conn_fail_suppress
Description: Test with failed connection with suppression.
Arguments:
"""
self.server.status = False
self.server.err_msg = "Error Connection Message"
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array4,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_connection_fail(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_connection_fail
Description: Test with failed connection.
Arguments:
"""
self.server.status = False
self.server.err_msg = "Error Connection Message"
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
with gen_libs.no_std_out():
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_connection_success(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_connection_success
Description: Test with successful connection.
Arguments:
"""
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_auth_mech(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_auth_mech
Description: Test with authorization mechanism setting.
Arguments:
"""
self.cfg2.repset = "replicasetname"
self.cfg2.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg2, self.cfg2]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_no_auth_mech(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_no_auth_mech
Description: Test with no authorization mechanism setting.
Arguments:
"""
self.cfg.repset = "replicasetname"
self.cfg.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_class.RepSet")
def test_replica_set(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_replica_set
Description: Test connecting to Mongo replica set.
Arguments:
"""
self.cfg.repset = "replicasetname"
self.cfg.repset_hosts = self.repset_list
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_mongo(self, mock_inst, mock_cfg, mock_disconn):
"""Function: test_mongo
Description: Test with mongo option.
Arguments:
"""
mock_inst.return_value = self.server
mock_cfg.side_effect = [self.cfg, True]
mock_disconn.return_value = True
self.assertFalse(mongo_perf.run_program(self.args_array,
self.func_dict))
@mock.patch("mongo_perf.gen_libs.load_module")
@mock.patch("mongo_perf.mongo_libs.disconnect")
@mock.patch("mongo_perf.mongo_libs.create_instance")
def test_run_program(self, mock_inst, mock_disconn, mock_cfg):
"""Function: test_run_program
Description: Test run_program function.
Arguments:
"""
mock_inst.return_value = self.server
mock_disconn.return_value = True
mock_cfg.return_value = self.cfg
self.assertFalse(mongo_perf.run_program(self.args_array3,
self.func_dict))
if __name__ == "__main__":
unittest.main()
| 0 | 0 | 0 |
90efcd209e25bd6d81925c9fcc593323cf9fc1fc | 13,242 | py | Python | chainer_/chainercv2/models/cbamresnet.py | yick2232/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | 1 | 2019-11-28T10:02:58.000Z | 2019-11-28T10:02:58.000Z | chainer_/chainercv2/models/cbamresnet.py | fireoil/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | null | null | null | chainer_/chainercv2/models/cbamresnet.py | fireoil/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | 1 | 2019-11-20T18:47:37.000Z | 2019-11-20T18:47:37.000Z | """
CBAM-ResNet for ImageNet-1K, implemented in Chainer.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import SimpleSequential, conv1x1_block, conv7x7_block
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(Chain):
"""
Multilayer perceptron block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
class ChannelGate(Chain):
"""
CBAM channel gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
class SpatialGate(Chain):
"""
CBAM spatial gate block.
"""
class CbamBlock(Chain):
"""
CBAM attention block for CBAM-ResNet.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
class CbamResUnit(Chain):
"""
CBAM-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
class CbamResNet(Chain):
"""
CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def get_resnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create CBAM-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
use_se : bool
Whether to use SE block.
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CbamResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def cbam_resnet18(**kwargs):
"""
CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs)
def cbam_resnet34(**kwargs):
"""
CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs)
def cbam_resnet50(**kwargs):
"""
CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs)
def cbam_resnet101(**kwargs):
"""
CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs)
def cbam_resnet152(**kwargs):
"""
CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs)
if __name__ == "__main__":
_test()
| 30.723898 | 115 | 0.572799 | """
CBAM-ResNet for ImageNet-1K, implemented in Chainer.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import SimpleSequential, conv1x1_block, conv7x7_block
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(Chain):
"""
Multilayer perceptron block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(MLP, self).__init__()
mid_channels = channels // reduction_ratio
with self.init_scope():
self.fc1 = L.Linear(
in_size=channels,
out_size=mid_channels)
self.activ = F.relu
self.fc2 = L.Linear(
in_size=mid_channels,
out_size=channels)
def __call__(self, x):
x = F.reshape(x, shape=(x.shape[0], -1))
x = self.fc1(x)
x = self.activ(x)
x = self.fc2(x)
return x
class ChannelGate(Chain):
"""
CBAM channel gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(ChannelGate, self).__init__()
with self.init_scope():
self.mlp = MLP(
channels=channels,
reduction_ratio=reduction_ratio)
def __call__(self, x):
att1 = F.average_pooling_2d(x, ksize=x.shape[2:])
att1 = self.mlp(att1)
att2 = F.max_pooling_2d(x, ksize=x.shape[2:])
att2 = self.mlp(att2)
att = att1 + att2
att = F.sigmoid(att)
att = F.broadcast_to(F.expand_dims(F.expand_dims(att, axis=2), axis=3), x.shape)
x = x * att
return x
class SpatialGate(Chain):
"""
CBAM spatial gate block.
"""
def __init__(self):
super(SpatialGate, self).__init__()
with self.init_scope():
self.conv = conv7x7_block(
in_channels=2,
out_channels=1,
activation=None)
def __call__(self, x):
att1 = F.expand_dims(F.max(x, axis=1), axis=1)
att2 = F.expand_dims(F.mean(x, axis=1), axis=1)
att = F.concat((att1, att2), axis=1)
att = self.conv(att)
att = F.broadcast_to(F.sigmoid(att), x.shape)
x = x * att
return x
class CbamBlock(Chain):
"""
CBAM attention block for CBAM-ResNet.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(CbamBlock, self).__init__()
with self.init_scope():
self.ch_gate = ChannelGate(
channels=channels,
reduction_ratio=reduction_ratio)
self.sp_gate = SpatialGate()
def __call__(self, x):
x = self.ch_gate(x)
x = self.sp_gate(x)
return x
class CbamResUnit(Chain):
"""
CBAM-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck):
super(CbamResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
with self.init_scope():
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
conv1_stride=False)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activation=None)
self.cbam = CbamBlock(channels=out_channels)
self.activ = F.relu
def __call__(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = self.cbam(x)
x = x + identity
x = self.activ(x)
return x
class CbamResNet(Chain):
"""
CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(224, 224),
classes=1000):
super(CbamResNet, self).__init__()
self.in_size = in_size
self.classes = classes
with self.init_scope():
self.features = SimpleSequential()
with self.features.init_scope():
setattr(self.features, "init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = SimpleSequential()
with stage.init_scope():
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
setattr(stage, "unit{}".format(j + 1), CbamResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck))
in_channels = out_channels
setattr(self.features, "stage{}".format(i + 1), stage)
setattr(self.features, 'final_pool', partial(
F.average_pooling_2d,
ksize=7,
stride=1))
self.output = SimpleSequential()
with self.output.init_scope():
setattr(self.output, 'flatten', partial(
F.reshape,
shape=(-1, in_channels)))
setattr(self.output, 'fc', L.Linear(
in_size=in_channels,
out_size=classes))
def __call__(self, x):
x = self.features(x)
x = self.output(x)
return x
def get_resnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create CBAM-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
use_se : bool
Whether to use SE block.
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CbamResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def cbam_resnet18(**kwargs):
"""
CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs)
def cbam_resnet34(**kwargs):
"""
CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs)
def cbam_resnet50(**kwargs):
"""
CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs)
def cbam_resnet101(**kwargs):
"""
CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs)
def cbam_resnet152(**kwargs):
"""
CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
pretrained = False
models = [
# cbam_resnet18,
# cbam_resnet34,
cbam_resnet50,
# cbam_resnet101,
# cbam_resnet152,
]
for model in models:
net = model(pretrained=pretrained)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != cbam_resnet18 or weight_count == 11779392)
assert (model != cbam_resnet34 or weight_count == 21960468)
assert (model != cbam_resnet50 or weight_count == 28089624)
assert (model != cbam_resnet101 or weight_count == 49330172)
assert (model != cbam_resnet152 or weight_count == 66826848)
x = np.zeros((1, 3, 224, 224), np.float32)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 6,138 | 0 | 341 |
9d92ca3dc01340f1d4db974a8ac33a96c4e6b6e9 | 35 | py | Python | jupyterlab_nvdashboard/apps/__init__.py | Northern-Data-AG/jupyterlab-ANDashboard | b266d40d9558279310e62b39cc36f8d470e0600c | [
"BSD-3-Clause"
] | null | null | null | jupyterlab_nvdashboard/apps/__init__.py | Northern-Data-AG/jupyterlab-ANDashboard | b266d40d9558279310e62b39cc36f8d470e0600c | [
"BSD-3-Clause"
] | 1 | 2021-05-20T12:04:10.000Z | 2021-05-20T12:04:10.000Z | jupyterlab_nvdashboard/apps/__init__.py | Northern-Data-AG/jupyterlab-ANDashboard | b266d40d9558279310e62b39cc36f8d470e0600c | [
"BSD-3-Clause"
] | null | null | null | from . import cpu
from . import gpu | 17.5 | 17 | 0.742857 | from . import cpu
from . import gpu | 0 | 0 | 0 |
38d14e1a9ab4d24a21bc7273774416bfd31271f1 | 6,109 | py | Python | tests/test_api/test_bookmark.py | yu-iskw/polyaxon-client | af72f30af218a8a027fea1ad966b543c900e0444 | [
"MIT"
] | null | null | null | tests/test_api/test_bookmark.py | yu-iskw/polyaxon-client | af72f30af218a8a027fea1ad966b543c900e0444 | [
"MIT"
] | null | null | null | tests/test_api/test_bookmark.py | yu-iskw/polyaxon-client | af72f30af218a8a027fea1ad966b543c900e0444 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import httpretty
import json
import uuid
from collections import Mapping
from faker import Faker
from tests.test_api.utils import TestBaseApi
from polyaxon_client.api.base import BaseApiHandler
from polyaxon_client.api.bookmark import BookmarkApi
from polyaxon_client.schemas import (
ExperimentConfig,
ExperimentGroupConfig,
JobConfig,
ProjectConfig
)
faker = Faker()
| 35.109195 | 95 | 0.562121 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import httpretty
import json
import uuid
from collections import Mapping
from faker import Faker
from tests.test_api.utils import TestBaseApi
from polyaxon_client.api.base import BaseApiHandler
from polyaxon_client.api.bookmark import BookmarkApi
from polyaxon_client.schemas import (
ExperimentConfig,
ExperimentGroupConfig,
JobConfig,
ProjectConfig
)
faker = Faker()
class TestBookmarkApi(TestBaseApi):
def setUp(self):
super(TestBookmarkApi, self).setUp()
self.api_handler = BookmarkApi(transport=self.transport, config=self.api_config)
@httpretty.activate
def test_get_bookmarked_builds(self):
project_uuid = uuid.uuid4().hex
obj_uuid = uuid.uuid4().hex
objs = [{'content_object': JobConfig(config={},
uuid=obj_uuid,
project=project_uuid).to_dict()}
for _ in range(10)]
httpretty.register_uri(
httpretty.GET,
BaseApiHandler.build_url(
self.api_config.base_url,
'/bookmarks',
'user',
'builds'),
body=json.dumps({'results': objs, 'count': 10, 'next': None}),
content_type='application/json',
status=200)
# Schema response
result = self.api_handler.builds('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], JobConfig)
# Raw response
self.set_raw_response()
result = self.api_handler.builds('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], Mapping)
@httpretty.activate
def test_get_bookmarked_jobs(self):
project_uuid = uuid.uuid4().hex
obj_uuid = uuid.uuid4().hex
objs = [{'content_object': JobConfig(config={},
uuid=obj_uuid,
project=project_uuid).to_dict()}
for _ in range(10)]
httpretty.register_uri(
httpretty.GET,
BaseApiHandler.build_url(
self.api_config.base_url,
'/bookmarks',
'user',
'jobs'),
body=json.dumps({'results': objs, 'count': 10, 'next': None}),
content_type='application/json',
status=200)
# Schema response
result = self.api_handler.jobs('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], JobConfig)
# Raw response
self.set_raw_response()
result = self.api_handler.jobs('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], Mapping)
@httpretty.activate
def test_get_bookmarked_experiments(self):
project_uuid = uuid.uuid4().hex
obj_uuid = uuid.uuid4().hex
objs = [{'content_object': ExperimentConfig(config={},
uuid=obj_uuid,
project=project_uuid).to_dict()}
for _ in range(10)]
httpretty.register_uri(
httpretty.GET,
BaseApiHandler.build_url(
self.api_config.base_url,
'/bookmarks',
'user',
'experiments'),
body=json.dumps({'results': objs, 'count': 10, 'next': None}),
content_type='application/json',
status=200)
# Schema response
result = self.api_handler.experiments('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], ExperimentConfig)
# Raw response
self.set_raw_response()
result = self.api_handler.experiments('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], Mapping)
@httpretty.activate
def test_get_bookmarked_groups(self):
project_uuid = uuid.uuid4().hex
experiment_groups = [
{'content_object': ExperimentGroupConfig(content=faker.word,
project=project_uuid).to_dict()}
for _ in range(10)]
httpretty.register_uri(
httpretty.GET,
BaseApiHandler.build_url(
self.api_config.base_url,
'/bookmarks',
'user',
'groups'),
body=json.dumps({'results': experiment_groups, 'count': 10, 'next': None}),
content_type='application/json',
status=200)
# Schema response
result = self.api_handler.groups('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], ExperimentGroupConfig)
# Raw response
self.set_raw_response()
result = self.api_handler.groups('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], Mapping)
@httpretty.activate
def test_get_bookmarked_projects(self):
projects = [{'content_object': ProjectConfig(faker.word).to_dict()} for _ in range(10)]
httpretty.register_uri(
httpretty.GET,
BaseApiHandler.build_url(
self.api_config.base_url,
'/bookmarks',
'user',
'projects'),
body=json.dumps({'results': projects, 'count': 10, 'next': None}),
content_type='application/json',
status=200)
# Schema response
result = self.api_handler.projects('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], ProjectConfig)
# Raw response
self.set_raw_response()
result = self.api_handler.projects('user')
assert len(result['results']) == 10
assert isinstance(result['results'][0], Mapping)
| 5,310 | 296 | 23 |
5d23d339d44fbef5e2462c16509f46896288913e | 203 | py | Python | test_cuda.py | uta1/graph_models | 5d5a7dcbbe5195ac4a8a2f516694bbfc4378c3b4 | [
"Unlicense"
] | null | null | null | test_cuda.py | uta1/graph_models | 5d5a7dcbbe5195ac4a8a2f516694bbfc4378c3b4 | [
"Unlicense"
] | null | null | null | test_cuda.py | uta1/graph_models | 5d5a7dcbbe5195ac4a8a2f516694bbfc4378c3b4 | [
"Unlicense"
] | null | null | null | import tensorflow as tf
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
print('physical devices:', str(tf.config.experimental.list_physical_devices()))
| 29 | 79 | 0.778325 | import tensorflow as tf
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
print('physical devices:', str(tf.config.experimental.list_physical_devices()))
| 0 | 0 | 0 |
bdb095531f2a9a262411bc36740dde801c331f93 | 3,149 | py | Python | plugins/modules/trigger_image_activation.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/trigger_image_activation.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/trigger_image_activation.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: trigger_image_activation
short_description: Manage TriggerImageActivation objects of SoftwareImageManagementSwim
description:
- Activates a software image on a given device. Software image must be present in the device flash.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options:
schedule_validate:
description:
- ScheduleValidate, validates data before schedule (Optional).
type: bool
payload:
description:
- An object to send in the Request body.
type: list
required: True
elements: dict
suboptions:
activateLowerImageVersion:
description:
- It is the trigger image activation's activateLowerImageVersion.
type: bool
deviceUpgradeMode:
description:
- It is the trigger image activation's deviceUpgradeMode.
type: str
deviceUuid:
description:
- It is the trigger image activation's deviceUuid.
type: str
distributeIfNeeded:
description:
- It is the trigger image activation's distributeIfNeeded.
type: bool
imageUuidList:
description:
- It is the trigger image activation's imageUuidList.
type: list
smuImageUuidList:
description:
- It is the trigger image activation's smuImageUuidList.
type: list
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.trigger_image_activation
# Reference by Internet resource
- name: TriggerImageActivation reference
description: Complete reference of the TriggerImageActivation object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TriggerImageActivation reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: trigger_software_image_activation
cisco.dnac.trigger_image_activation:
state: create # required
payload: # required
- activateLowerImageVersion: True # boolean
deviceUpgradeMode: SomeValue # string
deviceUuid: SomeValue # string
distributeIfNeeded: True # boolean
imageUuidList:
- SomeValue # string
smuImageUuidList:
- SomeValue # string
schedule_validate: True # boolean
"""
RETURN = r"""
dnac_response:
description: A dictionary with the response returned by the DNA Center Python SDK
returned: always
type: dict
sample: {"response": 29, "version": "1.0"}
sdk_function:
description: The DNA Center SDK function used to execute the task
returned: always
type: str
sample: software_image_management_swim.trigger_software_image_activation
missing_params:
description: Provided arguments do not comply with the schema of the DNA Center Python SDK function
returned: when the function request schema is not satisfied
type: list
sample:
"""
| 30.872549 | 101 | 0.718958 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: trigger_image_activation
short_description: Manage TriggerImageActivation objects of SoftwareImageManagementSwim
description:
- Activates a software image on a given device. Software image must be present in the device flash.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options:
schedule_validate:
description:
- ScheduleValidate, validates data before schedule (Optional).
type: bool
payload:
description:
- An object to send in the Request body.
type: list
required: True
elements: dict
suboptions:
activateLowerImageVersion:
description:
- It is the trigger image activation's activateLowerImageVersion.
type: bool
deviceUpgradeMode:
description:
- It is the trigger image activation's deviceUpgradeMode.
type: str
deviceUuid:
description:
- It is the trigger image activation's deviceUuid.
type: str
distributeIfNeeded:
description:
- It is the trigger image activation's distributeIfNeeded.
type: bool
imageUuidList:
description:
- It is the trigger image activation's imageUuidList.
type: list
smuImageUuidList:
description:
- It is the trigger image activation's smuImageUuidList.
type: list
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.trigger_image_activation
# Reference by Internet resource
- name: TriggerImageActivation reference
description: Complete reference of the TriggerImageActivation object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TriggerImageActivation reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: trigger_software_image_activation
cisco.dnac.trigger_image_activation:
state: create # required
payload: # required
- activateLowerImageVersion: True # boolean
deviceUpgradeMode: SomeValue # string
deviceUuid: SomeValue # string
distributeIfNeeded: True # boolean
imageUuidList:
- SomeValue # string
smuImageUuidList:
- SomeValue # string
schedule_validate: True # boolean
"""
RETURN = r"""
dnac_response:
description: A dictionary with the response returned by the DNA Center Python SDK
returned: always
type: dict
sample: {"response": 29, "version": "1.0"}
sdk_function:
description: The DNA Center SDK function used to execute the task
returned: always
type: str
sample: software_image_management_swim.trigger_software_image_activation
missing_params:
description: Provided arguments do not comply with the schema of the DNA Center Python SDK function
returned: when the function request schema is not satisfied
type: list
sample:
"""
| 0 | 0 | 0 |