| hexsha (string, len 40) | size (int64, 4–1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4–209) | max_stars_repo_name (string, len 5–121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1–10) | max_stars_count (int64, 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4–209) | max_issues_repo_name (string, len 5–121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1–10) | max_issues_count (int64, 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4–209) | max_forks_repo_name (string, len 5–121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1–10) | max_forks_count (int64, 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4–1.02M) | avg_line_length (float64, 1.07–66.1k) | max_line_length (int64, 4–266k) | alphanum_fraction (float64, 0.01–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f4de4faa1e841e8a6ac1f41b6fa33129d80bab6 | 7,981 | py | Python | client/BaseClient.py | andrsd/civet | ac9ffffdea987437a5eb75779b9c7fe681e1ba85 | ["Apache-2.0"] | 1 | 2016-11-29T15:15:55.000Z | 2016-11-29T15:15:55.000Z | client/BaseClient.py | andrsd/civet | ac9ffffdea987437a5eb75779b9c7fe681e1ba85 | ["Apache-2.0"] | null | null | null | client/BaseClient.py | andrsd/civet | ac9ffffdea987437a5eb75779b9c7fe681e1ba85 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import logging, logging.handlers
from client.JobGetter import JobGetter
from client.JobRunner import JobRunner
from client.ServerUpdater import ServerUpdater
from client.InterruptHandler import InterruptHandler
import os, signal
import time
import traceback
logger = logging.getLogger("civet_client")
from threading import Thread
try:
from queue import Queue
except ImportError:
from Queue import Queue
def has_handler(handler_type):
"""
Check to see if a handler is already installed.
Normally this isn't a problem but when running tests it might be.
"""
for h in logger.handlers:
# Use type instead of isinstance since the types have
# to match exactly
if type(h) == handler_type:
return True
return False
def setup_logger(log_file=None):
"""
Setup the "civet_client" logger.
Input:
log_file: If not None then a RotatingFileHandler is installed. Otherwise a logger to console is used.
"""
formatter = logging.Formatter('%(asctime)-15s:%(levelname)s:%(message)s')
fhandler = None
if log_file:
if has_handler(logging.handlers.RotatingFileHandler):
return
fhandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=1024*1024*5, backupCount=5)
else:
if has_handler(logging.StreamHandler):
return
fhandler = logging.StreamHandler()
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.DEBUG)
class ClientException(Exception):
pass
class BaseClient(object):
"""
This is the job server client. It polls the server
for new jobs, requests one, and then runs it.
While running a job it reports back with output
from the job. During this operation the server
can respond with commands to the client, mainly
to cancel the job.
"""
def __init__(self, client_info):
self.client_info = client_info
self.command_q = Queue()
self.runner_error = False
self.thread_join_wait = 2*60*60 # 2 hours
if self.client_info["log_file"]:
self.set_log_file(self.client_info["log_file"])
elif self.client_info["log_dir"]:
self.set_log_dir(self.client_info["log_dir"])
else:
raise ClientException("log file not set")
setup_logger(self.client_info["log_file"])
try:
self.cancel_signal = InterruptHandler(self.command_q, sig=[signal.SIGUSR1, signal.SIGINT])
self.graceful_signal = InterruptHandler(self.command_q, sig=[signal.SIGUSR2])
except:
# On Windows, SIGUSR1, SIGUSR2 are not defined. Signals don't
# work in general so this is the easiest way to disable
# them but leave all the code in place.
self.cancel_signal = InterruptHandler(self.command_q, sig=[])
self.graceful_signal = InterruptHandler(self.command_q, sig=[])
if self.client_info["ssl_cert"]:
self.client_info["ssl_verify"] = self.client_info["ssl_cert"]
def set_log_dir(self, log_dir):
"""
Sets the log directory. If log_dir is set,
the log file will be named "civet_client_<client_name>.log" in that directory.
Raises ClientException if the directory doesn't exist or isn't writable.
"""
if not log_dir:
return
log_dir = os.path.abspath(log_dir)
self.check_log_dir(log_dir)
self.client_info["log_file"] = "%s/civet_client_%s.log" % (log_dir, self.client_info["client_name"])
def check_log_dir(self, log_dir):
"""
Makes sure the log directory exists and is writable
Input:
log_dir: The directory to check if we can write a log file
Raises:
ClientException if unable to write
"""
if not os.path.isdir(log_dir):
raise ClientException('Log directory (%s) does not exist!' % log_dir)
if not os.access(log_dir, os.W_OK):
raise ClientException('Log directory (%s) is not writeable!' % log_dir)
def set_log_file(self, log_file):
"""
Specify a log file to use.
Input:
log_file: The log file to write to
Raises:
ClientException if we can't write to the file
"""
if not log_file:
return
log_file = os.path.abspath(log_file)
log_dir = os.path.dirname(log_file)
self.check_log_dir(log_dir)
self.client_info["log_file"] = log_file
def run_claimed_job(self, server, servers, claimed):
job_info = claimed["job_info"]
job_id = job_info["job_id"]
message_q = Queue()
runner = JobRunner(self.client_info, job_info, message_q, self.command_q)
self.cancel_signal.set_message({"job_id": job_id, "command": "cancel"})
control_q = Queue()
updater = ServerUpdater(server, self.client_info, message_q, self.command_q, control_q)
for entry in servers:
if entry != server:
control_q.put({"server": entry, "message": "Running job on another server"})
else:
control_q.put({"server": entry, "message": "Job {}: {}".format(job_id, job_info["recipe_name"])})
updater_thread = Thread(target=ServerUpdater.run, args=(updater,))
updater_thread.start()
runner.run_job()
if not runner.stopped and not runner.canceled:
logger.info("Joining message_q")
message_q.join()
control_q.put({"command": "Quit"}) # Any command will stop the ServerUpdater
# We want to wait for a little while here, if necessary.
# It could be that the server is temporarily down and if
# we just wait long enough for it to come back we can finish cleanly.
# However, we don't want to hang forever.
logger.info("Joining ServerUpdater")
updater_thread.join(self.thread_join_wait)
if updater_thread.is_alive():
logger.warning("Failed to join ServerUpdater thread. Job {}: '{}' not updated correctly".format(
job_id, job_info["recipe_name"]))
self.command_q.queue.clear()
self.runner_error = runner.error
def run(self):
"""
Main client loop. Polls the server for jobs and runs them.
"""
while True:
do_poll = True
try:
getter = JobGetter(self.client_info)
claimed = getter.find_job()
if claimed:
server = self.client_info["server"]
self.run_claimed_job(server, [server], claimed)
# finished the job, look for a new one immediately
do_poll = False
except Exception:
logger.warning("Error: %s" % traceback.format_exc())
if self.cancel_signal.triggered or self.graceful_signal.triggered:
logger.info("Received signal...exiting")
break
if self.runner_error:
logger.info("Error occurred in runner...exiting")
break
if self.client_info["single_shot"]:
break
if do_poll:
time.sleep(self.client_info["poll"])
| 36.610092 | 113 | 0.638266 |
1fb63a4d2035d9755e4d9c146f37c5c500eb3612 | 11,939 | py | Python | chives/timelord/timelord_state.py | ChivesWorld/chives-blockchain | 56734ef0719f7bf844213823bb95b0fcc642d222 | ["Apache-2.0"] | 7 | 2021-12-26T11:05:19.000Z | 2022-02-24T10:42:45.000Z | chives/timelord/timelord_state.py | ChivesWorld/chives-blockchain | 56734ef0719f7bf844213823bb95b0fcc642d222 | ["Apache-2.0"] | 8 | 2021-12-14T17:27:29.000Z | 2022-03-29T18:18:22.000Z | chives/timelord/timelord_state.py | ChivesWorld/chives-blockchain | 56734ef0719f7bf844213823bb95b0fcc642d222 | ["Apache-2.0"] | 1 | 2021-12-09T23:51:12.000Z | 2021-12-09T23:51:12.000Z |
import logging
from typing import List, Optional, Tuple, Union
from chives.consensus.constants import ConsensusConstants
from chives.protocols import timelord_protocol
from chives.timelord.iters_from_block import iters_from_block
from chives.timelord.types import Chain, StateType
from chives.types.blockchain_format.classgroup import ClassgroupElement
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.blockchain_format.slots import ChallengeBlockInfo
from chives.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chives.types.end_of_slot_bundle import EndOfSubSlotBundle
from chives.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class LastState:
"""
Represents the state that the timelord is in, and should execute VDFs on top of. A state can be one of three types:
1. A "peak" or a block
2. An end of sub-slot
3. None, if it's the first sub-slot and there are no blocks yet
Timelords execute VDFs until they reach the next block or sub-slot, at which point the state is changed again.
The state can also be changed arbitrarily to a sub-slot or peak, for example in the case the timelord receives
a new block in the future.
"""
def __init__(self, constants: ConsensusConstants):
self.state_type: StateType = StateType.FIRST_SUB_SLOT
self.peak: Optional[timelord_protocol.NewPeakTimelord] = None
self.subslot_end: Optional[EndOfSubSlotBundle] = None
self.last_ip: uint64 = uint64(0)
self.deficit: uint8 = constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
self.sub_epoch_summary: Optional[SubEpochSummary] = None
self.constants: ConsensusConstants = constants
self.last_weight: uint128 = uint128(0)
self.last_height: uint32 = uint32(0)
self.total_iters: uint128 = uint128(0)
self.last_challenge_sb_or_eos_total_iters = uint128(0)
self.last_block_total_iters: Optional[uint128] = None
self.last_peak_challenge: bytes32 = constants.GENESIS_CHALLENGE
self.difficulty: uint64 = constants.DIFFICULTY_STARTING
self.sub_slot_iters: uint64 = constants.SUB_SLOT_ITERS_STARTING
self.reward_challenge_cache: List[Tuple[bytes32, uint128]] = [(constants.GENESIS_CHALLENGE, uint128(0))]
self.new_epoch = False
self.passed_ses_height_but_not_yet_included = False
self.infused_ses = False
def set_state(self, state: Union[timelord_protocol.NewPeakTimelord, EndOfSubSlotBundle]):
if isinstance(state, timelord_protocol.NewPeakTimelord):
self.state_type = StateType.PEAK
self.peak = state
self.subslot_end = None
_, self.last_ip = iters_from_block(
self.constants,
state.reward_chain_block,
state.sub_slot_iters,
state.difficulty,
)
self.deficit = state.deficit
self.sub_epoch_summary = state.sub_epoch_summary
self.last_weight = state.reward_chain_block.weight
self.last_height = state.reward_chain_block.height
self.total_iters = state.reward_chain_block.total_iters
self.last_peak_challenge = state.reward_chain_block.get_hash()
self.difficulty = state.difficulty
self.sub_slot_iters = state.sub_slot_iters
if state.reward_chain_block.is_transaction_block:
self.last_block_total_iters = self.total_iters
self.reward_challenge_cache = state.previous_reward_challenges
self.last_challenge_sb_or_eos_total_iters = self.peak.last_challenge_sb_or_eos_total_iters
self.new_epoch = False
if (self.peak.reward_chain_block.height + 1) % self.constants.SUB_EPOCH_BLOCKS == 0:
self.passed_ses_height_but_not_yet_included = True
else:
self.passed_ses_height_but_not_yet_included = state.passes_ses_height_but_not_yet_included
elif isinstance(state, EndOfSubSlotBundle):
self.state_type = StateType.END_OF_SUB_SLOT
if self.peak is not None:
self.total_iters = uint128(self.total_iters - self.get_last_ip() + self.sub_slot_iters)
else:
self.total_iters = uint128(self.total_iters + self.sub_slot_iters)
self.peak = None
self.subslot_end = state
self.last_ip = uint64(0)
self.deficit = state.reward_chain.deficit
if state.challenge_chain.new_difficulty is not None:
assert state.challenge_chain.new_sub_slot_iters is not None
self.difficulty = state.challenge_chain.new_difficulty
self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters
self.new_epoch = True
else:
self.new_epoch = False
if state.challenge_chain.subepoch_summary_hash is not None:
self.infused_ses = True
self.passed_ses_height_but_not_yet_included = False
else:
self.infused_ses = False
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.last_challenge_sb_or_eos_total_iters = self.total_iters
else:
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.new_epoch = False
self.reward_challenge_cache.append((self.get_challenge(Chain.REWARD_CHAIN), self.total_iters))
log.info(f"Updated timelord peak to {self.get_challenge(Chain.REWARD_CHAIN)}, total iters: {self.total_iters}")
while len(self.reward_challenge_cache) > 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
self.reward_challenge_cache.pop(0)
def get_sub_slot_iters(self) -> uint64:
return self.sub_slot_iters
def can_infuse_block(self, overflow: bool) -> bool:
if overflow and self.new_epoch:
# No overflows in new epoch
return False
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
return True
ss_start_iters = self.get_total_iters() - self.get_last_ip()
already_infused_count: int = 0
for _, total_iters in self.reward_challenge_cache:
if total_iters > ss_start_iters:
already_infused_count += 1
if already_infused_count >= self.constants.MAX_SUB_SLOT_BLOCKS:
return False
return True
def get_weight(self) -> uint128:
return self.last_weight
def get_height(self) -> uint32:
return self.last_height
def get_total_iters(self) -> uint128:
return self.total_iters
def get_last_peak_challenge(self) -> Optional[bytes32]:
return self.last_peak_challenge
def get_difficulty(self) -> uint64:
return self.difficulty
def get_last_ip(self) -> uint64:
return self.last_ip
def get_deficit(self) -> uint8:
return self.deficit
def just_infused_sub_epoch_summary(self) -> bool:
"""
Returns true if state is an end of sub-slot, and that end of sub-slot infused a sub epoch summary
"""
return self.state_type == StateType.END_OF_SUB_SLOT and self.infused_ses
def get_next_sub_epoch_summary(self) -> Optional[SubEpochSummary]:
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
# Can only infuse SES after a peak (in an end of sub slot)
return None
assert self.peak is not None
if self.passed_ses_height_but_not_yet_included and self.get_deficit() == 0:
# This will mean we will include the ses in the next sub-slot
return self.sub_epoch_summary
return None
def get_last_block_total_iters(self) -> Optional[uint128]:
return self.last_block_total_iters
def get_passed_ses_height_but_not_yet_included(self) -> bool:
return self.passed_ses_height_but_not_yet_included
def get_challenge(self, chain: Chain) -> Optional[bytes32]:
if self.state_type == StateType.FIRST_SUB_SLOT:
assert self.peak is None and self.subslot_end is None
if chain == Chain.CHALLENGE_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.REWARD_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
return None
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.challenge
elif chain == Chain.REWARD_CHAIN:
return reward_chain_block.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.challenge
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ChallengeBlockInfo(
reward_chain_block.proof_of_space,
reward_chain_block.challenge_chain_sp_vdf,
reward_chain_block.challenge_chain_sp_signature,
reward_chain_block.challenge_chain_ip_vdf,
).get_hash()
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
assert self.subslot_end is not None
if chain == Chain.CHALLENGE_CHAIN:
return self.subslot_end.challenge_chain.get_hash()
elif chain == Chain.REWARD_CHAIN:
return self.subslot_end.reward_chain.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
assert self.subslot_end.infused_challenge_chain is not None
return self.subslot_end.infused_challenge_chain.get_hash()
return None
return None
def get_initial_form(self, chain: Chain) -> Optional[ClassgroupElement]:
if self.state_type == StateType.FIRST_SUB_SLOT:
return ClassgroupElement.get_default_element()
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.output
if chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.output
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ClassgroupElement.get_default_element()
else:
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
if chain == Chain.CHALLENGE_CHAIN or chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
assert self.subslot_end is not None
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return ClassgroupElement.get_default_element()
else:
return None
return None
| 49.953975 | 119 | 0.675936 |
1823538da1cf1495ce12b6a984ce5e1965a9a432 | 8,751 | py | Python | tensorflow_datasets/audio/speech_command.py | haideraltahan/datasets | aad5c7ea705949d20817fcc49a892bb2a21532f0 | ["Apache-2.0"] | null | null | null | tensorflow_datasets/audio/speech_command.py | haideraltahan/datasets | aad5c7ea705949d20817fcc49a892bb2a21532f0 | ["Apache-2.0"] | null | null | null | tensorflow_datasets/audio/speech_command.py | haideraltahan/datasets | aad5c7ea705949d20817fcc49a892bb2a21532f0 | ["Apache-2.0"] | null | null | null |
"""Speech Command Dataset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import hashlib
import os
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.ops import io_ops
from tensorflow.python.util import compat
_CITATION = """
@article{warden2018speech,
title={Speech commands: A dataset for limited-vocabulary speech recognition},
author={Warden, Pete},
journal={arXiv preprint arXiv:1804.03209},
year={2018}
}
"""
_DESCRIPTION = """
Speech Command is an audio dataset of spoken words designed to help train and evaluate keyword spotting
systems. Speech Command consists of a set of one-second .wav audio files, each containing a single spoken
English word. These words are from a small set of commands, and are spoken by a
variety of different speakers. The audio files are organized into folders based
on the word they contain, and this data set is designed to help train simple
machine learning models. The data set contains 105,829 audio files in total.
"""
_URL = "https://arxiv.org/abs/1804.03209"
_DL_URL = "https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz"
MAX_NUM_WAVS_PER_CLASS = 2 ** 27 - 1 # ~134M
VALIDATION_PERCENTAGE = 10
TESTING_PERCENTAGE = 10
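# One second of audio at 16 kHz, i.e. 16,000 samples per clip.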
_SAMPLE_LENGTH = 16000
class SpeechCommand(tfds.core.GeneratorBasedBuilder):
"""Speech Command is a audio dataset of spoken words designed to help train and evaluate keyword spotting
systems."""
VERSION = tfds.core.Version('0.2.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(
{
"audio": tfds.features.Tensor(
shape=(
_SAMPLE_LENGTH,
),
dtype=tf.float32),
"speaker_id": tf.string,
"label": tfds.features.ClassLabel(
names=[
'backward',
'bed',
'bird',
'cat',
'dog',
'down',
'eight',
'five',
'follow',
'forward',
'four',
'go',
'happy',
'house',
'learn',
'left',
'marvin',
'nine',
'no',
'off',
'on',
'one',
'right',
'seven',
'sheila',
'six',
'stop',
'three',
'tree',
'two',
'up',
'visual',
'wow',
'yes',
'zero']),
}),
supervised_keys=(
"audio",
"label"),
urls=[_URL],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
extracted_dirs = dl_manager.download_and_extract(_DL_URL)
print(_DL_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"dirs": extracted_dirs,
"par": 'training'
}),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"dirs": extracted_dirs,
"par": 'validation'
}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"dirs": extracted_dirs,
"par": 'testing'
}),
]
def _generate_examples(self, dirs, par):
"""Yields examples."""
if not os.path.exists(dirs):
return
numerator = 0
for _, directories, _ in os.walk(dirs):
for dir in directories:
for file in os.listdir(os.path.join(dirs, dir)):
if '_background_noise_' in dir:
continue
if par == which_set(file):
record = {
"audio": load_wav_file(
os.path.join(
dirs,
dir,
file)),
"speaker_id": file.split('_')[0],
"label": dir}
numerator += 1
yield numerator, record
def load_wav_file(filename):
"""Loads an audio file and returns a float PCM-encoded array of samples.
This function is from the dataset github page:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/speech_commands/input_data.py
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=16000)
return sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: filename}).audio.flatten()
def which_set(
filename,
validation_percentage=VALIDATION_PERCENTAGE,
testing_percentage=TESTING_PERCENTAGE):
"""Determines which data partition the file should belong to.
This function is from the dataset github page:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/speech_commands/input_data.py
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
| 39.958904 | 109 | 0.54828 |
914c51511d63720b9c03f5716252b0303b340b23 | 27 | py | Python | password_organizer/cli_menu/__init__.py | gbataille/password-organizer | 62a9a23a32637197d354f5d438cd0917d781cb52 | ["MIT"] | null | null | null | password_organizer/cli_menu/__init__.py | gbataille/password-organizer | 62a9a23a32637197d354f5d438cd0917d781cb52 | ["MIT"] | null | null | null | password_organizer/cli_menu/__init__.py | gbataille/password-organizer | 62a9a23a32637197d354f5d438cd0917d781cb52 | ["MIT"] | null | null | null |
from .prompt import prompt
| 13.5 | 26 | 0.814815 |
d55d8e8dfa593335b98a02e1922e331500e1e648 | 12,525 | py | Python | test/backend/test_api/test_user.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 124 | 2016-12-23T02:14:45.000Z | 2021-11-20T15:25:20.000Z | test/backend/test_api/test_user.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 24 | 2017-05-29T10:15:15.000Z | 2019-05-23T13:30:58.000Z | test/backend/test_api/test_user.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 19 | 2017-05-15T13:19:07.000Z | 2021-05-14T02:35:05.000Z |
import linkr # flake8: noqa: F401
import mock
import util.recaptcha
import util.response
from test.backend.factory import UserFactory
from test.backend.test_case import LinkrTestCase
from test.backend.test_case import mock_config_options
from uri.auth import *
from uri.user import *
class TestUser(LinkrTestCase):
_multiprocess_can_split_ = True
@mock_config_options(server={'allow_open_registration': False})
def test_api_add_new_user_registration_disabled(self):
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_open_registration_disabled')
@mock_config_options(server={'allow_open_registration': True})
def test_api_add_new_user_unauth_new_admin(self):
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
'is_admin': True,
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_unauth')
@mock_config_options(server={'allow_open_registration': True})
def test_api_add_new_user_non_admin_new_admin(self):
with self.api_utils.authenticated_user():
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
'is_admin': True,
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_unauth')
@mock_config_options(server={'allow_open_registration': True})
def test_api_add_new_user_unavailable_username(self):
UserFactory.generate(username='username')
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
})
self.assertEqual(resp.status_code, 409)
self.assertEqual(resp.json['failure'], 'failure_unavailable_username')
@mock_config_options(server={'allow_open_registration': True})
def test_api_add_new_user_invalid_username(self):
resp = self.api_utils.request(UserAddURI, data={
'username': 'username with spaces',
'password': 'password',
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json['failure'], 'failure_invalid_username')
@mock_config_options(server={'allow_open_registration': True})
def test_api_add_new_user_valid(self):
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['username'], 'username')
def test_api_add_new_user_undefined_error(self):
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(UserAddURI, data={
'username': 'username',
'password': 'password',
})
self.assertTrue(self.api_utils.is_undefined_error(resp))
def test_api_deactivate_user_nonexistent(self):
user = UserFactory.generate()
with self.api_utils.authenticated_user(is_admin=True):
resp = self.api_utils.request(UserDeactivationURI, data={
'user_id': user.user_id,
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['user_id'], user.user_id)
resp = self.api_utils.request(UserDeactivationURI, data={
'user_id': user.user_id,
})
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp.json['failure'], 'failure_nonexistent_user')
def test_api_deactivate_user_unauth(self):
user = UserFactory.generate()
with self.api_utils.authenticated_user():
resp = self.api_utils.request(UserDeactivationURI, data={
'user_id': user.user_id,
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_unauth')
def test_api_deactivate_user_by_id(self):
with self.api_utils.authenticated_user() as user:
# User should initially be authenticated
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 200)
# Actual account deactivation
resp = self.api_utils.request(UserDeactivationURI, data={
'user_id': user.user_id,
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['user_id'], user.user_id)
# User should no longer be authenticated after deleting his or her own account
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.json['failure'], 'failure_unauth')
def test_api_deactivate_user_admin(self):
user = UserFactory.generate()
with self.api_utils.authenticated_user(is_admin=True) as admin:
# Admin should initially be authenticated
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 200)
# Actual account deactivation of someone else's account
resp = self.api_utils.request(UserDeactivationURI, data={
'user_id': user.user_id,
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['user_id'], user.user_id)
# Admin should still be authenticated since his or her account was not affected
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 200)
def test_api_deactivate_user_current_user(self):
with self.api_utils.authenticated_user() as user:
# User should initially be authenticated
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 200)
# Actual account deactivation
resp = self.api_utils.request(UserDeactivationURI)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['user_id'], user.user_id)
# User should no longer be authenticated after deleting his or her own account
resp = self.api_utils.request(AuthCheckURI)
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.json['failure'], 'failure_unauth')
def test_api_deactivate_user_undefined_error(self):
with self.api_utils.authenticated_user():
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(UserDeactivationURI)
self.assertTrue(self.api_utils.is_undefined_error(resp))
def test_api_update_user_password_invalid_auth(self):
with self.api_utils.authenticated_user():
resp = self.api_utils.request(UserUpdatePasswordURI, data={
'current_password': 'bad password',
'new_password': 'new password',
})
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.json['failure'], 'failure_invalid_auth')
def test_api_update_user_password_valid(self):
with self.api_utils.authenticated_user(username='username'):
resp = self.api_utils.request(UserUpdatePasswordURI, data={
'current_password': 'password',
'new_password': 'new password',
})
self.assertEqual(resp.status_code, 200)
resp = self.api_utils.request(AuthLoginURI, data={
'username': 'username',
'password': 'new password',
'remember_me': False,
})
self.assertEqual(resp.status_code, 200)
def test_api_update_user_password_undefined_error(self):
with self.api_utils.authenticated_user():
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(UserUpdatePasswordURI, data={
'current_password': 'password',
'new_password': 'new password',
})
self.assertTrue(self.api_utils.is_undefined_error(resp))
def test_api_regenerate_user_api_key_invalid_auth(self):
with self.api_utils.authenticated_user():
resp = self.api_utils.request(UserRegenerateAPIKeyURI, data={
'password': 'invalid',
})
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.json['failure'], 'failure_invalid_auth')
def test_api_regenerate_user_api_key_valid(self):
with self.api_utils.authenticated_user() as user:
old_api_key = user.api_key
resp = self.api_utils.request(UserRegenerateAPIKeyURI, data={
'password': 'password',
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['user_id'], user.user_id)
self.assertNotEqual(old_api_key, user.api_key)
def test_api_regenerate_user_api_key_undefined_error(self):
with self.api_utils.authenticated_user():
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(UserRegenerateAPIKeyURI, data={
'password': 'password',
})
self.assertTrue(self.api_utils.is_undefined_error(resp))
def test_api_recent_users_unauth(self):
with self.api_utils.authenticated_user():
resp = self.api_utils.request(RecentUsersURI, data={
'page_num': 0,
'num_per_page': 10,
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_unauth')
def test_api_recent_users_valid(self):
with self.api_utils.authenticated_user(is_admin=True) as admin:
resp = self.api_utils.request(RecentUsersURI, data={
'page_num': 0,
'num_per_page': 10,
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['users'], [admin.as_dict()])
def test_api_recent_users_undefined_error(self):
with self.api_utils.authenticated_user(is_admin=True):
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(RecentUsersURI, data={
'page_num': 0,
'num_per_page': 10,
})
self.assertTrue(self.api_utils.is_undefined_error(resp))
def test_api_user_search_unauth(self):
with self.api_utils.authenticated_user():
resp = self.api_utils.request(UserSearchURI, data={
'username': 'username',
'page_num': 0,
'num_per_page': 10,
})
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.json['failure'], 'failure_unauth')
def test_api_user_search_valid(self):
with self.api_utils.authenticated_user(is_admin=True) as admin:
resp = self.api_utils.request(UserSearchURI, data={
'username': admin.username,
'page_num': 0,
'num_per_page': 10,
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json['users'], [admin.as_dict()])
def test_api_user_search_undefined_error(self):
with self.api_utils.authenticated_user(is_admin=True) as admin:
with mock.patch.object(util.response, 'success') as mock_success:
mock_success.side_effect = ValueError
resp = self.api_utils.request(UserSearchURI, data={
'username': admin.username,
'page_num': 0,
'num_per_page': 10,
})
self.assertTrue(self.api_utils.is_undefined_error(resp))
| 39.140625 | 91 | 0.628743 |
6ecd0607220c8509865235b714612bd0ff40a74b | 565 | py | Python | test.py | bhavnesh2211/Ngo-Scraper | 256b1166ff7564e0799437d19e86fec0fd549d9f | ["MIT"] | 6 | 2019-03-03T06:08:24.000Z | 2019-03-03T07:21:06.000Z | test.py | bhavnesh2211/Ngo-Scraper | 256b1166ff7564e0799437d19e86fec0fd549d9f | ["MIT"] | null | null | null | test.py | bhavnesh2211/Ngo-Scraper | 256b1166ff7564e0799437d19e86fec0fd549d9f | ["MIT"] | null | null | null |
import requests
from pprint import pprint
from bs4 import BeautifulSoup
url = requests.get("https://www.giveindia.org/certified-indian-ngos")
soup = BeautifulSoup(url.text, "html.parser")
dictionary = {}
trs = soup.find("table", class_="table table-bordered")
tr = trs.find("tbody")
tds = tr.find_all("tr")
place = []
work = []
name = []
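# Each table row has three cells: NGO name, area of work, and location.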
for i in tds:
a = i.find_all("td")
name.append(a[0].getText())
work.append(a[1].getText())
place.append(a[2].getText())
dictionary["name"] = name
dictionary["work"] = work
dictionary["place"] = place
pprint (dictionary)
| 24.565217 | 69 | 0.693805 |
49ede2c6c4fb02e55b0e63bb25c826b1fbb676f5 | 2,563 | py | Python | oase_documents/conf.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | ["Apache-2.0"] | 9 | 2020-03-25T07:51:47.000Z | 2022-02-07T00:07:28.000Z | oase_documents/conf.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | ["Apache-2.0"] | 1,164 | 2021-01-28T23:16:11.000Z | 2022-03-28T07:23:10.000Z | oase_documents/conf.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | ["Apache-2.0"] | 25 | 2020-03-17T06:48:30.000Z | 2022-02-15T15:13:44.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'OASE_docs'
copyright = '2019 NEC Corporation'
author = 'NEC'
# The full version, including alpha/beta/rc tags
release = '1.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinxcontrib.blockdiag']
# Fontpath for blockdiag (truetype font)
blockdiag_fontpath = '/usr/share/fonts/ipa-pgothic/ipagp.ttf'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ja'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'exastro_documents'
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Hide the "show source code" link
html_show_sourcelink = False
# Remove the "This document was created with Sphinx" text in the footer.
html_show_sphinx = False
# favicon
html_favicon = '_static/favicon.ico'
# LaTeX docclass setting
latex_docclass = {'manual': 'jsbook'}
| 32.858974 | 79 | 0.690987 |
a3d136c8d51204a6bc688a035dddf58fe956ae69 | 12,996 | py | Python | tensorflow/python/kernel_tests/sparse_concat_op_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | ["Apache-2.0"] | 5 | 2018-07-04T22:14:02.000Z | 2018-07-04T22:21:43.000Z | tensorflow/python/kernel_tests/sparse_concat_op_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | ["Apache-2.0"] | null | null | null | tensorflow/python/kernel_tests/sparse_concat_op_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | ["Apache-2.0"] | 1 | 2018-11-30T01:35:01.000Z | 2018-11-30T01:35:01.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseConcat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseConcatTest(test.TestCase):
def _SparseTensor_UnknownShape(self,
ind_shape=None,
val_shape=None,
shape_shape=None):
return sparse_tensor.SparseTensor(
array_ops.placeholder(
dtypes.int64, shape=ind_shape),
array_ops.placeholder(
dtypes.float32, shape=val_shape),
array_ops.placeholder(
dtypes.int64, shape=shape_shape))
def _SparseTensorValue_3x3(self):
# [ 1]
# [2 ]
# [3 4]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x3())
def _SparseTensorValue_3x5(self):
# [ ]
# [ 1 ]
# [2 1 0]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array([1, 2, 1, 0])
shape = np.array([3, 5])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x5(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x5())
def _SparseTensor_3x2(self):
# [ ]
# [1 ]
# [2 ]
ind = np.array([[1, 0], [2, 0]])
val = np.array([1, 2])
shape = np.array([3, 2])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3(self):
# [ 1 ]
# [1 2]
ind = np.array([[0, 1], [1, 0], [1, 2]])
val = np.array([1, 1, 2])
shape = np.array([2, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x3(self):
# [ a]
# [b ]
# [c d]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array(["a", "b", "c", "d"])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x5(self):
# [ ]
# [ e ]
# [f g h]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array(["e", "f", "g", "h"])
shape = np.array([3, 5])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def testConcat1(self):
with self.session(use_gpu=False) as sess:
# concat(A):
# [ 1]
# [2 ]
# [3 4]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
# Note that we ignore concat_dim in this case since we short-circuit the
# single-input case in python.
for concat_dim in (-2000, 1, 2000):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a])
self.assertEqual(sp_concat.indices.get_shape(), [4, 2])
self.assertEqual(sp_concat.values.get_shape(), [4])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2]])
self.assertAllEqual(concat_out.values, [1, 2, 3, 4])
self.assertAllEqual(concat_out.dense_shape, [3, 3])
def testConcat2(self):
with self.session(use_gpu=False) as sess:
# concat(A, B):
# [ 1 ]
# [2 1 ]
# [3 4 2 1 0]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x5(), self._SparseTensor_3x5()):
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4],
[2, 0], [2, 2], [2, 3],
[2, 6], [2, 7]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 3, 4, 2, 1, 0])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
def testConcatDim0(self):
with self.session(use_gpu=False) as sess:
# concat(A, D):
# [ 1]
# [2 ]
# [3 4]
# [ 1 ]
# [1 2]
sp_a = self._SparseTensor_3x3()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_d])
self.assertEqual(sp_concat.indices.get_shape(), [7, 2])
self.assertEqual(sp_concat.values.get_shape(), [7])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [3, 1], [4, 0], [4, 2]])
self.assertAllEqual(concat_out.values, np.array([1, 2, 3, 4, 1, 1, 2]))
self.assertAllEqual(concat_out.dense_shape, np.array([5, 3]))
def testConcat3(self):
with self.session(use_gpu=False) as sess:
# concat(A, B, C):
# [ 1 ]
# [2 1 1 ]
# [3 4 2 1 0 2 ]
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [10, 2])
self.assertEqual(sp_concat.values.get_shape(), [10])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4], [1, 8],
[2, 0], [2, 2], [2, 3], [2, 6],
[2, 7], [2, 8]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 1, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(concat_out.dense_shape, [3, 10])
def testConcatNonNumeric(self):
with self.session(use_gpu=False) as sess:
# concat(A, B):
# [ a ]
# [b e ]
# [c d f g h]
sp_a = self._SparseTensor_String3x3()
sp_b = self._SparseTensor_String3x5()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]])
self.assertAllEqual(concat_out.values,
[b"a", b"b", b"e", b"c", b"d", b"f", b"g", b"h"])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
def testMismatchedRank(self):
with self.session(use_gpu=False):
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches can be caught at shape-inference time
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(concat_dim, [sp_a, sp_e])
def testMismatchedRankExpandNonconcatDim(self):
with self.session(use_gpu=False):
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches should be caught at shape-inference time, even for
# expand_nonconcat_dim=True.
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(
concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True)
def testMismatchedShapes(self):
with self.session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim,
[sp_a, sp_b, sp_c, sp_d])
# Shape mismatches can only be caught when the op is run
with self.assertRaisesOpError("Input shapes must match"):
sess.run(sp_concat)
def testMismatchedShapesExpandNonconcatDim(self):
with self.session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim0 in (-2, 0):
for concat_dim1 in (-1, 1):
sp_concat_dim0 = sparse_ops.sparse_concat(
concat_dim0, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim1 = sparse_ops.sparse_concat(
concat_dim1, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim0_out = self.evaluate(sp_concat_dim0)
sp_concat_dim1_out = self.evaluate(sp_concat_dim1)
self.assertAllEqual(sp_concat_dim0_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [4, 1], [5, 0],
[5, 3], [5, 4], [7, 0], [8, 0], [9, 1], [10, 0],
[10, 2]])
self.assertAllEqual(sp_concat_dim0_out.values,
[1, 2, 3, 4, 1, 2, 1, 0, 1, 2, 1, 1, 2])
self.assertAllEqual(sp_concat_dim0_out.dense_shape, [11, 5])
self.assertAllEqual(sp_concat_dim1_out.indices,
[[0, 2], [0, 11], [1, 0], [1, 4], [1, 8], [1, 10],
[1, 12], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7],
[2, 8]])
self.assertAllEqual(sp_concat_dim1_out.values,
[1, 1, 2, 1, 1, 1, 2, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(sp_concat_dim1_out.dense_shape, [3, 13])
def testShapeInferenceUnknownShapes(self):
with self.session(use_gpu=False):
sp_inputs = [
self._SparseTensor_UnknownShape(),
self._SparseTensor_UnknownShape(val_shape=[3]),
self._SparseTensor_UnknownShape(ind_shape=[1, 3]),
self._SparseTensor_UnknownShape(shape_shape=[3])
]
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, sp_inputs)
self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3])
self.assertEqual(sp_concat.values.get_shape().as_list(), [None])
self.assertEqual(sp_concat.dense_shape.get_shape(), [3])
if __name__ == "__main__":
test.main()
| 37.889213 | 80 | 0.577024 |
3a28a7b1ca2d427757b20b4a249b7a0c474bce73 | 2,461 | py | Python | src/sot_talos_balance/talos/parameter_server_conf.py | olivier-stasse/sot-talos-balance | 8749650fc5f512a04c349f447a0cfb0d9e0e1e05 | ["BSD-2-Clause"] | null | null | null | src/sot_talos_balance/talos/parameter_server_conf.py | olivier-stasse/sot-talos-balance | 8749650fc5f512a04c349f447a0cfb0d9e0e1e05 | ["BSD-2-Clause"] | null | null | null | src/sot_talos_balance/talos/parameter_server_conf.py | olivier-stasse/sot-talos-balance | 8749650fc5f512a04c349f447a0cfb0d9e0e1e05 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 9 13:55:16 2015
@author: adelpret
"""
import numpy as np
from rospkg import RosPack
rospack = RosPack()
NJ = 32
model_path = [rospack.get_path('talos_data') + "/../"]
urdfFileName = rospack.get_path('talos_data') + "/urdf/talos_reduced.urdf"
ImuJointName = "imu_joint"
mapJointNameToID = {
'lhy': 0,
'lhr': 1,
'lhp': 2,
'lk': 3,
'lap': 4,
'lar': 5,
'rhy': 6,
'rhr': 7,
'rhp': 8,
'rk': 9,
'rap': 10,
'rar': 11,
'ty': 12,
'tp': 13,
'lsy': 14,
'lsr': 15,
'lay': 16,
'le': 17,
'lwy': 18,
'lwp': 19,
'lwr': 20,
'lh': 21,
'rsy': 22,
'rsr': 23,
'ray': 24,
're': 25,
'rwy': 26,
'rwp': 27,
'rwr': 28,
'rh': 29,
'hp': 30,
'hy': 31
}
mapJointLimits = {
0: [-0.349065850399, 1.57079632679],
1: [-0.5236, 0.5236],
2: [-2.095, 0.7],
3: [0.0, 2.618],
4: [-1.309, 0.768],
5: [-0.5236, 0.5236],
6: [-1.57079632679, 0.349065850399],
7: [-0.5236, 0.5236],
8: [-2.095, 0.7],
9: [0.0, 2.618],
10: [-1.309, 0.768],
11: [-0.5236, 0.5236],
12: [-1.308996939, 1.308996939],
13: [-0.261799387799, 0.785398163397],
14: [-1.57079632679, 0.523598775598],
15: [0.0, 2.87979326579],
16: [-2.44346095279, 2.44346095279],
17: [-2.35619449019, 0.0],
18: [-2.53072741539, 2.53072741539],
19: [-1.3962634016, 1.3962634016],
20: [-0.698131700798, 0.698131700798],
21: [-1.0471975512, 0.0],
22: [-0.523598775598, 1.57079632679],
23: [-2.87979326579, 0.0],
24: [-2.44346095279, 2.44346095279],
25: [-2.35619449019, 0.0],
26: [-2.53072741539, 2.53072741539],
27: [-1.3962634016, 1.3962634016],
28: [-0.698131700798, 0.698131700798],
29: [-1.0471975512, 0.0],
30: [-0.261799387799, 0.785398163397],
31: [-1.308996939, 1.308996939]
}
vfMax = np.array([100.0, 100.0, 300.0, 80.0, 80.0, 30.0])
vfMin = -vfMax
mapForceIdToForceLimits = {0: [vfMin, vfMax], 1: [vfMin, vfMax], 2: [vfMin, vfMax], 3: [vfMin, vfMax]}
mapNameToForceId = {"rf": 0, "lf": 1, "rh": 2, "lh": 3}
indexOfForceSensors = ()
footFrameNames = {"Right": "leg_right_6_joint", "Left": "leg_left_6_joint"}
rightFootSensorXYZ = (0.0, 0.0, -0.085)
rightFootSoleXYZ = (0.0, 0.0, -0.105)
urdftosot = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31)
| 24.366337 | 118 | 0.534742 |
c6ad39cbef12c71f9faaff964537fb5b7755af85 | 3,958 | py | Python | arxiv/canonical/domain/tests/test_file.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | ["MIT"] | 5 | 2019-05-26T22:52:54.000Z | 2021-11-05T12:27:11.000Z | arxiv/canonical/domain/tests/test_file.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | ["MIT"] | 31 | 2019-06-24T13:51:25.000Z | 2021-11-12T22:27:10.000Z | arxiv/canonical/domain/tests/test_file.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | ["MIT"] | 4 | 2019-01-10T22:01:54.000Z | 2021-11-05T12:26:58.000Z |
"""Tests for :mod:`arxiv.canonical.domain`."""
from datetime import datetime
from unittest import TestCase
from ..file import URI, Key, CanonicalFile, ContentType
class TestURIForFile(TestCase):
"""URI can refer to a local file."""
def test_file_uri(self):
"""URI is initialized with an absolute path."""
path = '/path/to/some/data'
uri = URI(path)
self.assertTrue(uri.is_file, 'Recognized as a file reference')
self.assertFalse(uri.is_http_url, 'Not an HTTP URI')
self.assertFalse(uri.is_canonical, 'Not a canonical URI')
self.assertEqual(uri.scheme, 'file')
self.assertEqual(uri.path, path, 'Original path is preserved')
def test_file_uri_with_relative_path(self):
"""URI is initialized with a relative path."""
path = 'path/to/some/data'
with self.assertRaises(ValueError):
URI(path)
class TestCanonicalURI(TestCase):
"""URI can refer to a canonical resource."""
def test_canonical_uri(self):
"""URI is initialized with an arXiv canonical URI."""
raw = 'arxiv:///path/to/a/resource'
uri = URI(raw)
self.assertFalse(uri.is_file, 'Not a local file reference')
self.assertFalse(uri.is_http_url, 'Not an HTTP URI')
self.assertTrue(uri.is_canonical, 'Recognized as a canonical URI')
self.assertEqual(uri.scheme, 'arxiv')
self.assertEqual(uri.path, '/path/to/a/resource')
class TestHTTPURI(TestCase):
"""URI can refer to an HTTP URI."""
def test_valid_http_uri(self):
"""URI is initialized with a valid HTTP URI."""
raw = 'http://asdf.com'
uri = URI(raw)
self.assertFalse(uri.is_file, 'Not a local file reference')
self.assertTrue(uri.is_http_url, 'Recognized as an HTTP URI')
self.assertFalse(uri.is_canonical, 'Not a canonical URI')
self.assertEqual(uri.scheme, 'http')
def test_valid_https_uri(self):
"""URI is initialized with a valid HTTPS URI."""
raw = 'https://asdf.com'
uri = URI(raw)
self.assertFalse(uri.is_file, 'Not a local file reference')
self.assertTrue(uri.is_http_url, 'Recognized as an HTTP URI')
self.assertFalse(uri.is_canonical, 'Not a canonical URI')
self.assertEqual(uri.scheme, 'https')
def test_valid_ftp_uri(self):
"""URI is initialized with a valid FTP URI."""
raw = 'ftp://asdf.com/foo'
uri = URI(raw)
self.assertFalse(uri.is_file, 'Not a local file reference')
self.assertFalse(uri.is_http_url, 'Not an HTTP URI')
self.assertFalse(uri.is_canonical, 'Not a canonical URI')
self.assertEqual(uri.scheme, 'ftp')
class TestKey(TestCase):
"""Key is a canonical URI."""
def test_with_absolute_path(self):
"""Key is initialized with an absolute path."""
raw = '/path/to/a/resource'
key = Key(raw)
self.assertTrue(key.is_canonical, 'Key is a canonical URI')
        self.assertIsInstance(key, URI, 'Indeed, it is a URI')
self.assertEqual(key.scheme, 'arxiv')
self.assertEqual(str(key), f'arxiv://{raw}')
class TestCanonicalFile(TestCase):
def setUp(self):
"""Given a canonical file."""
self.canonical_file = CanonicalFile(
modified=datetime.now(),
size_bytes=5_324,
content_type=ContentType.json,
filename='foo.json',
ref=URI('arxiv:///key/for/foo.json')
)
def test_dict_transformation(self):
"""Transformation of CanonicalFile to/from dict preserves state."""
self.assertEqual(
self.canonical_file,
CanonicalFile.from_dict(self.canonical_file.to_dict())
)
def test_mime_type(self):
"""MIME type is accessible on the file itself."""
self.assertEqual(self.canonical_file.mime_type,
ContentType.json.mime_type)
| 35.981818
| 75
| 0.635422
|
f31db364baeb7cfddf28dd231e1de3c4d1e739aa
| 3,871
|
py
|
Python
|
ververica_api_sdk/models/deployment_template.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
ververica_api_sdk/models/deployment_template.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
ververica_api_sdk/models/deployment_template.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Application Manager API
Application Manager APIs to control Apache Flink jobs # noqa: E501
OpenAPI spec version: 2.0.1
Contact: platform@ververica.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeploymentTemplate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'metadata': 'DeploymentTemplateMetadata',
'spec': 'DeploymentTemplateSpec'
}
attribute_map = {
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, metadata=None, spec=None): # noqa: E501
"""DeploymentTemplate - a model defined in Swagger""" # noqa: E501
self._metadata = None
self._spec = None
self.discriminator = None
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def metadata(self):
"""Gets the metadata of this DeploymentTemplate. # noqa: E501
:return: The metadata of this DeploymentTemplate. # noqa: E501
:rtype: DeploymentTemplateMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this DeploymentTemplate.
:param metadata: The metadata of this DeploymentTemplate. # noqa: E501
:type: DeploymentTemplateMetadata
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this DeploymentTemplate. # noqa: E501
:return: The spec of this DeploymentTemplate. # noqa: E501
:rtype: DeploymentTemplateSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this DeploymentTemplate.
:param spec: The spec of this DeploymentTemplate. # noqa: E501
:type: DeploymentTemplateSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeploymentTemplate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeploymentTemplate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
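# A minimal usage sketch (illustrative only, not part of the generated SDK file;
# it relies solely on the class defined above, with both fields left as None):
if __name__ == "__main__":
    # An empty template serializes to a dict of Nones and compares equal to
    # another empty instance, since __eq__ compares the instance __dict__s.
    template = DeploymentTemplate(metadata=None, spec=None)
    print(template.to_dict())                # {'metadata': None, 'spec': None}
    print(template == DeploymentTemplate())  # True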
| 27.260563
| 80
| 0.573754
|
5699962ee64a4f0bcf35f7097aea7a1def34fa7f
| 8,731
|
py
|
Python
|
rpython/memory/gc/inspector.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
rpython/memory/gc/inspector.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
rpython/memory/gc/inspector.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
"""
Utility RPython functions to inspect objects in the GC.
"""
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.objectmodel import free_non_gc_object
from rpython.rlib import rposix, rgc, jit
from rpython.memory.support import AddressDict, get_address_stack
# ---------- implementation of rpython.rlib.rgc.get_rpy_roots() ----------
def _append_rpy_root(obj, gc):
# Can use the gc list, but should not allocate!
# It is essential that the list is not resizable!
lst = gc._list_rpy
index = gc._count_rpy
gc._count_rpy = index + 1
if index < len(lst):
lst[index] = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
#else:
# too many items. This situation is detected in the 'while' loop below
def _do_append_rpy_roots(gc, lst):
gc._count_rpy = 0
gc._list_rpy = lst
gc.enumerate_all_roots(_append_rpy_root, gc)
gc._list_rpy = None
return gc._count_rpy
def get_rpy_roots(gc):
# returns a list that may end with some NULLs
while True:
result = [lltype.nullptr(llmemory.GCREF.TO)] * gc._totalroots_rpy
count = _do_append_rpy_roots(gc, result)
if count <= len(result): # 'count' fits inside the list
return result
count += (count // 8)
gc._totalroots_rpy = count + 10
# ---------- implementation of rpython.rlib.rgc.get_rpy_referents() ----------
def _append_rpy_referent(pointer, gc):
# Can use the gc list, but should not allocate!
# It is essential that the list is not resizable!
lst = gc._list_rpy
index = gc._count_rpy
gc._count_rpy = index + 1
if index < len(lst):
lst[index] = llmemory.cast_adr_to_ptr(pointer.address[0],
llmemory.GCREF)
#else:
# too many items. This situation is detected in the 'while' loop below
def _do_append_rpy_referents(gc, gcref, lst):
gc._count_rpy = 0
gc._list_rpy = lst
gc.trace(llmemory.cast_ptr_to_adr(gcref), _append_rpy_referent, gc)
gc._list_rpy = None
return gc._count_rpy
def get_rpy_referents(gc, gcref):
# returns a list with no NULLs
result = []
while True:
count = _do_append_rpy_referents(gc, gcref, result)
if count <= len(result): # 'count' fits inside the list
if count < len(result):
result = result[:count]
return result
result = [lltype.nullptr(llmemory.GCREF.TO)] * count
# ----------
def get_rpy_memory_usage(gc, gcref):
return gc.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref))
def get_rpy_type_index(gc, gcref):
typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref))
return gc.get_member_index(typeid)
def is_rpy_instance(gc, gcref):
typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref))
return gc.is_rpython_class(typeid)
# ----------
raw_os_write = rffi.llexternal(rposix.UNDERSCORE_ON_WIN32 + 'write',
[rffi.INT, llmemory.Address, rffi.SIZE_T],
rffi.SIZE_T,
sandboxsafe=True, _nowrapper=True)
AddressStack = get_address_stack()
class BaseWalker(object):
_alloc_flavor_ = 'raw'
def __init__(self, gc):
self.gc = gc
self.gcflag = gc.gcflag_extra
if self.gcflag == 0:
self.seen = AddressDict()
self.pending = AddressStack()
def delete(self):
if self.gcflag == 0:
self.seen.delete()
self.pending.delete()
free_non_gc_object(self)
def add_roots(self):
self.gc.enumerate_all_roots(_hd_add_root, self)
pendingroots = self.pending
self.pending = AddressStack()
self.walk(pendingroots)
pendingroots.delete()
self.end_add_roots_marker()
def end_add_roots_marker(self):
pass
def add(self, obj):
if self.gcflag == 0:
if not self.seen.contains(obj):
self.seen.setitem(obj, obj)
self.pending.append(obj)
else:
hdr = self.gc.header(obj)
if (hdr.tid & self.gcflag) == 0:
hdr.tid |= self.gcflag
self.pending.append(obj)
def walk(self, pending):
while pending.non_empty():
self.processobj(pending.pop())
# ----------
# A simplified copy of the above, to make sure we walk again all the
# objects to clear the 'gcflag'.
def unobj(self, obj):
gc = self.gc
gc.trace(obj, self._unref, None)
def _unref(self, pointer, _):
obj = pointer.address[0]
self.unadd(obj)
def unadd(self, obj):
assert self.gcflag != 0
hdr = self.gc.header(obj)
if (hdr.tid & self.gcflag) != 0:
hdr.tid &= ~self.gcflag
self.pending.append(obj)
def clear_gcflag_again(self):
self.gc.enumerate_all_roots(_hd_unadd_root, self)
pendingroots = self.pending
self.pending = AddressStack()
self.unwalk(pendingroots)
pendingroots.delete()
def unwalk(self, pending):
while pending.non_empty():
self.unobj(pending.pop())
def finish_processing(self):
if self.gcflag != 0:
self.clear_gcflag_again()
self.unwalk(self.pending)
def process(self):
self.add_roots()
self.walk(self.pending)
class MemoryPressureCounter(BaseWalker):
def __init__(self, gc):
self.count = 0
BaseWalker.__init__(self, gc)
def processobj(self, obj):
gc = self.gc
typeid = gc.get_type_id(obj)
if gc.has_memory_pressure(typeid):
ofs = gc.get_memory_pressure_ofs(typeid)
val = (obj + ofs).signed[0]
self.count += val
gc.trace(obj, self._ref, None)
def _ref(self, pointer, _):
obj = pointer.address[0]
self.add(obj)
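# A short sketch of the word stream that dump_rpy_heap() produces through the
# HeapDumper class below (each entry is one signed machine word, buffered and
# flushed via raw_os_write):
#
#     addr  type_member_index  size  ref_addr ...  -1    one record per object
#     0     0                  0     -1                   marker written once,
#                                                         after the root objects
#
# so a reader can split records on the -1 terminators and rebuild the
# reference graph from the recorded addresses.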
class HeapDumper(BaseWalker):
BUFSIZE = 8192 # words
def __init__(self, gc, fd):
BaseWalker.__init__(self, gc)
self.fd = rffi.cast(rffi.INT, fd)
self.writebuffer = lltype.malloc(rffi.SIGNEDP.TO, self.BUFSIZE,
flavor='raw')
self.buf_count = 0
def delete(self):
lltype.free(self.writebuffer, flavor='raw')
BaseWalker.delete(self)
@jit.dont_look_inside
def flush(self):
if self.buf_count > 0:
bytes = self.buf_count * rffi.sizeof(rffi.SIGNED)
count = raw_os_write(self.fd,
rffi.cast(llmemory.Address, self.writebuffer),
rffi.cast(rffi.SIZE_T, bytes))
if rffi.cast(lltype.Signed, count) != bytes:
raise OSError(rffi.cast(lltype.Signed, rposix._get_errno()),
"raw_os_write failed")
self.buf_count = 0
flush._dont_inline_ = True
def write(self, value):
x = self.buf_count
self.writebuffer[x] = value
x += 1
self.buf_count = x
if x == self.BUFSIZE:
self.flush()
write._always_inline_ = True
# ----------
def write_marker(self):
self.write(0)
self.write(0)
self.write(0)
self.write(-1)
end_add_roots_marker = write_marker
def writeobj(self, obj):
gc = self.gc
typeid = gc.get_type_id(obj)
self.write(llmemory.cast_adr_to_int(obj))
self.write(gc.get_member_index(typeid))
self.write(gc.get_size_incl_hash(obj))
gc.trace(obj, self._writeref, None)
self.write(-1)
processobj = writeobj
def _writeref(self, pointer, _):
obj = pointer.address[0]
self.write(llmemory.cast_adr_to_int(obj))
self.add(obj)
def _hd_add_root(obj, heap_dumper):
heap_dumper.add(obj)
def _hd_unadd_root(obj, heap_dumper):
heap_dumper.unadd(obj)
def dump_rpy_heap(gc, fd):
heapdumper = HeapDumper(gc, fd)
heapdumper.process()
heapdumper.flush()
heapdumper.finish_processing()
heapdumper.delete()
return True
def count_memory_pressure(gc):
counter = MemoryPressureCounter(gc)
counter.process()
counter.finish_processing()
res = counter.count
counter.delete()
return res
def get_typeids_z(gc):
srcaddress = gc.root_walker.gcdata.typeids_z
return llmemory.cast_adr_to_ptr(srcaddress, lltype.Ptr(rgc.ARRAY_OF_CHAR))
def get_typeids_list(gc):
srcaddress = gc.root_walker.gcdata.typeids_list
return llmemory.cast_adr_to_ptr(srcaddress, lltype.Ptr(ARRAY_OF_HALFWORDS))
ARRAY_OF_HALFWORDS = lltype.Array(llgroup.HALFWORD)
| 30.211073
| 79
| 0.61631
|
e0920cef2c5cb6367aab424bf631f76086e8edac
| 696
|
py
|
Python
|
Room/myface_regonition/call_cam.py
|
39xdgy/Self_study
|
c60f840c140bed95d042c678e0174c8c1e4b3a10
|
[
"Apache-2.0"
] | null | null | null |
Room/myface_regonition/call_cam.py
|
39xdgy/Self_study
|
c60f840c140bed95d042c678e0174c8c1e4b3a10
|
[
"Apache-2.0"
] | null | null | null |
Room/myface_regonition/call_cam.py
|
39xdgy/Self_study
|
c60f840c140bed95d042c678e0174c8c1e4b3a10
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import os
global frame
cv2.namedWindow("preview")
vc = cv2.VideoCapture(1)
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
first_pic = True
count = 0
while rval:
cv2.imshow("Preview", frame)
rval, frame = vc.read()
##print(frame)
##encode = face.face_encodings(frame)
##if()
if first_pic:
cv2.imwrite("test.jpg", frame)
first_pic = False
elif count == 20:
count = 0
os.remove("test.jpg")
cv2.imwrite("test.jpg", frame)
count = count + 1
key = cv2.waitKey(20)
if key == 27:
os.remove("test.jpg")
rval = False
cv2.destroyWindow("preview")
| 15.130435
| 41
| 0.564655
|
0ffed790f87483e5c764e09442d02171141409ba
| 7,299
|
py
|
Python
|
ansible/modules/cloud/amazon/route53_zone.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/cloud/amazon/route53_zone.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/cloud/amazon/route53_zone.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: route53_zone
short_description: add or delete Route53 zones
description:
- Creates and deletes Route53 private and public zones
version_added: "2.0"
options:
zone:
description:
- "The DNS zone record (eg: foo.com.)"
required: true
state:
description:
      - Whether or not the zone should exist.
required: false
    default: present
choices: [ "present", "absent" ]
vpc_id:
description:
- The VPC ID the zone should be a part of (if this is going to be a private zone)
required: false
default: null
vpc_region:
description:
- The VPC Region the zone should be a part of (if this is going to be a private zone)
required: false
default: null
comment:
description:
- Comment associated with the zone
required: false
default: ''
extends_documentation_fragment:
- aws
- ec2
author: "Christopher Troup (@minichate)"
'''
EXAMPLES = '''
# create a public zone
- route53_zone:
zone: example.com
state: present
comment: this is an example
# delete a public zone
- route53_zone:
zone: example.com
state: absent
- name: private zone for devel
route53_zone:
zone: devel.example.com
state: present
vpc_id: '{{ myvpc_id }}'
comment: developer domain
# more complex example
- name: register output after creating zone in parameterized region
route53_zone:
vpc_id: '{{ vpc.vpc_id }}'
vpc_region: '{{ ec2_region }}'
zone: '{{ vpc_dns_zone }}'
state: present
register: zone_out
- debug:
var: zone_out
'''
RETURN='''
comment:
description: optional hosted zone comment
returned: when hosted zone exists
type: string
sample: "Private zone"
name:
description: hosted zone name
returned: when hosted zone exists
type: string
sample: "private.local."
private_zone:
description: whether hosted zone is private or public
returned: when hosted zone exists
type: bool
sample: true
vpc_id:
description: id of vpc attached to private hosted zone
returned: for private hosted zone
type: string
sample: "vpc-1d36c84f"
vpc_region:
description: region of vpc attached to private hosted zone
returned: for private hosted zone
type: string
sample: "eu-west-1"
zone_id:
description: hosted zone id
returned: when hosted zone exists
type: string
sample: "Z6JQG9820BEFMW"
'''
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.zone import Zone
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
zone=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
vpc_id=dict(default=None),
vpc_region=dict(default=None),
comment=dict(default='')))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
zone_in = module.params.get('zone').lower()
state = module.params.get('state').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
comment = module.params.get('comment')
if zone_in[-1:] != '.':
zone_in += "."
private_zone = vpc_id is not None and vpc_region is not None
_, _, aws_connect_kwargs = get_aws_connection_info(module)
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
results = conn.get_all_hosted_zones()
zones = {}
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
if vpc_id and 'VPCs' in zone_details:
# this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
if isinstance(zone_details['VPCs'], dict):
if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
zones[r53zone['Name']] = zone_id
else: # Forward compatibility for when boto fixes that bug
if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
zones[r53zone['Name']] = zone_id
else:
zones[r53zone['Name']] = zone_id
record = {
'private_zone': private_zone,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'comment': comment,
}
if state == 'present' and zone_in in zones:
if private_zone:
details = conn.get_hosted_zone(zones[zone_in])
if 'VPCs' not in details['GetHostedZoneResponse']:
module.fail_json(
msg="Can't change VPC from public to private"
)
vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
current_vpc_id = vpc_details['VPCId']
current_vpc_region = vpc_details['VPCRegion']
if current_vpc_id != vpc_id:
module.fail_json(
msg="Can't change VPC ID once a zone has been created"
)
if current_vpc_region != vpc_region:
module.fail_json(
msg="Can't change VPC Region once a zone has been created"
)
record['zone_id'] = zones[zone_in]
record['name'] = zone_in
module.exit_json(changed=False, set=record)
elif state == 'present':
result = conn.create_hosted_zone(zone_in, **record)
hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
record['zone_id'] = zone_id
record['name'] = zone_in
module.exit_json(changed=True, set=record)
elif state == 'absent' and zone_in in zones:
conn.delete_hosted_zone(zones[zone_in])
module.exit_json(changed=True)
elif state == 'absent':
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| 30.668067
| 97
| 0.640773
|
275ac3def39609eb692fac6337dec796a7685e8f
| 2,833
|
py
|
Python
|
test/test_list_handler.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | 1
|
2017-07-14T16:43:10.000Z
|
2017-07-14T16:43:10.000Z
|
test/test_list_handler.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | 4
|
2016-09-13T15:14:51.000Z
|
2017-05-22T10:57:20.000Z
|
test/test_list_handler.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from yaml_rulz.list_handler import ListHandler
TEST_FLAT_YML_LIST_TYPES = {
"ericsson:shelf:0:id": "@ num",
"ericsson:shelf:0:management:address": "@ ipv4",
"ericsson:shelf:0:management:username": "username",
"ericsson:shelf:0:management:password": "password",
"ericsson:shelf:0:blade": [],
"ericsson:shelf:0:blade:0:id": "@ num",
"ericsson:shelf:0:blade:0:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:1:id": "@ num",
"ericsson:shelf:0:blade:1:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:1:cic:id": "@ num",
"ericsson:shelf:0:blade:2:id": "@ num",
"ericsson:shelf:0:blade:2:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:2:cic:id": "@ num",
"ericsson:shelf:0:blade:2:cinder": "",
"ericsson:shelf:1:id": "@ num",
"ericsson:shelf:1:blade": [],
"ericsson:shelf:1:blade:0:id": "@ num",
"ericsson:shelf:1:blade:0:nic_assignment": "~ whatever",
"ericsson:simple_list:0": "~ foo",
"ericsson:simple_list:1": "~ bar",
}
TEST_FLAT_YML_SCALARS = {
"ericsson:shelf": [],
}
TEST_FLAT_YML = TEST_FLAT_YML_SCALARS.copy()
TEST_FLAT_YML.update(TEST_FLAT_YML_LIST_TYPES)
TEST_GROUPS = {
"ericsson:shelf:0": {
"ericsson:shelf:0:id": "@ num",
"ericsson:shelf:0:blade": [],
"ericsson:shelf:0:management:address": "@ ipv4",
"ericsson:shelf:0:management:password": "password",
"ericsson:shelf:0:management:username": "username",
},
"ericsson:shelf:1": {
"ericsson:shelf:1:id": "@ num",
"ericsson:shelf:1:blade": [],
},
"ericsson:shelf:0:blade:0": {
"ericsson:shelf:0:blade:0:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:0:id": "@ num",
},
"ericsson:shelf:0:blade:1": {
"ericsson:shelf:0:blade:1:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:1:id": "@ num",
"ericsson:shelf:0:blade:1:cic:id": "@ num",
},
"ericsson:shelf:0:blade:2": {
"ericsson:shelf:0:blade:2:nic_assignment": "~ whatever",
"ericsson:shelf:0:blade:2:cinder": "",
"ericsson:shelf:0:blade:2:id": "@ num",
"ericsson:shelf:0:blade:2:cic:id": "@ num",
},
"ericsson:shelf:1:blade:0": {
"ericsson:shelf:1:blade:0:nic_assignment": "~ whatever",
"ericsson:shelf:1:blade:0:id": "@ num",
},
"ericsson:simple_list:0": {
"ericsson:simple_list:0": "~ foo",
},
"ericsson:simple_list:1": {
"ericsson:simple_list:1": "~ bar",
},
}
class TestListHandler(TestCase):
def setUp(self):
self.handler = ListHandler(TEST_FLAT_YML, ":")
def test_list_handler_init(self):
self.assertEqual(TEST_FLAT_YML_LIST_TYPES, self.handler.list_types)
self.assertEqual(TEST_GROUPS, self.handler.groups)
| 34.975309
| 75
| 0.616661
|
a3be5bcb15803349aa3dfd699497890281793f16
| 3,400
|
py
|
Python
|
Library/Utilities/fftw++-2.05/mpi/testtranspose.py
|
stevend12/SolutioCpp
|
6fa8a12207cd1e7e806a8ef5de93dc137c33856e
|
[
"Apache-2.0"
] | 9
|
2017-06-27T14:04:46.000Z
|
2022-02-17T17:38:03.000Z
|
Library/Utilities/fftw++-2.05/mpi/testtranspose.py
|
stevend12/SolutioCpp
|
6fa8a12207cd1e7e806a8ef5de93dc137c33856e
|
[
"Apache-2.0"
] | null | null | null |
Library/Utilities/fftw++-2.05/mpi/testtranspose.py
|
stevend12/SolutioCpp
|
6fa8a12207cd1e7e806a8ef5de93dc137c33856e
|
[
"Apache-2.0"
] | 3
|
2017-06-23T20:10:44.000Z
|
2021-01-13T10:09:46.000Z
|
#!/usr/bin/python -u
import sys # so that we can return a value at the end.
import random # for randum number generators
import time
import getopt
import os.path
from testutils import *
from math import sqrt
def main(argv):
retval = 0
Print("MPI transpose unit test")
usage = "Usage:\n"\
"./testtranspose.py\n"\
"\t-s\t\tSpecify a short run\n"\
"\t-h\t\tShow usage"
shortrun = False
try:
opts, args = getopt.getopt(argv,"sh")
except getopt.GetoptError:
print "Error in arguments"
print usage
sys.exit(2)
for opt, arg in opts:
if opt in ("-s"):
shortrun = True
if opt in ("-h"):
print usage
sys.exit(0)
if not os.path.isfile("transpose"):
print "Error: transpose executable not present!"
retval += 1
else:
logfile = 'testtranspose.log'
Print("Log in " + logfile + "\n")
log = open(logfile, 'w')
log.close()
start=30
stop=40
if(shortrun):
Xlist = [2,1,random.randint(start,stop)]
Ylist = [2,1,random.randint(start,stop)]
Zlist = [2,1,random.randint(start,stop)]
Plist = [2,1,3,4]
else:
Xlist = [10,9,8,7,6,5,4,3,2,1,random.randint(start,stop)]
Ylist = [9,10,8,7,6,5,4,3,2,1,random.randint(start,stop)]
Zlist = [2,1,3,10,random.randint(start,stop)]
Plist = [2,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
argslist = []
for X in Xlist:
for Y in Ylist:
for Z in Zlist:
for P in Plist:
for a in range(1,int(sqrt(P)+1.5)):
for s in range(0,3):
args = []
args.append("-x" + str(X))
args.append("-y" + str(Y))
args.append("-z" + str(Z))
args.append("-s" + str(s))
args.append("-a" + str(a))
args.append("-tq")
argslist.append(args)
Print("Running " + str(len(argslist)) + " tests:")
tstart = time.time()
failcases = ""
# timeout cutoff in seconds (0 disables timeout)
timeout = 300
nfails = 0
itest = 0
for args in argslist:
print "test", itest, "of", len(argslist), ":",
itest += 1
rtest, cmd = runtest("transpose", P, args, logfile, timeout)
if not rtest == 0:
nfails += 1
failcases += " ".join(cmd)
failcases += "\t(code " + str(rtest) + ")"
failcases += "\n"
try:
if nfails > 0:
print "Failure cases:"
print failcases
retval += 1
print "\n", nfails, "failures out of", len(argslist), "tests."
tend = time.time()
print "\nElapsed time (s):", tend - tstart
except:
pass
sys.exit(retval)
if __name__ == "__main__":
main(sys.argv[1:])
| 29.565217
| 79
| 0.436176
|
03c01f7d5fbe4f53de2e76abcf3ea3504a61f3b5
| 292
|
py
|
Python
|
Level2/stock_price.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level2/stock_price.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level2/stock_price.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
def solution(prices):
result = [0 for i in range(len(prices))]
for index1 in range(len(prices)):
for index2 in range(index1+1, len(prices)):
result[index1] += 1
            if prices[index1] > prices[index2]:  # when the price drops
break
return result
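# A minimal check of solution() (the sample input below is illustrative; the
# result counts, for each second, how long the price held without falling):
if __name__ == "__main__":
    print(solution([1, 2, 3, 2, 3]))  # [4, 3, 1, 1, 0]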
| 26.545455
| 60
| 0.558219
|
79020e3b97923b38276d349876f359df32550754
| 1,623
|
py
|
Python
|
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | 21
|
2017-03-17T16:34:33.000Z
|
2022-03-12T14:52:40.000Z
|
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | null | null | null |
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | 7
|
2019-06-08T19:45:23.000Z
|
2022-01-04T02:44:41.000Z
|
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
class JogWidget(QWidget):
def __init__(self, parent, callback):
super(JogWidget, self).__init__(parent)
self.parent = parent
self.callback = callback
self.wx_current = 0
self.wy_current = 0
self.wz_current = 0
self._x_start_screen = 0
self._y_start_screen = 0
self._z_accumulator = 0
def onIdle(self):
self._z_accumulator = 0
def mousePressEvent(self, event):
pos = event.pos()
self._x_start_screen = pos.x()
self._y_start_screen = pos.y()
self._relative_origin_x = self.wx_current
self._relative_origin_y = self.wy_current
def mouseReleaseEvent(self, event):
"""
Safe Feed
"""
pass
#self.callback("F111")
def wheelEvent(self, event):
delta = event.angleDelta().y()
self._z_accumulator += delta
z_goto = self.wz_current + self._z_accumulator / 1000
self.callback("G1 Z{:0.2f} F100".format(z_goto))
def mouseMoveEvent(self, event):
pos = event.pos()
x_current_screen = pos.x()
y_current_screen = pos.y()
x_goto = self._relative_origin_x + (x_current_screen - self._x_start_screen) / 20
y_goto = self._relative_origin_y + (self._y_start_screen - y_current_screen) / 20
self.callback("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
#print("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
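# Minimal wiring sketch (hypothetical; the real host window and G-code sender
# used by gerbil_gui are not part of this file). The callback simply receives
# the G-code strings built in wheelEvent/mouseMoveEvent above:
#
#     def send_gcode(line):
#         print("would send:", line)   # e.g. "G1 X1.25 Y-0.40 F400"
#
#     jog = JogWidget(parent=None, callback=send_gcode)
#     jog.wx_current, jog.wy_current, jog.wz_current = 0.0, 0.0, 0.0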
| 30.622642
| 89
| 0.585952
|
17a3d3fcf237bfcb0498630f42cf6eaaccf19d38
| 395
|
py
|
Python
|
src/condicional/reajuste.py
|
PedroAugustoDev/Algoritmos-em-Python
|
aae67133c249ca67a597431b3fc6d46d80a54a7e
|
[
"MIT"
] | 1
|
2022-03-08T03:40:44.000Z
|
2022-03-08T03:40:44.000Z
|
src/condicional/reajuste.py
|
PedroAugustoDev/Algoritmos-em-Python
|
aae67133c249ca67a597431b3fc6d46d80a54a7e
|
[
"MIT"
] | null | null | null |
src/condicional/reajuste.py
|
PedroAugustoDev/Algoritmos-em-Python
|
aae67133c249ca67a597431b3fc6d46d80a54a7e
|
[
"MIT"
] | null | null | null |
'''
Author: Pedro Augusto Lourenço Siqueira
Objective: Implementation of an algorithm that adjusts
a salary, adding a 5, 10, or 15% raise depending
on the salary.
Level: Basic
'''
SA = float(input('Enter your current salary: '))
NA = 0
if SA < 500: NA = SA + SA * .15
elif SA >= 500 and SA < 1000: NA = SA + SA * .10
else: NA = SA + SA * 0.05
print(f'The new salary is: {NA}')
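# A quick worked check of the brackets above (illustrative inputs):
#   SA = 400  -> below 500, 15% raise        -> NA = 460
#   SA = 800  -> between 500 and 1000, 10%   -> NA = 880
#   SA = 1500 -> 1000 or more, 5%            -> NA = 1575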
| 21.944444
| 48
| 0.640506
|
0b79db7e380909ed269e0abddd6e27ab67de5b6b
| 19,736
|
py
|
Python
|
release/stubs.min/System/__init___parts/Uri.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/__init___parts/Uri.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/__init___parts/Uri.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class Uri(object,ISerializable):
"""
Provides an object representation of a uniform resource identifier (URI) and easy access to the parts of the URI.
Uri(uriString: str)
Uri(uriString: str,dontEscape: bool)
Uri(baseUri: Uri,relativeUri: str,dontEscape: bool)
Uri(uriString: str,uriKind: UriKind)
Uri(baseUri: Uri,relativeUri: str)
Uri(baseUri: Uri,relativeUri: Uri)
"""
def Canonicalize(self,*args):
"""
Canonicalize(self: Uri)
Converts the internally stored URI to canonical form.
"""
pass
@staticmethod
def CheckHostName(name):
"""
CheckHostName(name: str) -> UriHostNameType
Determines whether the specified host name is a valid DNS name.
name: The host name to validate. This can be an IPv4 or IPv6 address or an Internet host name.
Returns: A System.UriHostNameType that indicates the type of the host name. If the type of the host name
cannot be determined or if the host name is null or a zero-length string,this method returns
System.UriHostNameType.Unknown.
"""
pass
@staticmethod
def CheckSchemeName(schemeName):
"""
CheckSchemeName(schemeName: str) -> bool
Determines whether the specified scheme name is valid.
schemeName: The scheme name to validate.
Returns: A System.Boolean value that is true if the scheme name is valid; otherwise,false.
"""
pass
def CheckSecurity(self,*args):
"""
CheckSecurity(self: Uri)
Calling this method has no effect.
"""
pass
@staticmethod
def Compare(uri1,uri2,partsToCompare,compareFormat,comparisonType):
"""
Compare(uri1: Uri,uri2: Uri,partsToCompare: UriComponents,compareFormat: UriFormat,comparisonType: StringComparison) -> int
Compares the specified parts of two URIs using the specified comparison rules.
uri1: The first System.Uri.
uri2: The second System.Uri.
partsToCompare: A bitwise combination of the System.UriComponents values that specifies the parts of uri1 and
uri2 to compare.
compareFormat: One of the System.UriFormat values that specifies the character escaping used when the URI
components are compared.
comparisonType: One of the System.StringComparison values.
Returns: An System.Int32 value that indicates the lexical relationship between the compared System.Uri
components.ValueMeaningLess than zerouri1 is less than uri2.Zerouri1 equals uri2.Greater than
zerouri1 is greater than uri2.
"""
pass
def Equals(self,comparand):
"""
Equals(self: Uri,comparand: object) -> bool
Compares two System.Uri instances for equality.
comparand: The System.Uri instance or a URI identifier to compare with the current instance.
Returns: A System.Boolean value that is true if the two instances represent the same URI; otherwise,
false.
"""
pass
def Escape(self,*args):
"""
Escape(self: Uri)
Converts any unsafe or reserved characters in the path component to their hexadecimal character
representations.
"""
pass
@staticmethod
def EscapeDataString(stringToEscape):
"""
EscapeDataString(stringToEscape: str) -> str
Converts a string to its escaped representation.
stringToEscape: The string to escape.
Returns: A System.String that contains the escaped representation of stringToEscape.
"""
pass
def EscapeString(self,*args):
"""
EscapeString(str: str) -> str
Converts a string to its escaped representation.
str: The string to transform to its escaped representation.
Returns: The escaped representation of the string.
"""
pass
@staticmethod
def EscapeUriString(stringToEscape):
"""
EscapeUriString(stringToEscape: str) -> str
Converts a URI string to its escaped representation.
stringToEscape: The string to escape.
Returns: A System.String that contains the escaped representation of stringToEscape.
"""
pass
@staticmethod
def FromHex(digit):
"""
FromHex(digit: Char) -> int
Gets the decimal value of a hexadecimal digit.
digit: The hexadecimal digit (0-9,a-f,A-F) to convert.
Returns: An System.Int32 value that contains a number from 0 to 15 that corresponds to the specified
hexadecimal digit.
"""
pass
def GetComponents(self,components,format):
"""
GetComponents(self: Uri,components: UriComponents,format: UriFormat) -> str
Gets the specified components of the current instance using the specified escaping for special
characters.
components: A bitwise combination of the System.UriComponents values that specifies which parts of the
current instance to return to the caller.
format: One of the System.UriFormat values that controls how special characters are escaped.
Returns: A System.String that contains the components.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: Uri) -> int
Gets the hash code for the URI.
Returns: An System.Int32 containing the hash value generated for this URI.
"""
pass
def GetLeftPart(self,part):
"""
GetLeftPart(self: Uri,part: UriPartial) -> str
Gets the specified portion of a System.Uri instance.
part: One of the System.UriPartial values that specifies the end of the URI portion to return.
Returns: A System.String that contains the specified portion of the System.Uri instance.
"""
pass
def GetObjectData(self,*args):
"""
GetObjectData(self: Uri,serializationInfo: SerializationInfo,streamingContext: StreamingContext)
Returns the data needed to serialize the current instance.
serializationInfo: A System.Runtime.Serialization.SerializationInfo object containing the information required to
serialize the System.Uri.
streamingContext: A System.Runtime.Serialization.StreamingContext object containing the source and destination of
the serialized stream associated with the System.Uri.
"""
pass
@staticmethod
def HexEscape(character):
"""
HexEscape(character: Char) -> str
Converts a specified character into its hexadecimal equivalent.
character: The character to convert to hexadecimal representation.
Returns: The hexadecimal representation of the specified character.
"""
pass
@staticmethod
def HexUnescape(pattern,index):
"""
HexUnescape(pattern: str,index: int) -> (Char,int)
Converts a specified hexadecimal representation of a character to the character.
pattern: The hexadecimal representation of a character.
index: The location in pattern where the hexadecimal representation of a character begins.
Returns: The character represented by the hexadecimal encoding at position index. If the character at
index is not hexadecimal encoded,the character at index is returned. The value of index is
incremented to point to the character following the one returned.
"""
pass
def IsBadFileSystemCharacter(self,*args):
"""
IsBadFileSystemCharacter(self: Uri,character: Char) -> bool
Gets whether a character is invalid in a file system name.
character: The System.Char to test.
Returns: A System.Boolean value that is true if the specified character is invalid; otherwise false.
"""
pass
def IsBaseOf(self,uri):
"""
IsBaseOf(self: Uri,uri: Uri) -> bool
Determines whether the current System.Uri instance is a base of the specified System.Uri
instance.
uri: The specified System.Uri instance to test.
Returns: true if the current System.Uri instance is a base of uri; otherwise,false.
"""
pass
def IsExcludedCharacter(self,*args):
"""
IsExcludedCharacter(character: Char) -> bool
Gets whether the specified character should be escaped.
character: The System.Char to test.
Returns: A System.Boolean value that is true if the specified character should be escaped; otherwise,
false.
"""
pass
@staticmethod
def IsHexDigit(character):
"""
IsHexDigit(character: Char) -> bool
Determines whether a specified character is a valid hexadecimal digit.
character: The character to validate.
Returns: A System.Boolean value that is true if the character is a valid hexadecimal digit; otherwise
false.
"""
pass
@staticmethod
def IsHexEncoding(pattern,index):
"""
IsHexEncoding(pattern: str,index: int) -> bool
Determines whether a character in a string is hexadecimal encoded.
pattern: The string to check.
index: The location in pattern to check for hexadecimal encoding.
Returns: A System.Boolean value that is true if pattern is hexadecimal encoded at the specified location;
otherwise,false.
"""
pass
def IsReservedCharacter(self,*args):
"""
IsReservedCharacter(self: Uri,character: Char) -> bool
Gets whether the specified character is a reserved character.
character: The System.Char to test.
Returns: A System.Boolean value that is true if the specified character is a reserved character
otherwise,false.
"""
pass
def IsWellFormedOriginalString(self):
"""
IsWellFormedOriginalString(self: Uri) -> bool
Indicates whether the string used to construct this System.Uri was well-formed and is not
required to be further escaped.
Returns: A System.Boolean value that is true if the string was well-formed; else false.
"""
pass
@staticmethod
def IsWellFormedUriString(uriString,uriKind):
"""
IsWellFormedUriString(uriString: str,uriKind: UriKind) -> bool
Indicates whether the string is well-formed by attempting to construct a URI with the string and
ensures that the string does not require further escaping.
uriString: The string used to attempt to construct a System.Uri.
uriKind: The type of the System.Uri in uriString.
Returns: A System.Boolean value that is true if the string was well-formed; else false.
"""
pass
def MakeRelative(self,toUri):
"""
MakeRelative(self: Uri,toUri: Uri) -> str
Determines the difference between two System.Uri instances.
toUri: The URI to compare to the current URI.
Returns: If the hostname and scheme of this URI instance and toUri are the same,then this method returns
a System.String that represents a relative URI that,when appended to the current URI instance,
yields the toUri parameter.If the hostname or scheme is different,then this method returns a
System.String that represents the toUri parameter.
"""
pass
def MakeRelativeUri(self,uri):
"""
MakeRelativeUri(self: Uri,uri: Uri) -> Uri
Determines the difference between two System.Uri instances.
uri: The URI to compare to the current URI.
Returns: If the hostname and scheme of this URI instance and uri are the same,then this method returns a
relative System.Uri that,when appended to the current URI instance,yields uri.If the hostname
or scheme is different,then this method returns a System.Uri that represents the uri
parameter.
"""
pass
def Parse(self,*args):
"""
Parse(self: Uri)
Parses the URI of the current instance to ensure it contains all the parts required for a valid
URI.
"""
pass
def ToString(self):
"""
ToString(self: Uri) -> str
Gets a canonical string representation for the specified System.Uri instance.
Returns: A System.String instance that contains the unescaped canonical representation of the System.Uri
instance. All characters are unescaped except #,?,and %.
"""
pass
@staticmethod
def TryCreate(*__args):
"""
TryCreate(baseUri: Uri,relativeUri: Uri) -> (bool,Uri)
Creates a new System.Uri using the specified base and relative System.Uri instances.
baseUri: The base System.Uri.
relativeUri: The relative System.Uri to add to the base System.Uri.
Returns: A System.Boolean value that is true if the System.Uri was successfully created; otherwise,false.
TryCreate(baseUri: Uri,relativeUri: str) -> (bool,Uri)
Creates a new System.Uri using the specified base and relative System.String instances.
baseUri: The base System.Uri.
relativeUri: The relative System.Uri,represented as a System.String,to add to the base System.Uri.
Returns: A System.Boolean value that is true if the System.Uri was successfully created; otherwise,false.
TryCreate(uriString: str,uriKind: UriKind) -> (bool,Uri)
Creates a new System.Uri using the specified System.String instance and a System.UriKind.
uriString: The System.String representing the System.Uri.
uriKind: The type of the Uri.
Returns: A System.Boolean value that is true if the System.Uri was successfully created; otherwise,false.
"""
pass
def Unescape(self,*args):
"""
Unescape(self: Uri,path: str) -> str
Converts the specified string by replacing any escape sequences with their unescaped
representation.
path: The System.String to convert.
Returns: A System.String that contains the unescaped value of the path parameter.
"""
pass
@staticmethod
def UnescapeDataString(stringToUnescape):
"""
UnescapeDataString(stringToUnescape: str) -> str
Converts a string to its unescaped representation.
stringToUnescape: The string to unescape.
Returns: A System.String that contains the unescaped representation of stringToUnescape.
"""
pass
def __cmp__(self,*args):
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,uriString: str)
__new__(cls: type,uriString: str,dontEscape: bool)
__new__(cls: type,baseUri: Uri,relativeUri: str,dontEscape: bool)
__new__(cls: type,uriString: str,uriKind: UriKind)
__new__(cls: type,baseUri: Uri,relativeUri: str)
__new__(cls: type,baseUri: Uri,relativeUri: Uri)
__new__(cls: type,serializationInfo: SerializationInfo,streamingContext: StreamingContext)
"""
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
AbsolutePath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the absolute path of the URI.
Get: AbsolutePath(self: Uri) -> str
"""
AbsoluteUri=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the absolute URI.
Get: AbsoluteUri(self: Uri) -> str
"""
Authority=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the Domain Name System (DNS) host name or IP address and the port number for a server.
Get: Authority(self: Uri) -> str
"""
DnsSafeHost=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an unescaped host name that is safe to use for DNS resolution.
Get: DnsSafeHost(self: Uri) -> str
"""
Fragment=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the escaped URI fragment.
Get: Fragment(self: Uri) -> str
"""
Host=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the host component of this instance.
Get: Host(self: Uri) -> str
"""
HostNameType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type of the host name specified in the URI.
Get: HostNameType(self: Uri) -> UriHostNameType
"""
IdnHost=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IdnHost(self: Uri) -> str
"""
IsAbsoluteUri=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether the System.Uri instance is absolute.
Get: IsAbsoluteUri(self: Uri) -> bool
"""
IsDefaultPort=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether the port value of the URI is the default for this scheme.
Get: IsDefaultPort(self: Uri) -> bool
"""
IsFile=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the specified System.Uri is a file URI.
Get: IsFile(self: Uri) -> bool
"""
IsLoopback=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether the specified System.Uri references the local host.
Get: IsLoopback(self: Uri) -> bool
"""
IsUnc=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether the specified System.Uri is a universal naming convention (UNC) path.
Get: IsUnc(self: Uri) -> bool
"""
LocalPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a local operating-system representation of a file name.
Get: LocalPath(self: Uri) -> str
"""
OriginalString=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the original URI string that was passed to the System.Uri constructor.
Get: OriginalString(self: Uri) -> str
"""
PathAndQuery=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Uri.AbsolutePath and System.Uri.Query properties separated by a question mark (?).
Get: PathAndQuery(self: Uri) -> str
"""
Port=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the port number of this URI.
Get: Port(self: Uri) -> int
"""
Query=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets any query information included in the specified URI.
Get: Query(self: Uri) -> str
"""
Scheme=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the scheme name for this URI.
Get: Scheme(self: Uri) -> str
"""
Segments=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an array containing the path segments that make up the specified URI.
Get: Segments(self: Uri) -> Array[str]
"""
UserEscaped=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates that the URI string was completely escaped before the System.Uri instance was created.
Get: UserEscaped(self: Uri) -> bool
"""
UserInfo=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the user name,password,or other user-specific information associated with the specified URI.
Get: UserInfo(self: Uri) -> str
"""
SchemeDelimiter='://'
UriSchemeFile='file'
UriSchemeFtp='ftp'
UriSchemeGopher='gopher'
UriSchemeHttp='http'
UriSchemeHttps='https'
UriSchemeMailto='mailto'
UriSchemeNetPipe='net.pipe'
UriSchemeNetTcp='net.tcp'
UriSchemeNews='news'
UriSchemeNntp='nntp'
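# Minimal usage sketch (illustrative; assumes an IronPython session where the
# .NET System assembly is available -- this stub file only describes the API):
#
#     from System import Uri, UriKind
#     u = Uri("https://example.com/docs/page?x=1")
#     u.Scheme, u.Host, u.PathAndQuery    # ('https', 'example.com', '/docs/page?x=1')
#     ok, rel = Uri.TryCreate("docs/page", UriKind.Relative)
#     Uri.EscapeDataString("a b&c")       # 'a%20b%26c'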
| 22.452787
| 215
| 0.686563
|
5c3dbbe50c241f4d1bd4947f4c8db36e04c191ed
| 272
|
py
|
Python
|
mysite/photos/models.py
|
ThatChocolateGuy/python-face-recognition
|
07bb0613cdb5a484baf0cc9f034683dce19c9ee7
|
[
"MIT"
] | 1
|
2019-08-11T03:47:06.000Z
|
2019-08-11T03:47:06.000Z
|
mysite/photos/models.py
|
ThatChocolateGuy/python-face-recognition
|
07bb0613cdb5a484baf0cc9f034683dce19c9ee7
|
[
"MIT"
] | 14
|
2019-02-10T08:20:59.000Z
|
2020-03-05T16:24:08.000Z
|
mysite/photos/models.py
|
ThatChocolateGuy/python-face-recognition
|
07bb0613cdb5a484baf0cc9f034683dce19c9ee7
|
[
"MIT"
] | 1
|
2019-11-18T02:18:40.000Z
|
2019-11-18T02:18:40.000Z
|
from __future__ import unicode_literals
from django.db import models
class Photo(models.Model):
title = models.CharField(max_length=255, blank=True)
file = models.FileField(upload_to='photos/')
uploaded_at = models.DateTimeField(auto_now_add=True)
| 27.2
| 58
| 0.746324
|
fb315a1c1ed2be97b883932991ddd9ed8b3d1951
| 1,367
|
py
|
Python
|
Python3/06_ZigZag_Conversion.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-04-28T09:07:11.000Z
|
2018-04-28T09:07:11.000Z
|
Python3/06_ZigZag_Conversion.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-02-24T16:26:30.000Z
|
2018-02-24T16:26:44.000Z
|
Python3/06_ZigZag_Conversion.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | null | null | null |
#! python3
# __author__ = "YangJiaHao"
# date: 2018/1/28
class Solution:
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
length = len(s)
if length <= 2 or numRows == 1:
return s
strs = []
lag = 2 * (numRows - 1)
for i in range(numRows):
j = 0
while (i + lag * j) < length:
strs.append(s[i + lag * j])
if i > 0 and i < numRows - 1:
step = i + lag * j + (lag - 2 * i)
if step < length:
strs.append(s[step])
j += 1
return "".join(strs)
def convert2(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1 or numRows >= len(s):
return s
L = [''] * numRows
index, step = 0, 1
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(L)
if __name__ == '__main__':
# P A H N
# A P L S I I G
# Y I R
s = "PAYPALISHIRING"
solution = Solution()
s = solution.convert("0123456789", 4)
print(s)
| 23.568966
| 54
| 0.400146
|
5dfaec3ad0f83b1ab123dec999cedcef20d12e1d
| 3,310
|
py
|
Python
|
contrib/scripts/kick_users.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | 2
|
2021-05-14T19:05:03.000Z
|
2021-05-26T23:00:43.000Z
|
contrib/scripts/kick_users.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | null | null | null |
contrib/scripts/kick_users.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib
def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])
return template
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{
"$HS": hs,
"$ROOM": room_id,
"$TOKEN": access_token
}
)
print "Getting room state => %s" % room_state_url
res = requests.get(room_state_url)
print "HTTP %s" % res.status_code
state_events = res.json()
if "error" in state_events:
print "FATAL"
print state_events
return
kick_list = []
room_name = room_id
for event in state_events:
if not event["type"] == "m.room.member":
if event["type"] == "m.room.name":
room_name = event["content"].get("name")
continue
if not event["content"].get("membership") == "join":
continue
if event["state_key"].startswith(user_id_prefix):
kick_list.append(event["state_key"])
if len(kick_list) == 0:
print "No user IDs match the prefix '%s'" % user_id_prefix
return
print "The following user IDs will be kicked from %s" % room_name
for uid in kick_list:
print uid
doit = raw_input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == 'y':
print "Kicking members..."
# encode them all
kick_list = [urllib.quote(uid) for uid in kick_list]
for uid in kick_list:
kick_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
{
"$HS": hs,
"$UID": uid,
"$ROOM": room_id,
"$TOKEN": access_token
}
)
kick_body = {
"membership": "leave",
"reason": why
}
print "Kicking %s" % uid
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print "ERROR: HTTP %s" % res.status_code
if res.json().get("error"):
print "ERROR: JSON %s" % res.json()
if __name__ == "__main__":
parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
parser.add_argument("-t","--token",help="Your access_token")
parser.add_argument("-r","--room",help="The room ID to kick members in")
parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
args = parser.parse_args()
if not args.room or not args.token or not args.user_id or not args.homeserver:
parser.print_help()
sys.exit(1)
else:
main(args.homeserver, args.room, args.token, args.user_id, args.why)
| 35.212766
| 101
| 0.567069
|
75416306b74d62c8de9e3e1c78f2585911608495
| 1,104
|
py
|
Python
|
app/Http/insertTunggakanPemerintah.py
|
demsyimanm/retribusiBaru
|
f58ce5b2e3bba1a1f93d8ecd6e7fb23c820b58f0
|
[
"MIT"
] | null | null | null |
app/Http/insertTunggakanPemerintah.py
|
demsyimanm/retribusiBaru
|
f58ce5b2e3bba1a1f93d8ecd6e7fb23c820b58f0
|
[
"MIT"
] | null | null | null |
app/Http/insertTunggakanPemerintah.py
|
demsyimanm/retribusiBaru
|
f58ce5b2e3bba1a1f93d8ecd6e7fb23c820b58f0
|
[
"MIT"
] | null | null | null |
import csv
import MySQLdb
with open('../../public/upload/03 201510 nunggak pemerintah ada periode tagih.txt', 'rb') as csvfile:
i = 0
db_tunggak = MySQLdb.connect("10.151.63.12", "retribusi", "retribusi", "retribusi")
cursor_tunggak = db_tunggak.cursor()
spamreader = csv.reader(csvfile, delimiter='~', quotechar='|')
header = []
data = []
for row in spamreader:
if (i==0):
header = row
header[0] = "PELANGGAN_ID"
header.append("BULAN")
header.append("TAHUN")
else:
data = row
data.append("1")
data.append("2016")
header_string =""
j = 0
for x in header:
if (j!=0):
header_string += ", "
header_string += x.replace('"','\\"').replace("'","\\'")
j+=1
data_string = ""
j = 0
for x in data:
if (j!=0):
data_string += ", "
data_string += '"' + x.replace('"','\\"').replace("'","\\'").strip() + '"'
j+=1
sql = '''INSERT IGNORE INTO tunggakanpemerintah (''' + header_string + ''') VALUES (''' + data_string + ''')'''
sql_exec = cursor_tunggak.execute(sql)
# print sql
db_tunggak.commit()
i+=1
| 25.090909
| 114
| 0.57337
|
e26f6e3aa4806d98298f5b97014db478639446bc
| 12,118
|
py
|
Python
|
python_modules/libraries/dagster-airflow/dagster_airflow_tests/test_dagster_pipeline_factory/test_dependency_structure_translation.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
python_modules/libraries/dagster-airflow/dagster_airflow_tests/test_dagster_pipeline_factory/test_dependency_structure_translation.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow_tests/test_dagster_pipeline_factory/test_dependency_structure_translation.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-12-08T18:13:19.000Z
|
2021-12-08T18:13:19.000Z
|
# pylint: disable=pointless-statement
from airflow.models.dag import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
from airflow.utils.helpers import chain
from dagster.core.snap import PipelineSnapshot
from dagster.serdes import serialize_pp
from dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag
from dagster_airflow.dagster_pipeline_factory import make_dagster_pipeline_from_airflow_dag
default_args = {
"owner": "dagster",
"start_date": days_ago(1),
}
def test_one_task_dag(snapshot):
dag = DAG(
dag_id="one_task_dag",
default_args=default_args,
schedule_interval=None,
)
dummy_operator = DummyOperator(
task_id="dummy_operator",
dag=dag,
)
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_two_task_dag_no_dep(snapshot):
dag = DAG(
dag_id="two_task_dag_no_dep",
default_args=default_args,
schedule_interval=None,
)
dummy_operator_1 = DummyOperator(
task_id="dummy_operator_1",
dag=dag,
)
dummy_operator_2 = DummyOperator(
task_id="dummy_operator_2",
dag=dag,
)
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_two_task_dag_with_dep(snapshot):
dag = DAG(
dag_id="two_task_dag_with_dep",
default_args=default_args,
schedule_interval=None,
)
dummy_operator_1 = DummyOperator(
task_id="dummy_operator_1",
dag=dag,
)
dummy_operator_2 = DummyOperator(
task_id="dummy_operator_2",
dag=dag,
)
dummy_operator_1 >> dummy_operator_2
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_diamond_task_dag(snapshot):
dag = DAG(
dag_id="diamond_task_dag",
default_args=default_args,
schedule_interval=None,
)
dummy_operator_1 = DummyOperator(
task_id="dummy_operator_1",
dag=dag,
)
dummy_operator_2 = DummyOperator(
task_id="dummy_operator_2",
dag=dag,
)
dummy_operator_3 = DummyOperator(
task_id="dummy_operator_3",
dag=dag,
)
dummy_operator_4 = DummyOperator(
task_id="dummy_operator_4",
dag=dag,
)
dummy_operator_1 >> dummy_operator_2
dummy_operator_1 >> dummy_operator_3
dummy_operator_2 >> dummy_operator_4
dummy_operator_3 >> dummy_operator_4
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_multi_root_dag(snapshot):
dag = DAG(
dag_id="multi_root_dag",
default_args=default_args,
schedule_interval=None,
)
dummy_operator_1 = DummyOperator(
task_id="dummy_operator_1",
dag=dag,
)
dummy_operator_2 = DummyOperator(
task_id="dummy_operator_2",
dag=dag,
)
dummy_operator_3 = DummyOperator(
task_id="dummy_operator_3",
dag=dag,
)
dummy_operator_4 = DummyOperator(
task_id="dummy_operator_4",
dag=dag,
)
dummy_operator_1 >> dummy_operator_4
dummy_operator_2 >> dummy_operator_4
dummy_operator_3 >> dummy_operator_4
dag.tree_view()
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_multi_leaf_dag(snapshot):
dag = DAG(
dag_id="multi_leaf_dag",
default_args=default_args,
schedule_interval=None,
)
dummy_operator_1 = DummyOperator(
task_id="dummy_operator_1",
dag=dag,
)
dummy_operator_2 = DummyOperator(
task_id="dummy_operator_2",
dag=dag,
)
dummy_operator_3 = DummyOperator(
task_id="dummy_operator_3",
dag=dag,
)
dummy_operator_4 = DummyOperator(
task_id="dummy_operator_4",
dag=dag,
)
dummy_operator_1 >> dummy_operator_2
dummy_operator_1 >> dummy_operator_3
dummy_operator_1 >> dummy_operator_4
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_complex_dag(snapshot):
dag = DAG(dag_id="complex_dag", default_args=default_args, schedule_interval=None)
# Create
create_entry_group = DummyOperator(
task_id="create_entry_group",
dag=dag,
)
create_entry_group_result = DummyOperator(
task_id="create_entry_group_result",
dag=dag,
)
create_entry_group_result2 = DummyOperator(
task_id="create_entry_group_result2",
dag=dag,
)
create_entry_gcs = DummyOperator(
task_id="create_entry_gcs",
dag=dag,
)
create_entry_gcs_result = DummyOperator(
task_id="create_entry_gcs_result",
dag=dag,
)
create_entry_gcs_result2 = DummyOperator(
task_id="create_entry_gcs_result2",
dag=dag,
)
create_tag = DummyOperator(
task_id="create_tag",
dag=dag,
)
create_tag_result = DummyOperator(
task_id="create_tag_result",
dag=dag,
)
create_tag_result2 = DummyOperator(
task_id="create_tag_result2",
dag=dag,
)
create_tag_template = DummyOperator(
task_id="create_tag_template",
dag=dag,
)
create_tag_template_result = DummyOperator(
task_id="create_tag_template_result",
dag=dag,
)
create_tag_template_result2 = DummyOperator(
task_id="create_tag_template_result2",
dag=dag,
)
create_tag_template_field = DummyOperator(
task_id="create_tag_template_field",
dag=dag,
)
create_tag_template_field_result = DummyOperator(
task_id="create_tag_template_field_result",
dag=dag,
)
create_tag_template_field_result2 = DummyOperator(
task_id="create_tag_template_field_result",
dag=dag,
)
# Delete
delete_entry = DummyOperator(
task_id="delete_entry",
dag=dag,
)
create_entry_gcs >> delete_entry
delete_entry_group = DummyOperator(
task_id="delete_entry_group",
dag=dag,
)
create_entry_group >> delete_entry_group
delete_tag = DummyOperator(
task_id="delete_tag",
dag=dag,
)
create_tag >> delete_tag
delete_tag_template_field = DummyOperator(
task_id="delete_tag_template_field",
dag=dag,
)
delete_tag_template = DummyOperator(
task_id="delete_tag_template",
dag=dag,
)
# Get
get_entry_group = DummyOperator(
task_id="get_entry_group",
dag=dag,
)
get_entry_group_result = DummyOperator(
task_id="get_entry_group_result",
dag=dag,
)
get_entry = DummyOperator(
task_id="get_entry",
dag=dag,
)
get_entry_result = DummyOperator(
task_id="get_entry_result",
dag=dag,
)
get_tag_template = DummyOperator(
task_id="get_tag_template",
dag=dag,
)
get_tag_template_result = DummyOperator(
task_id="get_tag_template_result",
dag=dag,
)
# List
list_tags = DummyOperator(
task_id="list_tags",
dag=dag,
)
list_tags_result = DummyOperator(
task_id="list_tags_result",
dag=dag,
)
# Lookup
lookup_entry = DummyOperator(
task_id="lookup_entry",
dag=dag,
)
lookup_entry_result = DummyOperator(
task_id="lookup_entry_result",
dag=dag,
)
# Rename
rename_tag_template_field = DummyOperator(
task_id="rename_tag_template_field",
dag=dag,
)
# Search
search_catalog = DummyOperator(
task_id="search_catalog",
dag=dag,
)
search_catalog_result = DummyOperator(
task_id="search_catalog_result",
dag=dag,
)
# Update
update_entry = DummyOperator(
task_id="update_entry",
dag=dag,
)
update_tag = DummyOperator(
task_id="update_tag",
dag=dag,
)
update_tag_template = DummyOperator(
task_id="update_tag_template",
dag=dag,
)
update_tag_template_field = DummyOperator(
task_id="update_tag_template_field",
dag=dag,
)
# Create
create_tasks = [
create_entry_group,
create_entry_gcs,
create_tag_template,
create_tag_template_field,
create_tag,
]
chain(*create_tasks)
create_entry_group >> delete_entry_group
create_entry_group >> create_entry_group_result
create_entry_group >> create_entry_group_result2
create_entry_gcs >> delete_entry
create_entry_gcs >> create_entry_gcs_result
create_entry_gcs >> create_entry_gcs_result2
create_tag_template >> delete_tag_template_field
create_tag_template >> create_tag_template_result
create_tag_template >> create_tag_template_result2
create_tag_template_field >> delete_tag_template_field
create_tag_template_field >> create_tag_template_field_result
create_tag_template_field >> create_tag_template_field_result2
create_tag >> delete_tag
create_tag >> create_tag_result
create_tag >> create_tag_result2
# Delete
delete_tasks = [
delete_tag,
delete_tag_template_field,
delete_tag_template,
delete_entry_group,
delete_entry,
]
chain(*delete_tasks)
# Get
create_tag_template >> get_tag_template >> delete_tag_template
get_tag_template >> get_tag_template_result
create_entry_gcs >> get_entry >> delete_entry
get_entry >> get_entry_result
create_entry_group >> get_entry_group >> delete_entry_group
get_entry_group >> get_entry_group_result
# List
create_tag >> list_tags >> delete_tag
list_tags >> list_tags_result
# Lookup
create_entry_gcs >> lookup_entry >> delete_entry
lookup_entry >> lookup_entry_result
# Rename
create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field
# Search
chain(create_tasks, search_catalog, delete_tasks)
search_catalog >> search_catalog_result
# Update
create_entry_gcs >> update_entry >> delete_entry
create_tag >> update_tag >> delete_tag
create_tag_template >> update_tag_template >> delete_tag_template
create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
snapshot.assert_match(
serialize_pp(
PipelineSnapshot.from_pipeline_def(
make_dagster_pipeline_from_airflow_dag(dag=dag)
).dep_structure_snapshot
)
)
def test_one_task_dag_to_job():
dag = DAG(
dag_id="dag-with.dot-dash",
default_args=default_args,
schedule_interval=None,
)
dummy_operator = DummyOperator(
task_id="dummy_operator",
dag=dag,
)
job_def = make_dagster_job_from_airflow_dag(dag=dag)
assert job_def.name == "airflow_dag_with_dot_dash"
    assert len(job_def.solids) == 1
result = job_def.execute_in_process()
assert result.success
step_success_events = [evt for evt in result.all_node_events if evt.is_step_success]
assert len(step_success_events) == 1
assert step_success_events[0].step_key == "airflow_dummy_operator"
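The tests above exercise make_dagster_pipeline_from_airflow_dag and make_dagster_job_from_airflow_dag through snapshot and in-process assertions. A minimal standalone sketch of the same API, using only calls that appear in this file (the DAG and task names are illustrative):
from airflow.models.dag import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
from dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag
# Build a one-task Airflow DAG, wrap it as a Dagster job, and run it in process.
dag = DAG(
    dag_id="example_dag",
    default_args={"owner": "dagster", "start_date": days_ago(1)},
    schedule_interval=None,
)
DummyOperator(task_id="noop", dag=dag)
job_def = make_dagster_job_from_airflow_dag(dag=dag)
result = job_def.execute_in_process()
assert result.success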
| 26.116379
| 91
| 0.662073
|
cadd4924d825b19c9754bae16e62d042017510bc
| 5,742
|
py
|
Python
|
sdk/python/pulumi_aws/glacier/vault_lock.py
|
Charliekenney23/pulumi-aws
|
55bd0390160d27350b297834026fee52114a2d41
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glacier/vault_lock.py
|
Charliekenney23/pulumi-aws
|
55bd0390160d27350b297834026fee52114a2d41
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glacier/vault_lock.py
|
Charliekenney23/pulumi-aws
|
55bd0390160d27350b297834026fee52114a2d41
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class VaultLock(pulumi.CustomResource):
complete_lock: pulumi.Output[bool]
"""
Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and the Terraform resource will show as needing recreation. Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time.
"""
ignore_deletion_error: pulumi.Output[bool]
"""
Allow Terraform to ignore the error returned when attempting to delete the Glacier Lock Policy. This can be used to delete or recreate the Glacier Vault via Terraform, for example, if the Glacier Vault Lock policy permits that action. This should only be used in conjunction with `complete_lock` being set to `true`.
"""
policy: pulumi.Output[str]
"""
JSON string containing the IAM policy to apply as the Glacier Vault Lock policy.
"""
vault_name: pulumi.Output[str]
"""
The name of the Glacier Vault.
"""
def __init__(__self__, resource_name, opts=None, complete_lock=None, ignore_deletion_error=None, policy=None, vault_name=None, __name__=None, __opts__=None):
"""
Manages a Glacier Vault Lock. You can refer to the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html) for a full explanation of the Glacier Vault Lock functionality.
> **NOTE:** This resource allows you to test Glacier Vault Lock policies by setting the `complete_lock` argument to `false`. When testing policies in this manner, the Glacier Vault Lock automatically expires after 24 hours and Terraform will show this resource as needing recreation after that time. To permanently apply the policy, set the `complete_lock` argument to `true`. When changing `complete_lock` to `true`, it is expected the resource will show as recreating.
        !> **WARNING:** Once a Glacier Vault Lock is completed, it is immutable. The deletion of the Glacier Vault Lock is not possible and attempting to remove it from Terraform will return an error. Set the `ignore_deletion_error` argument to `true` and apply this configuration before attempting to delete this resource via Terraform or use `terraform state rm` to remove this resource from Terraform management.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] complete_lock: Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and the Terraform resource will show as needing recreation. Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time.
:param pulumi.Input[bool] ignore_deletion_error: Allow Terraform to ignore the error returned when attempting to delete the Glacier Lock Policy. This can be used to delete or recreate the Glacier Vault via Terraform, for example, if the Glacier Vault Lock policy permits that action. This should only be used in conjunction with `complete_lock` being set to `true`.
:param pulumi.Input[str] policy: JSON string containing the IAM policy to apply as the Glacier Vault Lock policy.
:param pulumi.Input[str] vault_name: The name of the Glacier Vault.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if complete_lock is None:
raise TypeError("Missing required property 'complete_lock'")
__props__['complete_lock'] = complete_lock
__props__['ignore_deletion_error'] = ignore_deletion_error
if policy is None:
raise TypeError("Missing required property 'policy'")
__props__['policy'] = policy
if vault_name is None:
raise TypeError("Missing required property 'vault_name'")
__props__['vault_name'] = vault_name
super(VaultLock, __self__).__init__(
'aws:glacier/vaultLock:VaultLock',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
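The generated class above only documents the constructor; as a rough usage sketch based on that signature (the vault name and policy document are hypothetical, and a matching Glacier vault is assumed to already exist):
import json
from pulumi_aws.glacier.vault_lock import VaultLock
# Deny archive deletion on an assumed existing vault; keep complete_lock=False
# so the policy stays in Glacier's 24-hour test mode until it has been verified.
example_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "deny-archive-deletes",
        "Effect": "Deny",
        "Principal": "*",
        "Action": "glacier:DeleteArchive",
        "Resource": "arn:aws:glacier:*:*:vaults/example-vault",
    }],
})
lock = VaultLock(
    "example-lock",
    vault_name="example-vault",
    policy=example_policy,
    complete_lock=False,
)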
| 67.552941
| 569
| 0.72675
|
95f168e6e88a3d0ef041b8c84cb43cdd7179f7a0
| 128,389
|
py
|
Python
|
test/functional/tests.py
|
IPVL/swift-kilo
|
fe4cdb597f70e40c667b001b446546d75a7a5ab0
|
[
"Apache-2.0"
] | null | null | null |
test/functional/tests.py
|
IPVL/swift-kilo
|
fe4cdb597f70e40c667b001b446546d75a7a5ab0
|
[
"Apache-2.0"
] | null | null | null |
test/functional/tests.py
|
IPVL/swift-kilo
|
fe4cdb597f70e40c667b001b446546d75a7a5ab0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import hashlib
import hmac
import json
import locale
import random
import StringIO
import time
import unittest
import urllib
import uuid
from copy import deepcopy
import eventlet
from nose import SkipTest
from swift.common.http import is_success, is_client_error
from test.functional import normalized_urls, load_constraint, cluster_info
from test.functional import check_response, retry
import test.functional as tf
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
return uuid.uuid4().hex
@classmethod
def create_utf8_name(cls, length=None):
if length is None:
length = 15
else:
length = int(length)
utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
u'\u5608\u3706\u1804\u0903\u03A9\u2603'
return ''.join([random.choice(utf8_chars)
for x in xrange(length)]).encode('utf-8')
create_name = create_ascii_name
class Base(unittest.TestCase):
def setUp(self):
cls = type(self)
if not cls.set_up:
cls.env.setUp()
cls.set_up = True
def assert_body(self, body):
response_body = self.env.conn.response.read()
self.assert_(response_body == body,
'Body returned: %s' % (response_body))
def assert_status(self, status_or_statuses):
self.assert_(self.env.conn.response.status == status_or_statuses or
(hasattr(status_or_statuses, '__iter__') and
self.env.conn.response.status in status_or_statuses),
'Status returned: %d Expected: %s' %
(self.env.conn.response.status, status_or_statuses))
class Base2(object):
def setUp(self):
Utils.create_name = Utils.create_utf8_name
super(Base2, self).setUp()
def tearDown(self):
Utils.create_name = Utils.create_ascii_name
class TestAccountEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.containers = []
for i in range(10):
cont = cls.account.container(Utils.create_name())
if not cont.create():
raise ResponseError(cls.conn.response)
cls.containers.append(cont)
class TestAccountDev(Base):
env = TestAccountEnv
set_up = False
class TestAccountDevUTF8(Base2, TestAccountDev):
set_up = False
class TestAccount(Base):
env = TestAccountEnv
set_up = False
def testNoAuthToken(self):
self.assertRaises(ResponseError, self.env.account.info,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
self.assertRaises(ResponseError, self.env.account.containers,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
def testInvalidUTF8Path(self):
invalid_utf8 = Utils.create_utf8_name()[::-1]
container = self.env.account.container(invalid_utf8)
self.assert_(not container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assert_body('Invalid UTF8 or contains NULL')
def testVersionOnlyPath(self):
self.env.account.conn.make_request('PUT',
cfg={'version_only_path': True})
self.assert_status(412)
self.assert_body('Bad URL')
def testInvalidPath(self):
was_url = self.env.account.conn.storage_url
if (normalized_urls):
self.env.account.conn.storage_url = '/'
else:
self.env.account.conn.storage_url = "/%s" % was_url
self.env.account.conn.make_request('GET')
try:
self.assert_status(404)
finally:
self.env.account.conn.storage_url = was_url
def testPUT(self):
self.env.account.conn.make_request('PUT')
self.assert_status([403, 405])
def testAccountHead(self):
try_count = 0
while try_count < 5:
try_count += 1
info = self.env.account.info()
for field in ['object_count', 'container_count', 'bytes_used']:
self.assert_(info[field] >= 0)
if info['container_count'] == len(self.env.containers):
break
if try_count < 5:
time.sleep(1)
self.assertEqual(info['container_count'], len(self.env.containers))
self.assert_status(204)
def testContainerSerializedInfo(self):
container_info = {}
for container in self.env.containers:
info = {'bytes': 0}
info['count'] = random.randint(10, 30)
for i in range(info['count']):
file_item = container.file(Utils.create_name())
bytes = random.randint(1, 32768)
file_item.write_random(bytes)
info['bytes'] += bytes
container_info[container.name] = info
for format_type in ['json', 'xml']:
for a in self.env.account.containers(
parms={'format': format_type}):
self.assert_(a['count'] >= 0)
self.assert_(a['bytes'] >= 0)
headers = dict(self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEqual(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEqual(headers['content-type'],
'application/xml; charset=utf-8')
def testListingLimit(self):
limit = load_constraint('account_listing_limit')
for l in (1, 100, limit / 2, limit - 1, limit, limit + 1, limit * 2):
p = {'limit': l}
if l <= limit:
self.assert_(len(self.env.account.containers(parms=p)) <= l)
self.assert_status(200)
else:
self.assertRaises(ResponseError,
self.env.account.containers, parms=p)
self.assert_status(412)
def testContainerListing(self):
a = sorted([c.name for c in self.env.containers])
for format_type in [None, 'json', 'xml']:
b = self.env.account.containers(parms={'format': format_type})
if isinstance(b[0], dict):
b = [x['name'] for x in b]
self.assertEqual(a, b)
def testInvalidAuthToken(self):
hdrs = {'X-Auth-Token': 'bogus_auth_token'}
self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
self.assert_status(401)
def testLastContainerMarker(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers({'format': format_type})
self.assertEqual(len(containers), len(self.env.containers))
self.assert_status(200)
containers = self.env.account.containers(
parms={'format': format_type, 'marker': containers[-1]})
self.assertEqual(len(containers), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testMarkerLimitContainerList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, 9)
containers = self.env.account.containers(
parms={'format': format_type,
'marker': marker,
'limit': limit})
self.assert_(len(containers) <= limit)
if containers:
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assert_(locale.strcoll(containers[0], marker) > 0)
def testContainersOrderedByName(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers(
parms={'format': format_type})
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assertEqual(sorted(containers, cmp=locale.strcoll),
containers)
def testQuotedWWWAuthenticateHeader(self):
# check that the www-authenticate header value with the swift realm
# is correctly quoted.
conn = Connection(tf.config)
conn.authenticate()
inserted_html = '<b>Hello World'
hax = 'AUTH_haxx"\nContent-Length: %d\n\n%s' % (len(inserted_html),
inserted_html)
quoted_hax = urllib.quote(hax)
conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
resp = conn.connection.getresponse()
resp_headers = dict(resp.getheaders())
self.assertTrue('www-authenticate' in resp_headers,
'www-authenticate not found in %s' % resp_headers)
actual = resp_headers['www-authenticate']
expected = 'Swift realm="%s"' % quoted_hax
# other middleware e.g. auth_token may also set www-authenticate
# headers in which case actual values will be a comma separated list.
# check that expected value is among the actual values
self.assertTrue(expected in actual,
'%s not found in %s' % (expected, actual))
class TestAccountUTF8(Base2, TestAccount):
set_up = False
class TestAccountNoContainersEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
class TestAccountNoContainers(Base):
env = TestAccountNoContainersEnv
set_up = False
def testGetRequest(self):
for format_type in [None, 'json', 'xml']:
self.assert_(not self.env.account.containers(
parms={'format': format_type}))
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
set_up = False
class TestContainerEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 10
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item.name)
class TestContainerDev(Base):
env = TestContainerEnv
set_up = False
class TestContainerDevUTF8(Base2, TestContainerDev):
set_up = False
class TestContainer(Base):
env = TestContainerEnv
set_up = False
def testContainerNameLimit(self):
limit = load_constraint('max_container_name_length')
for l in (limit - 100, limit - 10, limit - 1, limit,
limit + 1, limit + 10, limit + 100):
cont = self.env.account.container('a' * l)
if l <= limit:
self.assert_(cont.create())
self.assert_status(201)
else:
self.assert_(not cont.create())
self.assert_status(400)
def testFileThenContainerDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
self.assert_(file_item.write_random())
self.assert_(file_item.delete())
self.assert_status(204)
self.assert_(file_item.name not in cont.files())
self.assert_(cont.delete())
self.assert_status(204)
self.assert_(cont.name not in self.env.account.containers())
def testFileListingLimitMarkerPrefix(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
files = sorted([Utils.create_name() for x in xrange(10)])
for f in files:
file_item = cont.file(f)
self.assert_(file_item.write_random())
for i in xrange(len(files)):
f = files[i]
for j in xrange(1, len(files) - i):
self.assert_(cont.files(parms={'limit': j, 'marker': f}) ==
files[i + 1: i + j + 1])
self.assert_(cont.files(parms={'marker': f}) == files[i + 1:])
self.assert_(cont.files(parms={'marker': f, 'prefix': f}) == [])
self.assert_(cont.files(parms={'prefix': f}) == [f])
def testPrefixAndLimit(self):
load_constraint('container_listing_limit')
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
prefix_file_count = 10
limit_count = 2
prefixs = ['alpha/', 'beta/', 'kappa/']
prefix_files = {}
for prefix in prefixs:
prefix_files[prefix] = []
for i in range(prefix_file_count):
file_item = cont.file(prefix + Utils.create_name())
file_item.write()
prefix_files[prefix].append(file_item.name)
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'prefix': prefix})
self.assertEqual(files, sorted(prefix_files[prefix]))
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'limit': limit_count,
'prefix': prefix})
self.assertEqual(len(files), limit_count)
for file_item in files:
self.assert_(file_item.startswith(prefix))
def testCreate(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.name in self.env.account.containers())
def testContainerFileListOnContainerThatDoesNotExist(self):
for format_type in [None, 'json', 'xml']:
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.files,
parms={'format': format_type})
self.assert_status(404)
def testUtf8Container(self):
valid_utf8 = Utils.create_utf8_name()
invalid_utf8 = valid_utf8[::-1]
container = self.env.account.container(valid_utf8)
self.assert_(container.create(cfg={'no_path_quote': True}))
self.assert_(container.name in self.env.account.containers())
self.assertEqual(container.files(), [])
self.assert_(container.delete())
container = self.env.account.container(invalid_utf8)
self.assert_(not container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assertRaises(ResponseError, container.files,
cfg={'no_path_quote': True})
self.assert_status(412)
def testCreateOnExisting(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.create())
self.assert_status(202)
def testSlashInName(self):
if Utils.create_name == Utils.create_utf8_name:
cont_name = list(unicode(Utils.create_name(), 'utf-8'))
else:
cont_name = list(Utils.create_name())
cont_name[random.randint(2, len(cont_name) - 2)] = '/'
cont_name = ''.join(cont_name)
if Utils.create_name == Utils.create_utf8_name:
cont_name = cont_name.encode('utf-8')
cont = self.env.account.container(cont_name)
self.assert_(not cont.create(cfg={'no_path_quote': True}),
'created container with name %s' % (cont_name))
self.assert_status(404)
self.assert_(cont.name not in self.env.account.containers())
def testDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.delete())
self.assert_status(204)
self.assert_(cont.name not in self.env.account.containers())
def testDeleteOnContainerThatDoesNotExist(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(not cont.delete())
self.assert_status(404)
def testDeleteOnContainerWithFiles(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assert_(file_item.name in cont.files())
self.assert_(not cont.delete())
self.assert_status(409)
def testFileCreateInContainerThatDoesNotExist(self):
file_item = File(self.env.conn, self.env.account, Utils.create_name(),
Utils.create_name())
self.assertRaises(ResponseError, file_item.write)
self.assert_status(404)
def testLastFileMarker(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files({'format': format_type})
self.assertEqual(len(files), len(self.env.files))
self.assert_status(200)
files = self.env.container.files(
parms={'format': format_type, 'marker': files[-1]})
self.assertEqual(len(files), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testContainerFileList(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
self.assert_status(200)
if isinstance(files[0], dict):
files = [x['name'] for x in files]
for file_item in self.env.files:
self.assert_(file_item in files)
for file_item in files:
self.assert_(file_item in self.env.files)
def testMarkerLimitFileList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, self.env.file_count - 1)
files = self.env.container.files(parms={'format': format_type,
'marker': marker,
'limit': limit})
if not files:
continue
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assert_(len(files) <= limit)
if files:
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assert_(locale.strcoll(files[0], marker) > 0)
def testFileOrder(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assertEqual(sorted(files, cmp=locale.strcoll), files)
def testContainerInfo(self):
info = self.env.container.info()
self.assert_status(204)
self.assertEqual(info['object_count'], self.env.file_count)
self.assertEqual(info['bytes_used'],
self.env.file_count * self.env.file_size)
def testContainerInfoOnContainerThatDoesNotExist(self):
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.info)
self.assert_status(404)
def testContainerFileListWithLimit(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type,
'limit': 2})
self.assertEqual(len(files), 2)
def testTooLongName(self):
cont = self.env.account.container('x' * 257)
self.assert_(not cont.create(),
'created container with name %s' % (cont.name))
self.assert_status(400)
def testContainerExistenceCachingProblem(self):
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assert_(cont.create())
cont.files()
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
file_item.write_random()
class TestContainerUTF8(Base2, TestContainer):
set_up = False
class TestContainerPathsEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.file_size = 8
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.files = [
'/file1',
'/file A',
'/dir1/',
'/dir2/',
'/dir1/file2',
'/dir1/subdir1/',
'/dir1/subdir2/',
'/dir1/subdir1/file2',
'/dir1/subdir1/file3',
'/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/',
'/dir1/subdir1/subsubdir1/file5',
'/dir1/subdir1/subsubdir1/file6',
'/dir1/subdir1/subsubdir1/file7',
'/dir1/subdir1/subsubdir1/file8',
'/dir1/subdir1/subsubdir2/',
'/dir1/subdir1/subsubdir2/file9',
'/dir1/subdir1/subsubdir2/file0',
'file1',
'dir1/',
'dir2/',
'dir1/file2',
'dir1/subdir1/',
'dir1/subdir2/',
'dir1/subdir1/file2',
'dir1/subdir1/file3',
'dir1/subdir1/file4',
'dir1/subdir1/subsubdir1/',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file6',
'dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir2/',
'dir1/subdir1/subsubdir2/file9',
'dir1/subdir1/subsubdir2/file0',
'dir1/subdir with spaces/',
'dir1/subdir with spaces/file B',
'dir1/subdir+with{whatever/',
'dir1/subdir+with{whatever/file D',
]
stored_files = set()
for f in cls.files:
file_item = cls.container.file(f)
if f.endswith('/'):
file_item.write(hdrs={'Content-Type': 'application/directory'})
else:
file_item.write_random(cls.file_size,
hdrs={'Content-Type':
'application/directory'})
if (normalized_urls):
nfile = '/'.join(filter(None, f.split('/')))
if (f[-1] == '/'):
nfile += '/'
stored_files.add(nfile)
else:
stored_files.add(f)
cls.stored_files = sorted(stored_files)
class TestContainerPaths(Base):
env = TestContainerPathsEnv
set_up = False
def testTraverseContainer(self):
found_files = []
found_dirs = []
def recurse_path(path, count=0):
if count > 10:
raise ValueError('too deep recursion')
for file_item in self.env.container.files(parms={'path': path}):
self.assert_(file_item.startswith(path))
if file_item.endswith('/'):
recurse_path(file_item, count + 1)
found_dirs.append(file_item)
else:
found_files.append(file_item)
recurse_path('')
for file_item in self.env.stored_files:
if file_item.startswith('/'):
self.assert_(file_item not in found_dirs)
self.assert_(file_item not in found_files)
elif file_item.endswith('/'):
self.assert_(file_item in found_dirs)
self.assert_(file_item not in found_files)
else:
self.assert_(file_item in found_files)
self.assert_(file_item not in found_dirs)
found_files = []
found_dirs = []
recurse_path('/')
for file_item in self.env.stored_files:
if not file_item.startswith('/'):
self.assert_(file_item not in found_dirs)
self.assert_(file_item not in found_files)
elif file_item.endswith('/'):
self.assert_(file_item in found_dirs)
self.assert_(file_item not in found_files)
else:
self.assert_(file_item in found_files)
self.assert_(file_item not in found_dirs)
def testContainerListing(self):
for format_type in (None, 'json', 'xml'):
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [str(x['name']) for x in files]
self.assertEqual(files, self.env.stored_files)
for format_type in ('json', 'xml'):
for file_item in self.env.container.files(parms={'format':
format_type}):
self.assert_(int(file_item['bytes']) >= 0)
self.assert_('last_modified' in file_item)
if file_item['name'].endswith('/'):
self.assertEqual(file_item['content_type'],
'application/directory')
def testStructure(self):
def assert_listing(path, file_list):
files = self.env.container.files(parms={'path': path})
self.assertEqual(sorted(file_list, cmp=locale.strcoll), files)
if not normalized_urls:
assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
assert_listing('/dir1',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/subdir1',
['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
'/dir1/subdir1/file3', '/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/'])
assert_listing('/dir1/subdir2', [])
assert_listing('', ['file1', 'dir1/', 'dir2/'])
else:
assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
'dir1/subdir2/', 'dir1/subdir with spaces/',
'dir1/subdir+with{whatever/'])
assert_listing('dir1/subdir1',
['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
'dir1/subdir1/file2', 'dir1/subdir1/file3',
'dir1/subdir1/subsubdir1/'])
assert_listing('dir1/subdir1/subsubdir1',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir1/subsubdir1/',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir with spaces/',
['dir1/subdir with spaces/file B'])
class TestFileEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
# creating another account and connection
# for account to account copy tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_size = 128
class TestFileDev(Base):
env = TestFileEnv
set_up = False
class TestFileDevUTF8(Base2, TestFileDev):
set_up = False
class TestFile(Base):
env = TestFileEnv
set_up = False
def testCopy(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
data = file_item.write_random()
file_item.sync_metadata(metadata)
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy('%s%s' % (prefix, cont), dest_filename)
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopyAccount(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {Utils.create_ascii_name(): Utils.create_name()}
data = file_item.write_random()
file_item.sync_metadata(metadata)
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
acct = self.env.conn.account_name
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy_account(acct,
'%s%s' % (prefix, cont),
dest_filename)
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
dest_cont = self.env.account2.container(Utils.create_name())
self.assert_(dest_cont.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
acct = self.env.conn2.account_name
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy_account(acct,
'%s%s' % (prefix, dest_cont),
dest_filename)
self.assert_(dest_filename in dest_cont.files())
file_item = dest_cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopy404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename)
self.assert_(not file_item.copy(
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
self.assert_status(404)
self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name()))
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assert_(not file_item.copy(
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
self.assert_status(404)
self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name()))
self.assert_status(404)
# invalid destination container
file_item = self.env.container.file(source_filename)
self.assert_(not file_item.copy(
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name()))
def testCopyAccount404s(self):
acct = self.env.conn.account_name
acct2 = self.env.conn2.account_name
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create(hdrs={
'X-Container-Read': self.env.conn2.user_acl
}))
dest_cont2 = self.env.account2.container(Utils.create_name())
self.assert_(dest_cont2.create(hdrs={
'X-Container-Write': self.env.conn.user_acl,
'X-Container-Read': self.env.conn.user_acl
}))
for acct, cont in ((acct, dest_cont), (acct2, dest_cont2)):
for prefix in ('', '/'):
# invalid source container
source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename)
self.assert_(not file_item.copy_account(
acct,
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
if acct == acct2:
# there is no such source container
# and foreign user can have no permission to read it
self.assert_status(403)
else:
self.assert_status(404)
self.assert_(not file_item.copy_account(
acct,
'%s%s' % (prefix, cont),
Utils.create_name()))
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assert_(not file_item.copy_account(
acct,
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
if acct == acct2:
# there is no such object
# and foreign user can have no permission to read it
self.assert_status(403)
else:
self.assert_status(404)
self.assert_(not file_item.copy_account(
acct,
'%s%s' % (prefix, cont),
Utils.create_name()))
self.assert_status(404)
# invalid destination container
file_item = self.env.container.file(source_filename)
self.assert_(not file_item.copy_account(
acct,
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name()))
if acct == acct2:
# there is no such destination container
# and foreign user can have no permission to write there
self.assert_status(403)
else:
self.assert_status(404)
def testCopyNoDestinationHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
file_item = self.env.container.file(source_filename)
self.assert_(not file_item.copy(Utils.create_name(),
Utils.create_name(),
cfg={'no_destination': True}))
self.assert_status(412)
def testCopyDestinationSlashProblems(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
# no slash
self.assert_(not file_item.copy(Utils.create_name(),
Utils.create_name(),
cfg={'destination': Utils.create_name()}))
self.assert_status(412)
def testCopyFromHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
data = file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = cont.file(dest_filename)
file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
prefix, self.env.container.name, source_filename)})
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopyFromAccountHeader(self):
acct = self.env.conn.account_name
src_cont = self.env.account.container(Utils.create_name())
self.assert_(src_cont.create(hdrs={
'X-Container-Read': self.env.conn2.user_acl
}))
source_filename = Utils.create_name()
file_item = src_cont.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
data = file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
dest_cont2 = self.env.account2.container(Utils.create_name())
self.assert_(dest_cont2.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
for cont in (src_cont, dest_cont, dest_cont2):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = cont.file(dest_filename)
file_item.write(hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' % (
prefix,
src_cont.name,
source_filename)})
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopyFromHeader404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
for prefix in ('', '/'):
# invalid source container
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
Utils.create_name(), source_filename)})
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
self.env.container.name, Utils.create_name())})
self.assert_status(404)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
self.env.container.name, source_filename)})
self.assert_status(404)
def testCopyFromAccountHeader404s(self):
acct = self.env.conn2.account_name
src_cont = self.env.account2.container(Utils.create_name())
self.assert_(src_cont.create(hdrs={
'X-Container-Read': self.env.conn.user_acl
}))
source_filename = Utils.create_name()
file_item = src_cont.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
Utils.create_name(),
source_filename)})
# looks like cached responses leak "not found"
# to un-authorized users, not going to fix it now, but...
self.assert_status([403, 404])
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
Utils.create_name())})
self.assert_status(404)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
source_filename)})
self.assert_status(404)
def testNameLimit(self):
limit = load_constraint('max_object_name_length')
for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
file_item = self.env.container.file('a' * l)
if l <= limit:
self.assert_(file_item.write())
self.assert_status(201)
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
def testQuestionMarkInName(self):
if Utils.create_name == Utils.create_ascii_name:
file_name = list(Utils.create_name())
file_name[random.randint(2, len(file_name) - 2)] = '?'
file_name = "".join(file_name)
else:
file_name = Utils.create_name(6) + '?' + Utils.create_name(6)
file_item = self.env.container.file(file_name)
self.assert_(file_item.write(cfg={'no_path_quote': True}))
self.assert_(file_name not in self.env.container.files())
self.assert_(file_name.split('?')[0] in self.env.container.files())
def testDeleteThen404s(self):
file_item = self.env.container.file(Utils.create_name())
self.assert_(file_item.write_random())
self.assert_status(201)
self.assert_(file_item.delete())
self.assert_status(204)
file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}
for method in (file_item.info,
file_item.read,
file_item.sync_metadata,
file_item.delete):
self.assertRaises(ResponseError, method)
self.assert_status(404)
def testBlankMetadataName(self):
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = {'': Utils.create_name()}
self.assertRaises(ResponseError, file_item.write_random)
self.assert_status(400)
def testMetadataNumberLimit(self):
number_limit = load_constraint('max_meta_count')
size_limit = load_constraint('max_meta_overall_size')
for i in (number_limit - 10, number_limit - 1, number_limit,
number_limit + 1, number_limit + 10, number_limit + 100):
j = size_limit / (i * 2)
size = 0
metadata = {}
while len(metadata.keys()) < i:
key = Utils.create_ascii_name()
val = Utils.create_name()
if len(key) > j:
key = key[:j]
val = val[:j]
size += len(key) + len(val)
metadata[key] = val
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if i <= number_limit:
self.assert_(file_item.write())
self.assert_status(201)
self.assert_(file_item.sync_metadata())
self.assert_status((201, 202))
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assert_(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testContentTypeGuessing(self):
file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
'zip': 'application/zip'}
container = self.env.account.container(Utils.create_name())
self.assert_(container.create())
for i in file_types.keys():
file_item = container.file(Utils.create_name() + '.' + i)
file_item.write('', cfg={'no_content_type': True})
file_types_read = {}
for i in container.files(parms={'format': 'json'}):
file_types_read[i['name'].split('.')[1]] = i['content_type']
self.assertEqual(file_types, file_types_read)
def testRangedGets(self):
# We set the file_length to a strange multiple here. This is to check
# that ranges still work in the EC case when the requested range
# spans EC segment boundaries. The 1 MiB base value is chosen because
# that's a common EC segment size. The 1.33 multiple is to ensure we
# aren't aligned on segment boundaries
file_length = int(1048576 * 1.33)
range_size = file_length / 10
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for i in range(0, file_length, range_size):
range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
hdrs = {'Range': range_string}
self.assert_(data[i: i + range_size] == file_item.read(hdrs=hdrs),
range_string)
range_string = 'bytes=-%d' % (i)
hdrs = {'Range': range_string}
if i == 0:
# RFC 2616 14.35.1
# "If a syntactically valid byte-range-set includes ... at
# least one suffix-byte-range-spec with a NON-ZERO
# suffix-length, then the byte-range-set is satisfiable.
# Otherwise, the byte-range-set is unsatisfiable.
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
else:
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
range_string = 'bytes=%d-' % (i)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data[i - file_length:],
range_string)
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data[-1000:], range_string)
hdrs = {'Range': '0-4'}
self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
# RFC 2616 14.35.1
# "If the entity is shorter than the specified suffix-length, the
# entire entity-body is used."
range_string = 'bytes=-%d' % (file_length + 10)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
def testRangedGetsWithLWSinHeader(self):
#Skip this test until webob 1.2 can tolerate LWS in Range header.
file_length = 10000
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
self.assert_(file_item.read(hdrs={'Range': r}) == data[0:1000])
def testFileSizeLimit(self):
limit = load_constraint('max_file_size')
tsecs = 3
def timeout(seconds, method, *args, **kwargs):
try:
with eventlet.Timeout(seconds):
method(*args, **kwargs)
except eventlet.Timeout:
return True
else:
return False
for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
limit + 10, limit + 100):
file_item = self.env.container.file(Utils.create_name())
if i <= limit:
self.assert_(timeout(tsecs, file_item.write,
cfg={'set_content_length': i}))
else:
self.assertRaises(ResponseError, timeout, tsecs,
file_item.write,
cfg={'set_content_length': i})
def testNoContentLengthForPut(self):
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write, 'testing',
cfg={'no_content_length': True})
self.assert_status(411)
def testDelete(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assert_(file_item.name in self.env.container.files())
self.assert_(file_item.delete())
self.assert_(file_item.name not in self.env.container.files())
def testBadHeaders(self):
file_length = 100
# no content type on puts should be ok
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(file_length, cfg={'no_content_type': True})
self.assert_status(201)
# content length x
self.assertRaises(ResponseError, file_item.write_random, file_length,
hdrs={'Content-Length': 'X'},
cfg={'no_content_length': True})
self.assert_status(400)
# no content-length
self.assertRaises(ResponseError, file_item.write_random, file_length,
cfg={'no_content_length': True})
self.assert_status(411)
self.assertRaises(ResponseError, file_item.write_random, file_length,
hdrs={'transfer-encoding': 'gzip,chunked'},
cfg={'no_content_length': True})
self.assert_status(501)
# bad request types
#for req in ('LICK', 'GETorHEAD_base', 'container_info',
# 'best_response'):
for req in ('LICK', 'GETorHEAD_base'):
self.env.account.conn.make_request(req)
self.assert_status(405)
# bad range headers
self.assert_(len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) ==
file_length)
self.assert_status(200)
def testMetadataLengthLimits(self):
key_limit = load_constraint('max_meta_name_length')
value_limit = load_constraint('max_meta_value_length')
lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
[key_limit + 1, value_limit], [key_limit, 0],
[key_limit, value_limit * 10],
[key_limit * 10, value_limit]]
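        # Each [key_length, value_length] pair at or under both limits should
        # be accepted (201); exceeding either limit should be rejected (400).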
for l in lengths:
metadata = {'a' * l[0]: 'b' * l[1]}
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if l[0] <= key_limit and l[1] <= value_limit:
self.assert_(file_item.write())
self.assert_status(201)
self.assert_(file_item.sync_metadata())
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assert_(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testEtagWayoff(self):
file_item = self.env.container.file(Utils.create_name())
hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
self.assert_status(422)
def testFileCreate(self):
for i in range(10):
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random()
self.assert_status(201)
self.assert_(data == file_item.read())
self.assert_status(200)
def testHead(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random(self.env.file_size)
md5 = file_item.md5
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assert_status(200)
self.assertEqual(info['content_length'], self.env.file_size)
self.assertEqual(info['etag'], md5)
self.assertEqual(info['content_type'], content_type)
self.assert_('last_modified' in info)
def testDeleteOfFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
def testHeadOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
def testMetadataOnPost(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
self.assert_(file_item.sync_metadata())
self.assert_status((201, 202))
file_item = self.env.container.file(file_item.name)
self.assert_(file_item.initialize())
self.assert_status(200)
self.assertEqual(file_item.metadata, metadata)
def testGetContentType(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random()
file_item = self.env.container.file(file_name)
file_item.read()
self.assertEqual(content_type, file_item.content_type)
def testGetOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
def testPostOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
def testMetadataOnPut(self):
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
file_item.write_random(self.env.file_size)
file_item = self.env.container.file(file_item.name)
self.assert_(file_item.initialize())
self.assert_status(200)
self.assertEqual(file_item.metadata, metadata)
def testSerialization(self):
container = self.env.account.container(Utils.create_name())
self.assert_(container.create())
files = []
for i in (0, 1, 10, 100, 1000, 10000):
files.append({'name': Utils.create_name(),
'content_type': Utils.create_name(), 'bytes': i})
write_time = time.time()
for f in files:
file_item = container.file(f['name'])
file_item.content_type = f['content_type']
file_item.write_random(f['bytes'])
f['hash'] = file_item.md5
f['json'] = False
f['xml'] = False
write_time = time.time() - write_time
for format_type in ['json', 'xml']:
for file_item in container.files(parms={'format': format_type}):
found = False
for f in files:
if f['name'] != file_item['name']:
continue
self.assertEqual(file_item['content_type'],
f['content_type'])
self.assertEqual(int(file_item['bytes']), f['bytes'])
d = datetime.strptime(
file_item['last_modified'].split('.')[0],
"%Y-%m-%dT%H:%M:%S")
lm = time.mktime(d.timetuple())
if 'last_modified' in f:
self.assertEqual(f['last_modified'], lm)
else:
f['last_modified'] = lm
f[format_type] = True
found = True
self.assert_(found, 'Unexpected file %s found in '
'%s listing' % (file_item['name'], format_type))
headers = dict(self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEqual(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEqual(headers['content-type'],
'application/xml; charset=utf-8')
lm_diff = max([f['last_modified'] for f in files]) -\
min([f['last_modified'] for f in files])
self.assert_(lm_diff < write_time + 1, 'Diff in last '
'modified times should be less than time to write files')
for f in files:
for format_type in ['json', 'xml']:
self.assert_(f[format_type], 'File %s not found in %s listing'
% (f['name'], format_type))
def testStackedOverwrite(self):
file_item = self.env.container.file(Utils.create_name())
for i in range(1, 11):
data = file_item.write_random(512)
file_item.write(data)
self.assert_(file_item.read() == data)
def testTooLongName(self):
file_item = self.env.container.file('x' * 1025)
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
def testZeroByteFile(self):
file_item = self.env.container.file(Utils.create_name())
self.assert_(file_item.write(''))
self.assert_(file_item.name in self.env.container.files())
self.assert_(file_item.read() == '')
def testEtagResponse(self):
file_item = self.env.container.file(Utils.create_name())
data = StringIO.StringIO(file_item.write_random(512))
etag = File.compute_md5sum(data)
headers = dict(self.env.conn.response.getheaders())
self.assert_('etag' in headers.keys())
header_etag = headers['etag'].strip('"')
self.assertEqual(etag, header_etag)
def testChunkedPut(self):
if (tf.web_front_end == 'apache2'):
raise SkipTest("Chunked PUT can only be tested with apache2 web"
" front end")
def chunks(s, length=3):
i, j = 0, length
while i < len(s):
yield s[i:j]
i, j = j, j + length
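        # Upload the same 10000 random bytes in chunk sizes of 1, 10, 100 and
        # 1000 and check that the stored content and ETag are unchanged.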
data = File.random_data(10000)
etag = File.compute_md5sum(data)
for i in (1, 10, 100, 1000):
file_item = self.env.container.file(Utils.create_name())
for j in chunks(data, i):
file_item.chunked_write(j)
self.assert_(file_item.chunked_write())
self.assert_(data == file_item.read())
info = file_item.info()
self.assertEqual(etag, info['etag'])
class TestFileUTF8(Base2, TestFile):
set_up = False
class TestDloEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.segment_prefix = prefix
for letter in ('a', 'b', 'c', 'd', 'e'):
file_item = cls.container.file("%s/seg_lower%s" % (prefix, letter))
file_item.write(letter * 10)
file_item = cls.container.file("%s/seg_upper%s" % (prefix, letter))
file_item.write(letter.upper() * 10)
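        # A DLO manifest is a (nearly) empty object whose X-Object-Manifest
        # header names "<container>/<prefix>"; a GET of the manifest returns
        # the concatenation of every object under that prefix.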
man1 = cls.container.file("man1")
man1.write('man1-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
(cls.container.name, prefix)})
man1 = cls.container.file("man2")
man1.write('man2-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_upper" %
(cls.container.name, prefix)})
manall = cls.container.file("manall")
manall.write('manall-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg" %
(cls.container.name, prefix)})
class TestDlo(Base):
env = TestDloEnv
set_up = False
def test_get_manifest(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
file_item = self.env.container.file('man2')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE")
file_item = self.env.container.file('manall')
file_contents = file_item.read()
self.assertEqual(
file_contents,
("aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee" +
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE"))
def test_get_manifest_document_itself(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read(parms={'multipart-manifest': 'get'})
self.assertEqual(file_contents, "man1-contents")
self.assertEqual(file_item.info()['x_object_manifest'],
"%s/%s/seg_lower" %
(self.env.container.name, self.env.segment_prefix))
def test_get_range(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read(size=25, offset=8)
self.assertEqual(file_contents, "aabbbbbbbbbbccccccccccddd")
file_contents = file_item.read(size=1, offset=47)
self.assertEqual(file_contents, "e")
def test_get_range_out_of_range(self):
file_item = self.env.container.file('man1')
self.assertRaises(ResponseError, file_item.read, size=7, offset=50)
self.assert_status(416)
def test_copy(self):
# Adding a new segment, copying the manifest, and then deleting the
# segment proves that the new object is really the concatenated
# segments and not just a manifest.
f_segment = self.env.container.file("%s/seg_lowerf" %
(self.env.segment_prefix))
f_segment.write('ffffffffff')
try:
man1_item = self.env.container.file('man1')
man1_item.copy(self.env.container.name, "copied-man1")
finally:
# try not to leave this around for other tests to stumble over
f_segment.delete()
file_item = self.env.container.file('copied-man1')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
# The copied object must not have X-Object-Manifest
self.assertTrue("x_object_manifest" not in file_item.info())
def test_copy_account(self):
        # DLOs only work within a single account, so this copy_account test
        # copies within the same account and container.
acct = self.env.conn.account_name
# Adding a new segment, copying the manifest, and then deleting the
# segment proves that the new object is really the concatenated
# segments and not just a manifest.
f_segment = self.env.container.file("%s/seg_lowerf" %
(self.env.segment_prefix))
f_segment.write('ffffffffff')
try:
man1_item = self.env.container.file('man1')
man1_item.copy_account(acct,
self.env.container.name,
"copied-man1")
finally:
# try not to leave this around for other tests to stumble over
f_segment.delete()
file_item = self.env.container.file('copied-man1')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
# The copied object must not have X-Object-Manifest
self.assertTrue("x_object_manifest" not in file_item.info())
def test_copy_manifest(self):
# Copying the manifest with multipart-manifest=get query string
# should result in another manifest
try:
man1_item = self.env.container.file('man1')
man1_item.copy(self.env.container.name, "copied-man1",
parms={'multipart-manifest': 'get'})
copied = self.env.container.file("copied-man1")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(copied_contents, "man1-contents")
copied_contents = copied.read()
self.assertEqual(
copied_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
self.assertEqual(man1_item.info()['x_object_manifest'],
copied.info()['x_object_manifest'])
finally:
# try not to leave this around for other tests to stumble over
self.env.container.file("copied-man1").delete()
def test_dlo_if_match_get(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.read(hdrs={'If-Match': etag})
self.assert_status(200)
def test_dlo_if_none_match_get(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_dlo_if_match_head(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.info(hdrs={'If-Match': etag})
self.assert_status(200)
def test_dlo_if_none_match_head(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
class TestDloUTF8(Base2, TestDlo):
set_up = False
class TestFileComparisonEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 20
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item)
cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y",
time.gmtime(time.time() - 86400))
cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() + 86400))
class TestFileComparison(Base):
env = TestFileComparisonEnv
set_up = False
def testIfMatch(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testIfNoneMatch(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': 'bogus'}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
def testIfModifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Modified-Since': self.env.time_old_f1}
self.assert_(file_item.read(hdrs=hdrs))
self.assert_(file_item.info(hdrs=hdrs))
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(304)
def testIfUnmodifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Unmodified-Since': self.env.time_new}
self.assert_(file_item.read(hdrs=hdrs))
self.assert_(file_item.info(hdrs=hdrs))
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(412)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_new}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus',
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testLastModified(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file = self.env.container.file(file_name)
file.content_type = content_type
resp = file.write_random_return_resp(self.env.file_size)
put_last_modified = resp.getheader('last-modified')
file = self.env.container.file(file_name)
info = file.info()
self.assert_('last_modified' in info)
last_modified = info['last_modified']
self.assertEqual(put_last_modified, info['last_modified'])
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file.read, hdrs=hdrs)
self.assert_status(304)
hdrs = {'If-Unmodified-Since': last_modified}
self.assert_(file.read(hdrs=hdrs))
class TestFileComparisonUTF8(Base2, TestFileComparison):
set_up = False
class TestSloEnv(object):
slo_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
if cls.slo_enabled is None:
cls.slo_enabled = 'slo' in cluster_info
if not cls.slo_enabled:
return
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
seg_info = {}
for letter, size in (('a', 1024 * 1024),
('b', 1024 * 1024),
('c', 1024 * 1024),
('d', 1024 * 1024),
('e', 1)):
seg_name = "seg_%s" % letter
file_item = cls.container.file(seg_name)
file_item.write(letter * size)
seg_info[seg_name] = {
'size_bytes': size,
'etag': file_item.md5,
'path': '/%s/%s' % (cls.container.name, seg_name)}
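        # An SLO manifest is uploaded with ?multipart-manifest=put and a JSON
        # body listing each segment's path, etag and size_bytes.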
file_item = cls.container.file("manifest-abcde")
file_item.write(
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
seg_info['seg_c'], seg_info['seg_d'],
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
file_item = cls.container.file('manifest-cd')
cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']])
file_item.write(cd_json, parms={'multipart-manifest': 'put'})
cd_etag = hashlib.md5(seg_info['seg_c']['etag'] +
seg_info['seg_d']['etag']).hexdigest()
file_item = cls.container.file("manifest-bcd-submanifest")
file_item.write(
json.dumps([seg_info['seg_b'],
{'etag': cd_etag,
'size_bytes': (seg_info['seg_c']['size_bytes'] +
seg_info['seg_d']['size_bytes']),
'path': '/%s/%s' % (cls.container.name,
'manifest-cd')}]),
parms={'multipart-manifest': 'put'})
bcd_submanifest_etag = hashlib.md5(
seg_info['seg_b']['etag'] + cd_etag).hexdigest()
file_item = cls.container.file("manifest-abcde-submanifest")
file_item.write(
json.dumps([
seg_info['seg_a'],
{'etag': bcd_submanifest_etag,
'size_bytes': (seg_info['seg_b']['size_bytes'] +
seg_info['seg_c']['size_bytes'] +
seg_info['seg_d']['size_bytes']),
'path': '/%s/%s' % (cls.container.name,
'manifest-bcd-submanifest')},
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
class TestSlo(Base):
env = TestSloEnv
set_up = False
def setUp(self):
super(TestSlo, self).setUp()
if self.env.slo_enabled is False:
raise SkipTest("SLO not enabled")
elif self.env.slo_enabled is not True:
# just some sanity checking
raise Exception(
"Expected slo_enabled to be True/False, got %r" %
(self.env.slo_enabled,))
def test_slo_get_simple_manifest(self):
file_item = self.env.container.file('manifest-abcde')
file_contents = file_item.read()
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
self.assertEqual('a', file_contents[0])
self.assertEqual('a', file_contents[1024 * 1024 - 1])
self.assertEqual('b', file_contents[1024 * 1024])
self.assertEqual('d', file_contents[-2])
self.assertEqual('e', file_contents[-1])
def test_slo_get_nested_manifest(self):
file_item = self.env.container.file('manifest-abcde-submanifest')
file_contents = file_item.read()
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
self.assertEqual('a', file_contents[0])
self.assertEqual('a', file_contents[1024 * 1024 - 1])
self.assertEqual('b', file_contents[1024 * 1024])
self.assertEqual('d', file_contents[-2])
self.assertEqual('e', file_contents[-1])
def test_slo_ranged_get(self):
file_item = self.env.container.file('manifest-abcde')
file_contents = file_item.read(size=1024 * 1024 + 2,
offset=1024 * 1024 - 1)
self.assertEqual('a', file_contents[0])
self.assertEqual('b', file_contents[1])
self.assertEqual('b', file_contents[-2])
self.assertEqual('c', file_contents[-1])
def test_slo_ranged_submanifest(self):
file_item = self.env.container.file('manifest-abcde-submanifest')
file_contents = file_item.read(size=1024 * 1024 + 2,
offset=1024 * 1024 * 2 - 1)
self.assertEqual('b', file_contents[0])
self.assertEqual('c', file_contents[1])
self.assertEqual('c', file_contents[-2])
self.assertEqual('d', file_contents[-1])
def test_slo_etag_is_hash_of_etags(self):
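        # An SLO manifest's ETag is the MD5 of the concatenated segment ETags
        # (hex digests), not the MD5 of the object data itself.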
expected_hash = hashlib.md5()
expected_hash.update(hashlib.md5('a' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('b' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('c' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('d' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('e').hexdigest())
expected_etag = expected_hash.hexdigest()
file_item = self.env.container.file('manifest-abcde')
self.assertEqual(expected_etag, file_item.info()['etag'])
def test_slo_etag_is_hash_of_etags_submanifests(self):
def hd(x):
return hashlib.md5(x).hexdigest()
expected_etag = hd(hd('a' * 1024 * 1024) +
hd(hd('b' * 1024 * 1024) +
hd(hd('c' * 1024 * 1024) +
hd('d' * 1024 * 1024))) +
hd('e'))
file_item = self.env.container.file('manifest-abcde-submanifest')
self.assertEqual(expected_etag, file_item.info()['etag'])
def test_slo_etag_mismatch(self):
file_item = self.env.container.file("manifest-a-bad-etag")
try:
file_item.write(
json.dumps([{
'size_bytes': 1024 * 1024,
'etag': 'not it',
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
parms={'multipart-manifest': 'put'})
except ResponseError as err:
self.assertEqual(400, err.status)
else:
self.fail("Expected ResponseError but didn't get it")
def test_slo_size_mismatch(self):
file_item = self.env.container.file("manifest-a-bad-size")
try:
file_item.write(
json.dumps([{
'size_bytes': 1024 * 1024 - 1,
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
parms={'multipart-manifest': 'put'})
except ResponseError as err:
self.assertEqual(400, err.status)
else:
self.fail("Expected ResponseError but didn't get it")
def test_slo_copy(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde")
copied = self.env.container.file("copied-abcde")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
def test_slo_copy_account(self):
acct = self.env.conn.account_name
# same account copy
file_item = self.env.container.file("manifest-abcde")
file_item.copy_account(acct, self.env.container.name, "copied-abcde")
copied = self.env.container.file("copied-abcde")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
# copy to different account
acct = self.env.conn2.account_name
dest_cont = self.env.account2.container(Utils.create_name())
self.assert_(dest_cont.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
file_item = self.env.container.file("manifest-abcde")
file_item.copy_account(acct, dest_cont, "copied-abcde")
copied = dest_cont.file("copied-abcde")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
def test_slo_copy_the_manifest(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde-manifest-only",
parms={'multipart-manifest': 'get'})
copied = self.env.container.file("copied-abcde-manifest-only")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
try:
json.loads(copied_contents)
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
def test_slo_copy_the_manifest_account(self):
acct = self.env.conn.account_name
# same account
file_item = self.env.container.file("manifest-abcde")
file_item.copy_account(acct,
self.env.container.name,
"copied-abcde-manifest-only",
parms={'multipart-manifest': 'get'})
copied = self.env.container.file("copied-abcde-manifest-only")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
try:
json.loads(copied_contents)
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
# different account
acct = self.env.conn2.account_name
dest_cont = self.env.account2.container(Utils.create_name())
self.assert_(dest_cont.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
file_item.copy_account(acct,
dest_cont,
"copied-abcde-manifest-only",
parms={'multipart-manifest': 'get'})
copied = dest_cont.file("copied-abcde-manifest-only")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
try:
json.loads(copied_contents)
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
def test_slo_get_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_body = manifest.read(parms={'multipart-manifest': 'get'})
self.assertEqual('application/json; charset=utf-8',
manifest.content_type)
try:
json.loads(got_body)
except ValueError:
self.fail("GET with multipart-manifest=get got invalid json")
def test_slo_head_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_info = manifest.info(parms={'multipart-manifest': 'get'})
self.assertEqual('application/json; charset=utf-8',
got_info['content_type'])
def test_slo_if_match_get(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.read(hdrs={'If-Match': etag})
self.assert_status(200)
def test_slo_if_none_match_get(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_slo_if_match_head(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.info(hdrs={'If-Match': etag})
self.assert_status(200)
def test_slo_if_none_match_head(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
class TestSloUTF8(Base2, TestSlo):
set_up = False
class TestObjectVersioningEnv(object):
versioning_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
# Second connection for ACL tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.versions_container = cls.account.container(prefix + "-versions")
if not cls.versions_container.create():
raise ResponseError(cls.conn.response)
cls.container = cls.account.container(prefix + "-objs")
if not cls.container.create(
hdrs={'X-Versions-Location': cls.versions_container.name}):
raise ResponseError(cls.conn.response)
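        # With X-Versions-Location set on the container, each overwrite copies
        # the previous version of an object into the versions container before
        # replacing it, and DELETE restores the most recent saved version.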
container_info = cls.container.info()
# if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info
class TestCrossPolicyObjectVersioningEnv(object):
# tri-state: None initially, then True/False
versioning_enabled = None
multiple_policies_enabled = None
policies = None
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.multiple_policies_enabled is None:
try:
cls.policies = tf.FunctionalStoragePolicyCollection.from_info()
except AssertionError:
pass
if cls.policies and len(cls.policies) > 1:
cls.multiple_policies_enabled = True
else:
cls.multiple_policies_enabled = False
# We have to lie here that versioning is enabled. We actually
# don't know, but it does not matter. We know these tests cannot
# run without multiple policies present. If multiple policies are
# present, we won't be setting this field to any value, so it
# should all still work.
cls.versioning_enabled = True
return
policy = cls.policies.select()
version_policy = cls.policies.exclude(name=policy['name']).select()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
# Second connection for ACL tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.versions_container = cls.account.container(prefix + "-versions")
if not cls.versions_container.create(
{'X-Storage-Policy': policy['name']}):
raise ResponseError(cls.conn.response)
cls.container = cls.account.container(prefix + "-objs")
if not cls.container.create(
hdrs={'X-Versions-Location': cls.versions_container.name,
'X-Storage-Policy': version_policy['name']}):
raise ResponseError(cls.conn.response)
container_info = cls.container.info()
# if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info
class TestObjectVersioning(Base):
env = TestObjectVersioningEnv
set_up = False
def setUp(self):
super(TestObjectVersioning, self).setUp()
if self.env.versioning_enabled is False:
raise SkipTest("Object versioning not enabled")
elif self.env.versioning_enabled is not True:
# just some sanity checking
raise Exception(
"Expected versioning_enabled to be True/False, got %r" %
(self.env.versioning_enabled,))
def tearDown(self):
super(TestObjectVersioning, self).tearDown()
try:
# delete versions first!
self.env.versions_container.delete_files()
self.env.container.delete_files()
except ResponseError:
pass
def test_overwriting(self):
container = self.env.container
versions_container = self.env.versions_container
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
versioned_obj.write("aaaaa")
self.assertEqual(0, versions_container.info()['object_count'])
versioned_obj.write("bbbbb")
# the old version got saved off
self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[0]
self.assertEqual(
"aaaaa", versions_container.file(versioned_obj_name).read())
# if we overwrite it again, there are two versions
versioned_obj.write("ccccc")
self.assertEqual(2, versions_container.info()['object_count'])
# as we delete things, the old contents return
self.assertEqual("ccccc", versioned_obj.read())
versioned_obj.delete()
self.assertEqual("bbbbb", versioned_obj.read())
versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read())
versioned_obj.delete()
self.assertRaises(ResponseError, versioned_obj.read)
def test_versioning_dlo(self):
container = self.env.container
versions_container = self.env.versions_container
obj_name = Utils.create_name()
for i in ('1', '2', '3'):
time.sleep(.01) # guarantee that the timestamp changes
obj_name_seg = obj_name + '/' + i
versioned_obj = container.file(obj_name_seg)
versioned_obj.write(i)
versioned_obj.write(i + i)
self.assertEqual(3, versions_container.info()['object_count'])
man_file = container.file(obj_name)
man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
(self.env.container.name, obj_name)})
# guarantee that the timestamp changes
time.sleep(.01)
# write manifest file again
man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
(self.env.container.name, obj_name)})
self.assertEqual(3, versions_container.info()['object_count'])
self.assertEqual("112233", man_file.read())
def test_versioning_check_acl(self):
container = self.env.container
versions_container = self.env.versions_container
versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
versioned_obj.write("aaaaa")
self.assertEqual("aaaaa", versioned_obj.read())
versioned_obj.write("bbbbb")
self.assertEqual("bbbbb", versioned_obj.read())
# Use token from second account and try to delete the object
org_token = self.env.account.conn.storage_token
self.env.account.conn.storage_token = self.env.conn2.storage_token
try:
self.assertRaises(ResponseError, versioned_obj.delete)
finally:
self.env.account.conn.storage_token = org_token
# Verify with token from first account
self.assertEqual("bbbbb", versioned_obj.read())
versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read())
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
set_up = False
class TestCrossPolicyObjectVersioning(TestObjectVersioning):
env = TestCrossPolicyObjectVersioningEnv
set_up = False
def setUp(self):
super(TestCrossPolicyObjectVersioning, self).setUp()
if self.env.multiple_policies_enabled is False:
raise SkipTest('Cross policy test requires multiple policies')
elif self.env.multiple_policies_enabled is not True:
# just some sanity checking
raise Exception("Expected multiple_policies_enabled "
"to be True/False, got %r" % (
self.env.versioning_enabled,))
class TestTempurlEnv(object):
tempurl_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.tempurl_enabled is None:
cls.tempurl_enabled = 'tempurl' in cluster_info
if not cls.tempurl_enabled:
return
cls.tempurl_key = Utils.create_name()
cls.tempurl_key2 = Utils.create_name()
cls.account = Account(
cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
cls.account.update_metadata({
'temp-url-key': cls.tempurl_key,
'temp-url-key-2': cls.tempurl_key2
})
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.obj = cls.container.file(Utils.create_name())
cls.obj.write("obj contents")
cls.other_obj = cls.container.file(Utils.create_name())
cls.other_obj.write("other obj contents")
class TestTempurl(Base):
env = TestTempurlEnv
set_up = False
def setUp(self):
super(TestTempurl, self).setUp()
if self.env.tempurl_enabled is False:
raise SkipTest("TempURL not enabled")
elif self.env.tempurl_enabled is not True:
# just some sanity checking
raise Exception(
"Expected tempurl_enabled to be True/False, got %r" %
(self.env.tempurl_enabled,))
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key)
self.obj_tempurl_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
def tempurl_sig(self, method, expires, path, key):
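        # The TempURL signature is an HMAC-SHA1 over "METHOD\nEXPIRES\nPATH",
        # keyed with the account's X-Account-Meta-Temp-URL-Key.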
return hmac.new(
key,
'%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
hashlib.sha1).hexdigest()
def test_GET(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
# GET tempurls also allow HEAD requests
self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True}))
def test_GET_with_key_2(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key2)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
def test_PUT(self):
new_obj = self.env.container.file(Utils.create_name())
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'PUT', expires, self.env.conn.make_path(new_obj.path),
self.env.tempurl_key)
put_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
new_obj.write('new obj contents',
parms=put_parms, cfg={'no_auth_token': True})
self.assertEqual(new_obj.read(), "new obj contents")
# PUT tempurls also allow HEAD requests
self.assert_(new_obj.info(parms=put_parms,
cfg={'no_auth_token': True}))
def test_HEAD(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key)
head_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
self.assert_(self.env.obj.info(parms=head_parms,
cfg={'no_auth_token': True}))
# HEAD tempurls don't allow PUT or GET requests, despite the fact that
# PUT and GET tempurls both allow HEAD requests
self.assertRaises(ResponseError, self.env.other_obj.read,
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
self.assertRaises(ResponseError, self.env.other_obj.write,
'new contents',
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
def test_different_object(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
self.assertRaises(ResponseError, self.env.other_obj.read,
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
def test_changing_sig(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
parms = self.obj_tempurl_parms.copy()
if parms['temp_url_sig'][0] == 'a':
parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
else:
parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
self.assertRaises(ResponseError, self.env.obj.read,
cfg={'no_auth_token': True},
parms=parms)
self.assert_status([401])
def test_changing_expires(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
parms = self.obj_tempurl_parms.copy()
if parms['temp_url_expires'][-1] == '0':
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
else:
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
self.assertRaises(ResponseError, self.env.obj.read,
cfg={'no_auth_token': True},
parms=parms)
self.assert_status([401])
class TestTempurlUTF8(Base2, TestTempurl):
set_up = False
class TestContainerTempurlEnv(object):
tempurl_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.tempurl_enabled is None:
cls.tempurl_enabled = 'tempurl' in cluster_info
if not cls.tempurl_enabled:
return
cls.tempurl_key = Utils.create_name()
cls.tempurl_key2 = Utils.create_name()
cls.account = Account(
cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
# creating another account and connection
# for ACL tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
cls.account2 = Account(
cls.conn2, config2.get('account', config2['username']))
cls.account2 = cls.conn2.get_account()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create({
'x-container-meta-temp-url-key': cls.tempurl_key,
'x-container-meta-temp-url-key-2': cls.tempurl_key2,
'x-container-read': cls.account2.name}):
raise ResponseError(cls.conn.response)
cls.obj = cls.container.file(Utils.create_name())
cls.obj.write("obj contents")
cls.other_obj = cls.container.file(Utils.create_name())
cls.other_obj.write("other obj contents")
class TestContainerTempurl(Base):
env = TestContainerTempurlEnv
set_up = False
def setUp(self):
super(TestContainerTempurl, self).setUp()
if self.env.tempurl_enabled is False:
raise SkipTest("TempURL not enabled")
elif self.env.tempurl_enabled is not True:
# just some sanity checking
raise Exception(
"Expected tempurl_enabled to be True/False, got %r" %
(self.env.tempurl_enabled,))
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key)
self.obj_tempurl_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
def tempurl_sig(self, method, expires, path, key):
return hmac.new(
key,
'%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
hashlib.sha1).hexdigest()
def test_GET(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
# GET tempurls also allow HEAD requests
self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True}))
def test_GET_with_key_2(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key2)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
def test_PUT(self):
new_obj = self.env.container.file(Utils.create_name())
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'PUT', expires, self.env.conn.make_path(new_obj.path),
self.env.tempurl_key)
put_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
new_obj.write('new obj contents',
parms=put_parms, cfg={'no_auth_token': True})
self.assertEqual(new_obj.read(), "new obj contents")
# PUT tempurls also allow HEAD requests
self.assert_(new_obj.info(parms=put_parms,
cfg={'no_auth_token': True}))
def test_HEAD(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
self.env.tempurl_key)
head_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
self.assert_(self.env.obj.info(parms=head_parms,
cfg={'no_auth_token': True}))
# HEAD tempurls don't allow PUT or GET requests, despite the fact that
# PUT and GET tempurls both allow HEAD requests
self.assertRaises(ResponseError, self.env.other_obj.read,
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
self.assertRaises(ResponseError, self.env.other_obj.write,
'new contents',
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
def test_different_object(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
self.assertRaises(ResponseError, self.env.other_obj.read,
cfg={'no_auth_token': True},
parms=self.obj_tempurl_parms)
self.assert_status([401])
def test_changing_sig(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
parms = self.obj_tempurl_parms.copy()
if parms['temp_url_sig'][0] == 'a':
parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
else:
parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
self.assertRaises(ResponseError, self.env.obj.read,
cfg={'no_auth_token': True},
parms=parms)
self.assert_status([401])
def test_changing_expires(self):
contents = self.env.obj.read(
parms=self.obj_tempurl_parms,
cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
parms = self.obj_tempurl_parms.copy()
if parms['temp_url_expires'][-1] == '0':
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
else:
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
self.assertRaises(ResponseError, self.env.obj.read,
cfg={'no_auth_token': True},
parms=parms)
self.assert_status([401])
def test_tempurl_keys_visible_to_account_owner(self):
if not tf.cluster_info.get('tempauth'):
raise SkipTest('TEMP AUTH SPECIFIC TEST')
metadata = self.env.container.info()
self.assertEqual(metadata.get('tempurl_key'), self.env.tempurl_key)
self.assertEqual(metadata.get('tempurl_key2'), self.env.tempurl_key2)
def test_tempurl_keys_hidden_from_acl_readonly(self):
if not tf.cluster_info.get('tempauth'):
raise SkipTest('TEMP AUTH SPECIFIC TEST')
original_token = self.env.container.conn.storage_token
self.env.container.conn.storage_token = self.env.conn2.storage_token
metadata = self.env.container.info()
self.env.container.conn.storage_token = original_token
self.assertTrue('tempurl_key' not in metadata,
'Container TempURL key found, should not be visible '
'to readonly ACLs')
self.assertTrue('tempurl_key2' not in metadata,
'Container TempURL key-2 found, should not be visible '
'to readonly ACLs')
class TestContainerTempurlUTF8(Base2, TestContainerTempurl):
set_up = False
class TestSloTempurlEnv(object):
enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.enabled is None:
cls.enabled = 'tempurl' in cluster_info and 'slo' in cluster_info
cls.tempurl_key = Utils.create_name()
cls.account = Account(
cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
cls.account.update_metadata({'temp-url-key': cls.tempurl_key})
cls.manifest_container = cls.account.container(Utils.create_name())
cls.segments_container = cls.account.container(Utils.create_name())
if not cls.manifest_container.create():
raise ResponseError(cls.conn.response)
if not cls.segments_container.create():
raise ResponseError(cls.conn.response)
seg1 = cls.segments_container.file(Utils.create_name())
seg1.write('1' * 1024 * 1024)
seg2 = cls.segments_container.file(Utils.create_name())
seg2.write('2' * 1024 * 1024)
cls.manifest_data = [{'size_bytes': 1024 * 1024,
'etag': seg1.md5,
'path': '/%s/%s' % (cls.segments_container.name,
seg1.name)},
{'size_bytes': 1024 * 1024,
'etag': seg2.md5,
'path': '/%s/%s' % (cls.segments_container.name,
seg2.name)}]
cls.manifest = cls.manifest_container.file(Utils.create_name())
cls.manifest.write(
json.dumps(cls.manifest_data),
parms={'multipart-manifest': 'put'})
class TestSloTempurl(Base):
env = TestSloTempurlEnv
set_up = False
def setUp(self):
super(TestSloTempurl, self).setUp()
if self.env.enabled is False:
raise SkipTest("TempURL and SLO not both enabled")
elif self.env.enabled is not True:
# just some sanity checking
raise Exception(
"Expected enabled to be True/False, got %r" %
(self.env.enabled,))
def tempurl_sig(self, method, expires, path, key):
return hmac.new(
key,
'%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
hashlib.sha1).hexdigest()
def test_GET(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(self.env.manifest.path),
self.env.tempurl_key)
parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)}
contents = self.env.manifest.read(
parms=parms,
cfg={'no_auth_token': True})
self.assertEqual(len(contents), 2 * 1024 * 1024)
# GET tempurls also allow HEAD requests
self.assert_(self.env.manifest.info(
parms=parms, cfg={'no_auth_token': True}))
class TestSloTempurlUTF8(Base2, TestSloTempurl):
set_up = False
class TestServiceToken(unittest.TestCase):
def setUp(self):
if tf.skip_service_tokens:
raise SkipTest
self.SET_TO_USERS_TOKEN = 1
self.SET_TO_SERVICE_TOKEN = 2
# keystoneauth and tempauth differ in allowing PUT account
# Even if keystoneauth allows it, the proxy-server uses
# allow_account_management to decide if accounts can be created
self.put_account_expect = is_client_error
if tf.swift_test_auth_version != '1':
if cluster_info.get('swift').get('allow_account_management'):
self.put_account_expect = is_success
def _scenario_generator(self):
paths = ((None, None), ('c', None), ('c', 'o'))
for path in paths:
for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
yield method, path[0], path[1]
for path in reversed(paths):
yield 'DELETE', path[0], path[1]
def _assert_is_authed_response(self, method, container, object, resp):
resp.read()
expect = is_success
if method == 'DELETE' and not container:
expect = is_client_error
if method == 'PUT' and not container:
expect = self.put_account_expect
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
% (resp.status, method, container, object))
def _assert_not_authed_response(self, method, container, object, resp):
resp.read()
expect = is_client_error
if method == 'OPTIONS':
expect = is_success
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
% (resp.status, method, container, object))
def prepare_request(self, method, use_service_account=False,
container=None, obj=None, body=None, headers=None,
x_auth_token=None,
x_service_token=None, dbg=False):
"""
Setup for making the request
        When retry() calls the do_request() function, it passes it the
        test user's token, the parsed path, a connection and (optionally)
a token from the test service user. We save options here so that
do_request() can make the appropriate request.
        :param method: The operation (e.g. 'HEAD')
:param use_service_account: Optional. Set True to change the path to
be the service account
:param container: Optional. Adds a container name to the path
:param obj: Optional. Adds an object name to the path
:param body: Optional. Adds a body (string) in the request
:param headers: Optional. Adds additional headers.
:param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
SET_TO_USERS_TOKEN Put the test user's token in
X-Auth-Token
SET_TO_SERVICE_TOKEN Put the service token in X-Auth-Token
        :param x_service_token: Optional. Default is to not set X-Service-Token
               to any value. If specified, one of the following:
SET_TO_USERS_TOKEN Put the test user's token in
X-Service-Token
SET_TO_SERVICE_TOKEN Put the service token in
X-Service-Token
:param dbg: Optional. Set true to check request arguments
"""
self.method = method
self.use_service_account = use_service_account
self.container = container
self.obj = obj
self.body = body
self.headers = headers
if x_auth_token:
self.x_auth_token = x_auth_token
else:
self.x_auth_token = self.SET_TO_USERS_TOKEN
self.x_service_token = x_service_token
self.dbg = dbg
def do_request(self, url, token, parsed, conn, service_token=''):
if self.use_service_account:
path = self._service_account(parsed.path)
else:
path = parsed.path
if self.container:
path += '/%s' % self.container
if self.obj:
path += '/%s' % self.obj
headers = {}
if self.body:
headers.update({'Content-Length': len(self.body)})
if self.headers:
headers.update(self.headers)
if self.x_auth_token == self.SET_TO_USERS_TOKEN:
headers.update({'X-Auth-Token': token})
elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
headers.update({'X-Auth-Token': service_token})
if self.x_service_token == self.SET_TO_USERS_TOKEN:
headers.update({'X-Service-Token': token})
elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
headers.update({'X-Service-Token': service_token})
if self.dbg:
print('DEBUG: conn.request: method:%s path:%s'
' body:%s headers:%s' % (self.method, path, self.body,
headers))
conn.request(self.method, path, self.body, headers=headers)
return check_response(conn)
def _service_account(self, path):
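        # Rewrite the account component of the path (e.g. AUTH_<project>) so
        # it points at the matching service account, <service_prefix><project>.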
parts = path.split('/', 3)
account = parts[2]
try:
project_id = account[account.index('_') + 1:]
except ValueError:
project_id = account
parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
return '/'.join(parts)
def test_user_access_own_auth_account(self):
# This covers ground tested elsewhere (tests a user doing HEAD
# on own account). However, if this fails, none of the remaining
# tests will work
self.prepare_request('HEAD')
resp = retry(self.do_request)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
def test_user_cannot_access_service_account(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj)
resp = retry(self.do_request)
self._assert_not_authed_response(method, container, obj, resp)
def test_service_user_denied_with_x_auth_token(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_not_authed_response(method, container, obj, resp)
def test_service_user_denied_with_x_service_token(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_SERVICE_TOKEN,
x_service_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_not_authed_response(method, container, obj, resp)
def test_user_plus_service_can_access_service_account(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_USERS_TOKEN,
x_service_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_is_authed_response(method, container, obj, resp)
if __name__ == '__main__':
unittest.main()
| 39.238692
| 79
| 0.57702
|
a7313f2731f83bb28c7efde4ca98825e8c573959
| 1,387
|
py
|
Python
|
src/app/parser_xml.py
|
ralphribeiro/facilita-DOU
|
e695ac0f58369d61fad2723bd5e52ecd80d0b33f
|
[
"MIT"
] | null | null | null |
src/app/parser_xml.py
|
ralphribeiro/facilita-DOU
|
e695ac0f58369d61fad2723bd5e52ecd80d0b33f
|
[
"MIT"
] | null | null | null |
src/app/parser_xml.py
|
ralphribeiro/facilita-DOU
|
e695ac0f58369d61fad2723bd5e52ecd80d0b33f
|
[
"MIT"
] | null | null | null |
import re
from dataclasses import dataclass
import logging
from os import listdir
from os.path import join as osjoin
from xml.etree.ElementTree import parse
@dataclass(frozen=True)
class ItemXml:
url: str
identifica: str
data: str
texto: str
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
def parse_xml(xml_file_path: str, pattern: tuple):
logging.info('Procurando em %s', xml_file_path.split('/')[-1])
doc = parse(xml_file_path)
root = doc.getroot()
child = root.find('article')
url = child.attrib['pdfPage']
child2 = child.find('body')
child3 = child2.find('Identifica')
identifica = child3.text
if not identifica or not re.findall(
pattern[0], identifica, flags=re.IGNORECASE
):
return None
child3 = child2.find('Data')
data = child3.text
child3 = child2.find('Texto')
texto = child3.text
texto = remove_tags(texto)
item = ItemXml(url, identifica, data, texto)
r = re.findall(pattern[1], item.texto, flags=re.IGNORECASE)
return item if len(r) > 0 else None
def get_items(path: str, pattern: tuple):
items = set()
xmls = listdir(path)
for xml in xmls:
if xml.endswith('.xml'):
item = parse_xml(osjoin(path, xml), pattern)
if item:
items.add(item)
return items
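# Hedged usage sketch (not part of the original module): a minimal way to call
# get_items(). The directory path and the regex pattern tuple below are
# hypothetical; pattern[0] filters the "Identifica" field and pattern[1]
# filters the article body, as expected by the functions above.
if __name__ == '__main__':
    example_pattern = (r'portaria', r'nomear')
    for item in get_items('/tmp/dou_xml', example_pattern):
        print(item.identifica, item.url)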
| 22.015873
| 66
| 0.638068
|
b4d44a37bd7e6e0e023ef3d099dd3166aeb48d70
| 5,973
|
py
|
Python
|
qmla/exploration_strategies/nv_centre_spin_characterisation/experiment_vary_num_qubits.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
qmla/exploration_strategies/nv_centre_spin_characterisation/experiment_vary_num_qubits.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
qmla/exploration_strategies/nv_centre_spin_characterisation/experiment_vary_num_qubits.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
import random
import sys
import os
import itertools
import pickle
from qmla.exploration_strategies.nv_centre_spin_characterisation import nv_centre_full_access
import qmla.shared_functionality.qinfer_model_interface
import qmla.shared_functionality.probe_set_generation
import qmla.shared_functionality.experiment_design_heuristics
import qmla.shared_functionality.expectation_value_functions
import qmla.shared_functionality.latex_model_names
from qmla import construct_models
class ExperimentNVCentreNQubits(
nv_centre_full_access.FullAccessNVCentre
):
def __init__(
self,
exploration_rules,
**kwargs
):
super().__init__(
exploration_rules=exploration_rules,
**kwargs
)
self.true_model = qmla.utilities.n_qubit_nv_gali_model(n_qubits=2, coupling_terms=['z'])
self.max_num_qubits = 4
self.true_model = qmla.construct_models.alph(self.true_model)
self.initial_models = [
qmla.utilities.n_qubit_nv_gali_model(n, coupling_terms=['z'])
for n in range(2, 1+self.max_num_qubits)
]
self.qhl_models = self.initial_models
# probes
self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.plus_plus_with_phase_difference
self.simulator_probes_generation_subroutine = self.system_probes_generation_subroutine
self.shared_probes = False
self.max_num_probe_qubits = self.max_num_qubits
# experiment design and running
self.expectation_value_subroutine = qmla.shared_functionality.expectation_value_functions.n_qubit_hahn_evolution_double_time_reverse
# self.experimental_dataset = 'NVB_rescale_dataset.p'
# self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MultiParticleGuessHeuristic
self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MixedMultiParticleLinspaceHeuristic
# self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.VolumeAdaptiveParticleGuessHeuristic
self.qinfer_model_subroutine = qmla.shared_functionality.qinfer_model_interface.QInferNVCentreExperiment
self.latex_string_map_subroutine = qmla.shared_functionality.latex_model_names.pauli_set_latex_name
self.max_time_to_consider = 4.24
# Tree
self.max_num_parameter_estimate = 9
self.max_spawn_depth = 8
self.tree_completed_initially = True
# parameter learning
self.gaussian_prior_means_and_widths = {
}
self.max_num_models_by_shape = {
1: 0,
'other': 1
}
# logistics
self.timing_insurance_factor = 0.75
self.num_processes_to_parallelise_over = 6
def get_true_parameters(
self,
):
self.fixed_true_terms = True
self.true_hamiltonian = None
self.true_params_dict = {}
self.true_params_list = []
def get_measurements_by_time(
self
):
data_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data/NVB_rescale_dataset.p'
)
)
self.log_print(
[
"Getting experimental data from {}".format(data_path)
]
)
self.measurements = pickle.load(
open(
data_path,
'rb'
)
)
return self.measurements
def DEPRECATED_latex_name(
self,
name,
**kwargs
):
# print("[latex name fnc] name:", name)
core_operators = list(sorted(qmla.construct_models.core_operator_dict.keys()))
num_sites = qmla.construct_models.get_num_qubits(name)
p_str = 'P' * num_sites
p_str = '+'
separate_terms = name.split(p_str)
site_connections = {}
# for c in list(itertools.combinations(list(range(num_sites + 1)), 2)):
# site_connections[c] = []
term_type_markers = ['pauliSet', 'transverse']
transverse_axis = None
for term in separate_terms:
components = term.split('_')
if 'pauliSet' in components:
components.remove('pauliSet')
for l in components:
if l[0] == 'd':
dim = int(l.replace('d', ''))
elif l[0] in core_operators:
operators = l.split('J')
else:
sites = l.split('J')
sites = tuple([int(a) for a in sites])
# assumes like-like pauli terms like xx, yy, zz
op = operators[0]
try:
site_connections[sites].append(op)
except:
site_connections[sites] = [op]
elif 'transverse' in components:
components.remove('transverse')
for l in components:
if l[0] == 'd':
transverse_dim = int(l.replace('d', ''))
elif l in core_operators:
transverse_axis = l
ordered_connections = list(sorted(site_connections.keys()))
latex_term = ""
for c in ordered_connections:
if len(site_connections[c]) > 0:
this_term = r"\sigma_{"
this_term += str(c)
this_term += "}"
this_term += "^{"
for t in site_connections[c]:
this_term += "{}".format(t)
this_term += "}"
latex_term += this_term
if transverse_axis is not None:
latex_term += 'T^{}_{}'.format(transverse_axis, transverse_dim)
latex_term = "${}$".format(latex_term)
return latex_term
| 35.553571
| 140
| 0.609911
|
4cfe8138884e6ed0f94dce19081520eec05adccd
| 1,853
|
py
|
Python
|
apps/log_extract/migrations/0016_tasks_ex_data.py
|
qqqqqie/bk-log
|
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
|
[
"MIT"
] | 75
|
2021-07-14T09:32:36.000Z
|
2022-03-31T15:26:53.000Z
|
apps/log_extract/migrations/0016_tasks_ex_data.py
|
qqqqqie/bk-log
|
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
|
[
"MIT"
] | 561
|
2021-07-14T07:45:47.000Z
|
2022-03-31T11:41:28.000Z
|
apps/log_extract/migrations/0016_tasks_ex_data.py
|
qqqqqie/bk-log
|
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
|
[
"MIT"
] | 41
|
2021-07-14T07:39:50.000Z
|
2022-03-25T09:22:18.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Generated by Django 1.11.23 on 2020-09-22 03:11
from __future__ import unicode_literals
import apps.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("log_extract", "0015_migrate_remark_to_process_info"),
]
operations = [
migrations.AddField(
model_name="tasks",
name="ex_data",
field=apps.models.JsonField(blank=True, null=True, verbose_name="额外数据"),
),
]
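# Hedged usage note (not part of the original migration): a migration like this
# is applied with Django's standard management command, for example:
#
#   python manage.py migrate log_extract 0016_tasks_ex_data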
| 46.325
| 111
| 0.727469
|
7eb2ee109a14e58ff7bb57a9264da21ef01071ae
| 14,879
|
py
|
Python
|
sdk/keyvault/azure-keyvault-secrets/tests/test_secrets_async.py
|
lmcarreiro/azure-sdk-for-python
|
0bde943383725320eaaa1408fa6264fb0cd0febf
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-secrets/tests/test_secrets_async.py
|
lmcarreiro/azure-sdk-for-python
|
0bde943383725320eaaa1408fa6264fb0cd0febf
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-secrets/tests/test_secrets_async.py
|
lmcarreiro/azure-sdk-for-python
|
0bde943383725320eaaa1408fa6264fb0cd0febf
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import hashlib
import os
import logging
import json
from azure.core.exceptions import ResourceNotFoundError
from devtools_testutils import ResourceGroupPreparer
from secrets_async_preparer import AsyncVaultClientPreparer
from secrets_async_test_case import AsyncKeyVaultTestCase
from dateutil import parser as date_parse
# used for logging tests
class MockHandler(logging.Handler):
def __init__(self):
super(MockHandler, self).__init__()
self.messages = []
def emit(self, record):
self.messages.append(record)
class KeyVaultSecretTest(AsyncKeyVaultTestCase):
# incorporate md5 hashing of run identifier into resource group name for uniqueness
name_prefix = "kv-test-" + hashlib.md5(os.environ['RUN_IDENTIFIER'].encode()).hexdigest()[-3:]
def _assert_secret_attributes_equal(self, s1, s2):
self.assertEqual(s1.name, s2.name)
self.assertEqual(s1.vault_url, s2.vault_url)
self.assertEqual(s1.content_type, s2.content_type)
self.assertEqual(s1.enabled, s2.enabled)
self.assertEqual(s1.not_before, s2.not_before)
self.assertEqual(s1.expires_on, s2.expires_on)
self.assertEqual(s1.created_on, s2.created_on)
self.assertEqual(s1.updated_on, s2.updated_on)
self.assertEqual(s1.recovery_level, s2.recovery_level)
self.assertEqual(s1.key_id, s2.key_id)
def _validate_secret_bundle(self, secret_attributes, vault, secret_name, secret_value):
prefix = "/".join(s.strip("/") for s in [vault, "secrets", secret_name])
id = secret_attributes.id
self.assertTrue(id.index(prefix) == 0, "Id should start with '{}', but value is '{}'".format(prefix, id))
self.assertEqual(
secret_attributes.value,
secret_value,
"value should be '{}', but is '{}'".format(secret_value, secret_attributes.value),
)
self.assertTrue(
secret_attributes.properties.created_on and secret_attributes.properties.updated_on,
"Missing required date attributes.",
)
async def _validate_secret_list(self, secrets, expected):
async for secret in secrets:
# TODO: what if secrets contains unexpected entries?
if secret.name in expected.keys():
expected_secret = expected[secret.name]
self._assert_secret_attributes_equal(expected_secret.properties, secret)
del expected[secret.name]
self.assertEqual(len(expected), 0)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer()
@AsyncKeyVaultTestCase.await_prepared_test
async def test_secret_crud_operations(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
secret_name = "crud-secret"
secret_value = self.get_resource_name("crud_secret_value")
# create secret
created = await client.set_secret(secret_name, secret_value)
self._validate_secret_bundle(created, vault_client.vault_url, secret_name, secret_value)
# set secret with optional arguments
not_before = date_parse.parse("2015-02-02T08:00:00.000Z")
enabled = True
tags = {"foo": "created tag"}
created = await client.set_secret(secret_name, secret_value, enabled=enabled, not_before=not_before, tags=tags)
self._validate_secret_bundle(created, vault_client.vault_url, secret_name, secret_value)
self.assertEqual(enabled, created.properties.enabled)
self.assertEqual(not_before, created.properties.not_before)
self.assertEqual(tags, created.properties.tags)
# get secret without version
retrieved_secret = await client.get_secret(created.name, "")
self.assertEqual(created.id, retrieved_secret.id)
self._assert_secret_attributes_equal(created.properties, retrieved_secret.properties)
# get secret with version
secret_with_version = await client.get_secret(created.name, created.properties.version)
        self.assertEqual(created.id, secret_with_version.id)
self._assert_secret_attributes_equal(created.properties, secret_with_version.properties)
async def _update_secret(secret):
content_type = "text/plain"
expires = date_parse.parse("2050-02-02T08:00:00.000Z")
tags = {"foo": "updated tag"}
enabled = not secret.properties.enabled
updated_secret = await client.update_secret_properties(
secret.name,
version=secret.properties.version,
content_type=content_type,
expires_on=expires,
tags=tags,
enabled=enabled,
)
self.assertEqual(tags, updated_secret.tags)
self.assertEqual(secret.id, updated_secret.id)
self.assertEqual(content_type, updated_secret.content_type)
self.assertEqual(expires, updated_secret.expires_on)
self.assertNotEqual(secret.properties.enabled, updated_secret.enabled)
self.assertNotEqual(secret.properties.updated_on, updated_secret.updated_on)
return updated_secret
# update secret with version
if self.is_live:
# wait a second to ensure the secret's update time won't equal its creation time
await asyncio.sleep(1)
updated = await _update_secret(created)
# delete secret
if self.is_playback:
polling_interval = 0
else:
polling_interval = None
deleted = await client.delete_secret(updated.name, _polling_interval=polling_interval)
self.assertIsNotNone(deleted)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer()
@AsyncKeyVaultTestCase.await_prepared_test
async def test_secret_list(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
max_secrets = self.list_test_size
expected = {}
# create many secrets
for x in range(0, max_secrets):
secret_name = "sec{}".format(x)
secret_value = self.get_resource_name("secVal{}".format(x))
secret = None
while not secret:
secret = await client.set_secret(secret_name, secret_value)
expected[secret_name] = secret
# list secrets
result = client.list_properties_of_secrets(max_page_size=max_secrets)
await self._validate_secret_list(result, expected)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer(enable_soft_delete=True)
@AsyncKeyVaultTestCase.await_prepared_test
async def test_list_deleted_secrets(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
expected = {}
# create secrets
for i in range(self.list_test_size):
secret_name = "secret{}".format(i)
secret_value = "value{}".format(i)
expected[secret_name] = await client.set_secret(secret_name, secret_value)
if self.is_playback:
polling_interval = 0
else:
polling_interval = None
# delete them
for secret_name in expected.keys():
await client.delete_secret(secret_name, _polling_interval=polling_interval)
# validate list deleted secrets with attributes
async for deleted_secret in client.list_deleted_secrets():
self.assertIsNotNone(deleted_secret.deleted_date)
self.assertIsNotNone(deleted_secret.scheduled_purge_date)
self.assertIsNotNone(deleted_secret.recovery_id)
expected_secret = expected[deleted_secret.name]
self._assert_secret_attributes_equal(expected_secret.properties, deleted_secret.properties)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer()
@AsyncKeyVaultTestCase.await_prepared_test
async def test_list_versions(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
secret_name = self.get_resource_name("sec")
secret_value = self.get_resource_name("secVal")
max_secrets = self.list_test_size
expected = {}
# create many secret versions
for _ in range(0, max_secrets):
secret = None
while not secret:
secret = await client.set_secret(secret_name, secret_value)
expected[secret.id] = secret
# list secret versions
result = client.list_properties_of_secret_versions(secret_name)
# validate list secret versions with attributes
async for secret in result:
if secret.id in expected.keys():
expected_secret = expected[secret.id]
del expected[secret.id]
self._assert_secret_attributes_equal(expected_secret.properties, secret)
self.assertEqual(len(expected), 0)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer()
@AsyncKeyVaultTestCase.await_prepared_test
async def test_backup_restore(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
secret_name = self.get_resource_name("secbak")
secret_value = self.get_resource_name("secVal")
# create secret
created_bundle = await client.set_secret(secret_name, secret_value)
# backup secret
secret_backup = await client.backup_secret(created_bundle.name)
self.assertIsNotNone(secret_backup, "secret_backup")
# delete secret
if self.is_playback:
polling_interval = 0
else:
polling_interval = None
await client.delete_secret(created_bundle.name, _polling_interval=polling_interval)
# restore secret
restored = await client.restore_secret_backup(secret_backup)
self.assertEqual(created_bundle.id, restored.id)
self._assert_secret_attributes_equal(created_bundle.properties, restored)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer(enable_soft_delete=True)
@AsyncKeyVaultTestCase.await_prepared_test
async def test_recover(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
secrets = {}
# create secrets to recover
for i in range(self.list_test_size):
secret_name = "secret{}".format(i)
secret_value = "value{}".format(i)
secrets[secret_name] = await client.set_secret(secret_name, secret_value)
# delete all secrets
if self.is_playback:
polling_interval = 0
else:
polling_interval = None
for secret_name in secrets.keys():
await client.delete_secret(secret_name, _polling_interval=polling_interval)
# validate all our deleted secrets are returned by list_deleted_secrets
async for deleted_secret in client.list_deleted_secrets():
assert deleted_secret.name in secrets
# recover select secrets
for secret_name in secrets.keys():
await client.recover_deleted_secret(secret_name, _polling_interval=polling_interval)
# validate the recovered secrets exist
await self._poll_until_no_exception(
client.get_secret, *secrets.keys(), expected_exception=ResourceNotFoundError
)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer(enable_soft_delete=True)
@AsyncKeyVaultTestCase.await_prepared_test
async def test_purge(self, vault_client, **kwargs):
self.assertIsNotNone(vault_client)
client = vault_client.secrets
secrets = {}
# create secrets to purge
for i in range(self.list_test_size):
secret_name = "secret{}".format(i)
secret_value = "value{}".format(i)
secrets[secret_name] = await client.set_secret(secret_name, secret_value)
# delete all secrets
if self.is_playback:
polling_interval = 0
else:
polling_interval = None
for secret_name in secrets.keys():
await client.delete_secret(secret_name, _polling_interval=polling_interval)
# validate all our deleted secrets are returned by list_deleted_secrets
async for deleted_secret in client.list_deleted_secrets():
assert deleted_secret.name in secrets
# purge secrets
for secret_name in secrets.keys():
await client.purge_deleted_secret(secret_name)
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer(client_kwargs={'logging_enable': True})
@AsyncKeyVaultTestCase.await_prepared_test
async def test_logging_enabled(self, vault_client, **kwargs):
client = vault_client.secrets
mock_handler = MockHandler()
logger = logging.getLogger('azure')
logger.addHandler(mock_handler)
logger.setLevel(logging.DEBUG)
await client.set_secret("secret-name", "secret-value")
for message in mock_handler.messages:
if message.levelname == 'DEBUG' and message.funcName == 'on_request':
try:
body = json.loads(message.message)
if body['value'] == 'secret-value':
return
except (ValueError, KeyError):
                    # this means the message is not JSON or has no value property
pass
assert False, "Expected request body wasn't logged"
@ResourceGroupPreparer(name_prefix=name_prefix)
@AsyncVaultClientPreparer()
@AsyncKeyVaultTestCase.await_prepared_test
async def test_logging_disabled(self, vault_client, **kwargs):
client = vault_client.secrets
mock_handler = MockHandler()
logger = logging.getLogger('azure')
logger.addHandler(mock_handler)
logger.setLevel(logging.DEBUG)
await client.set_secret("secret-name", "secret-value")
for message in mock_handler.messages:
if message.levelname == 'DEBUG' and message.funcName == 'on_request':
try:
body = json.loads(message.message)
assert body["value"] != "secret-value", "Client request body was logged"
except (ValueError, KeyError):
                    # this means the message is not JSON or has no value property
pass
| 41.10221
| 119
| 0.671618
|
2b580f9ab3954b6f127ad6309f3e44c9bea888cc
| 2,662
|
py
|
Python
|
gelweb/gel2mdt/templatetags/gel2mdt_extras.py
|
moka-guys/GeL2MDT
|
09bf25b8452e2e887dbf74b1cd4771d234c6166c
|
[
"MIT"
] | null | null | null |
gelweb/gel2mdt/templatetags/gel2mdt_extras.py
|
moka-guys/GeL2MDT
|
09bf25b8452e2e887dbf74b1cd4771d234c6166c
|
[
"MIT"
] | 1
|
2020-02-06T13:17:40.000Z
|
2020-02-06T13:17:40.000Z
|
gelweb/gel2mdt/templatetags/gel2mdt_extras.py
|
byronmews/GeL2MDT
|
1449831f0d7c570b71e7f46fb4dd1fcb805b0325
|
[
"MIT"
] | null | null | null |
"""Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import subprocess
from django import template
from gel2mdt.config import load_config
register = template.Library()
config = load_config.LoadConfig().load()
def is_git_repo():
if subprocess.call(["git", "branch"], stderr=subprocess.STDOUT, stdout=open(os.devnull, 'w')) != 0:
return False
else:
return True
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def sort_by(queryset, order):
return queryset.order_by(order)
@register.simple_tag
def version_number():
if not is_git_repo():
return config["VERSION_NUMBER"]
version_fetch_cmd = "git tag | sort -V | tail -1"
version_fetch_process = subprocess.Popen(
version_fetch_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
version_fetch_out, version_fetch_err = version_fetch_process.communicate()
version = str(version_fetch_out, "utf-8")
return version
@register.simple_tag
def build():
if not is_git_repo():
return ''
build_fetch_cmd = "git log -1 --stat"
build_fetch_process = subprocess.Popen(
build_fetch_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
build_fetch_out, build_fetch_err = build_fetch_process.communicate()
build_fetch = str(build_fetch_out, "utf-8").split(' ')
build_hash = build_fetch[1][:6]
return 'build ' + build_hash
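# Hedged usage sketch (not part of the original module): the filters and tags
# registered above are meant to be used from Django templates, roughly as
# below; the context variables are hypothetical.
#
#   {% load gel2mdt_extras %}
#   Version {% version_number %} {% build %}
#   {{ some_dict|get_item:some_key }}
#   {% for row in queryset|sort_by:"-id" %} ... {% endfor %}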
| 34.128205
| 103
| 0.729902
|
0a30b48c6476b3d83b51a91797cceb992c158408
| 1,039
|
py
|
Python
|
silver/models/__init__.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 222
|
2017-01-15T10:30:57.000Z
|
2022-03-08T20:34:46.000Z
|
silver/models/__init__.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 141
|
2017-01-11T10:56:49.000Z
|
2021-10-12T11:51:00.000Z
|
silver/models/__init__.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 76
|
2017-01-10T13:50:27.000Z
|
2022-03-25T21:37:00.000Z
|
# Copyright (c) 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from silver.models.billing_entities import Customer, Provider
from silver.models.documents import Proforma, Invoice, BillingDocumentBase, DocumentEntry, PDF
from silver.models.plans import Plan, MeteredFeature
from silver.models.product_codes import ProductCode
from silver.models.subscriptions import Subscription, MeteredFeatureUnitsLog, BillingLog
from silver.models.payment_methods import PaymentMethod
from silver.models.transactions import Transaction
| 47.227273
| 94
| 0.810395
|
4c197ac3ddbc04ebcd93b28da7c0e704e7c5ce6d
| 2,197
|
py
|
Python
|
astroquery/alma/tests/test_alma_utils.py
|
jfoster17/astroquery
|
807eade21e63f773cf7df8833799afa5eac8b79d
|
[
"BSD-3-Clause"
] | 1
|
2021-03-20T00:07:01.000Z
|
2021-03-20T00:07:01.000Z
|
astroquery/alma/tests/test_alma_utils.py
|
jfoster17/astroquery
|
807eade21e63f773cf7df8833799afa5eac8b79d
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/alma/tests/test_alma_utils.py
|
jfoster17/astroquery
|
807eade21e63f773cf7df8833799afa5eac8b79d
|
[
"BSD-3-Clause"
] | 1
|
2021-03-20T00:07:05.000Z
|
2021-03-20T00:07:05.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import wcs
from astropy import units as u
from pyregion.parser_helper import Shape
from astropy.tests.helper import pytest, remote_data
from .. import utils
def test_pyregion_subset():
header = dict(naxis=2, crpix1=15, crpix2=15, crval1=0.1, crval2=0.1,
cdelt1=-1./3600, cdelt2=1./3600., ctype1='GLON-CAR',
ctype2='GLAT-CAR')
mywcs = wcs.WCS(header)
# circle with radius 10" at 0.1, 0.1
shape = Shape('circle', (0.1, 0.1, 10./3600.))
shape.coord_format = 'galactic'
shape.coord_list = (0.1, 0.1, 10./3600.)
shape.attr = ([], {})
data = np.ones([40,40])
(xlo,xhi,ylo,yhi), d = utils.pyregion_subset(shape, data, mywcs)
    assert d.sum() == 314  # approximately pi * 10**2 (circle of radius 10 pixels)
np.testing.assert_almost_equal(xlo, data.shape[0]/2-mywcs.wcs.crpix[0]-1)
np.testing.assert_almost_equal(xhi, data.shape[0]-mywcs.wcs.crpix[0]-1)
np.testing.assert_almost_equal(ylo, data.shape[1]/2-mywcs.wcs.crpix[1]-1)
np.testing.assert_almost_equal(yhi, data.shape[1]-mywcs.wcs.crpix[1]-1)
frq_sup_str = ('[86.26..88.14GHz,976.56kHz, XX YY] U '
'[88.15..90.03GHz,976.56kHz, XX YY] U '
'[98.19..100.07GHz,976.56kHz, XX YY] U '
'[100.15..102.03GHz,976.56kHz, XX YY]')
beamsizes = u.Quantity([ 72.09546309, 70.56599373, 63.41898902, 62.18937958], unit=u.arcsec)
franges = u.Quantity( [[ 86.26, 88.14], [ 88.15, 90.03], [ 98.19, 100.07],
[ 100.15, 102.03]], unit=u.GHz)
def test_parse_frequency_support(frq_sup_str=frq_sup_str, result=franges):
assert np.all(utils.parse_frequency_support(frq_sup_str) == result)
def approximate_primary_beam_sizes(frq_sup_str=frq_sup_str, beamsizes=beamsizes):
assert np.all(utils.approximate_primary_beam_sizes(frq_sup_str) == beamsizes)
@remote_data
def test_make_finder_chart():
result = utils.make_finder_chart('Eta Carinae', 3*u.arcmin, 'Eta Carinae')
images, catalog, hit_mask_public, hit_mask_private = result
assert len(catalog) >= 7
assert len(images) >= 1
assert hit_mask_public[3].mean() >= 49
| 43.078431
| 92
| 0.667274
|
5976963293517ef6fc5e5c15f50c33dc4cc3f183
| 25,111
|
py
|
Python
|
RecoLuminosity/LumiDB/python/lumidbDDL.py
|
DBAnthony/cmssw
|
6406d33feab56ab2af79b00b533f62b5368ac33e
|
[
"Apache-2.0"
] | 1
|
2020-08-12T08:37:04.000Z
|
2020-08-12T08:37:04.000Z
|
RecoLuminosity/LumiDB/python/lumidbDDL.py
|
DBAnthony/cmssw
|
6406d33feab56ab2af79b00b533f62b5368ac33e
|
[
"Apache-2.0"
] | null | null | null |
RecoLuminosity/LumiDB/python/lumidbDDL.py
|
DBAnthony/cmssw
|
6406d33feab56ab2af79b00b533f62b5368ac33e
|
[
"Apache-2.0"
] | 1
|
2019-03-19T13:44:54.000Z
|
2019-03-19T13:44:54.000Z
|
from __future__ import print_function
import coral
from RecoLuminosity.LumiDB import nameDealer,dbUtil
#=======================================================
#
# CREATE
#
#=======================================================
def createTables(schema):
'''
create new tables if not exist
revisions,revisions_id,luminorms,luminorms_entries,luminorms_entries_id,fillscheme
'''
try:
created=[]
db=dbUtil.dbUtil(schema)
if not schema.existsTable(nameDealer.fillschemeTableName()):
print('creating fillscheme table')
fillschemeTab=coral.TableDescription()
fillschemeTab.setName( nameDealer.fillschemeTableName() )
fillschemeTab.insertColumn( 'FILLSCHEME_ID','unsigned long long' )
fillschemeTab.insertColumn( 'FILLSCHEMEPATTERN','string',128,False )
fillschemeTab.insertColumn( 'CORRECTIONFACTOR','float' )
fillschemeTab.setPrimaryKey( 'FILLSCHEME_ID' )
db.createTable(fillschemeTab,withIdTable=True)
created.append( nameDealer.fillschemeTableName() )
if not schema.existsTable(nameDealer.revisionTableName()):
print('creating revisions table')
revisionsTab=coral.TableDescription()
revisionsTab.setName( nameDealer.revisionTableName() )
revisionsTab.insertColumn( 'REVISION_ID','unsigned long long')
revisionsTab.insertColumn( 'BRANCH_ID','unsigned long long')
revisionsTab.insertColumn( 'NAME', 'string',56,False)
revisionsTab.insertColumn( 'BRANCH_NAME', 'string',56,False)
revisionsTab.insertColumn( 'COMMENT', 'string',1024,False)
revisionsTab.insertColumn( 'CTIME', 'time stamp',6)
revisionsTab.setPrimaryKey( 'REVISION_ID' )
revisionsTab.setUniqueConstraint(('NAME'))
db.createTable(revisionsTab,withIdTable=True)
created.append(nameDealer.revisionTableName())
if not schema.existsTable(nameDealer.luminormTableName()):
print('creating luminorms table')
luminormsTab=coral.TableDescription()
luminormsTab.setName( nameDealer.luminormTableName() )
luminormsTab.insertColumn( 'DATA_ID','unsigned long long')
luminormsTab.insertColumn( 'ENTRY_ID','unsigned long long')
luminormsTab.insertColumn( 'ENTRY_NAME','string',56,False)
luminormsTab.insertColumn( 'AMODETAG', 'string',28,False)
luminormsTab.insertColumn( 'NORM_1', 'float')
luminormsTab.insertColumn( 'EGEV_1', 'unsigned int')
luminormsTab.insertColumn( 'NORM_2', 'float')
luminormsTab.insertColumn( 'EGEV_2', 'unsigned int')
luminormsTab.setPrimaryKey( 'DATA_ID' )
db.createTable(luminormsTab,withIdTable=True,withEntryTables=True,withRevMapTable=True)
created.append(nameDealer.luminormTableName())
if not schema.existsTable(nameDealer.lumidataTableName()):
print('creating lumidata table')
lumidataTab=coral.TableDescription()
lumidataTab.setName( nameDealer.lumidataTableName() )
lumidataTab.insertColumn( 'DATA_ID','unsigned long long')
lumidataTab.insertColumn( 'ENTRY_ID','unsigned long long')
lumidataTab.insertColumn( 'ENTRY_NAME','string',56,False)
lumidataTab.insertColumn( 'SOURCE', 'string',128,False)
lumidataTab.insertColumn( 'RUNNUM', 'unsigned int')
lumidataTab.insertColumn( 'NOMINALEGEV', 'float')
lumidataTab.setPrimaryKey( 'DATA_ID' )
db.createTable(lumidataTab,withIdTable=True,withEntryTables=True,withRevMapTable=True)
created.append(nameDealer.lumidataTableName())
if not schema.existsTable(nameDealer.lumisummaryv2TableName() ):
print('creating lumisummaryv2 table')
summary=coral.TableDescription()
summary.setName( nameDealer.lumisummaryv2TableName() )
summary.insertColumn('DATA_ID','unsigned long long')
summary.insertColumn('RUNNUM','unsigned int')
summary.insertColumn('LUMILSNUM','unsigned int')
summary.insertColumn('CMSLSNUM','unsigned int')
summary.insertColumn('INSTLUMI','float')
summary.insertColumn('INSTLUMIERROR','float')
summary.insertColumn('INSTLUMIQUALITY','short')
summary.insertColumn('BEAMSTATUS','string',28,False)
summary.insertColumn('BEAMENERGY','float')
summary.insertColumn('NUMORBIT','unsigned int')
summary.insertColumn('STARTORBIT','unsigned int')
summary.insertColumn('CMSBXINDEXBLOB','blob')
summary.insertColumn('BEAMINTENSITYBLOB_1','blob')
summary.insertColumn('BEAMINTENSITYBLOB_2','blob')
summary.insertColumn('BXLUMIVALUE_OCC1','blob')
summary.insertColumn('BXLUMIVALUE_OCC2','blob')
summary.insertColumn('BXLUMIVALUE_ET','blob')
summary.insertColumn('BXLUMIERROR_OCC1','blob')
summary.insertColumn('BXLUMIERROR_OCC2','blob')
summary.insertColumn('BXLUMIERROR_ET','blob')
summary.insertColumn('BXLUMIQUALITY_OCC1','blob')
summary.insertColumn('BXLUMIQUALITY_OCC2','blob')
summary.insertColumn('BXLUMIQUALITY_ET','blob')
summary.setPrimaryKey(('DATA_ID','LUMILSNUM'))
db.createTable(summary,withIdTable=False)
created.append(nameDealer.lumisummaryv2TableName())
#
# This table exists in the old schema
#
if not schema.existsTable(nameDealer.cmsrunsummaryTableName()):
print('creating cmsrunsummary table')
cmsrunsummary=coral.TableDescription()
cmsrunsummary.setName( nameDealer.cmsrunsummaryTableName() )
cmsrunsummary.insertColumn('RUNNUM','unsigned int')
cmsrunsummary.insertColumn('HLTKEY','string',128,False)
cmsrunsummary.insertColumn('L1KEY','string',128,False)
cmsrunsummary.insertColumn('FILLNUM','unsigned int')
cmsrunsummary.insertColumn('SEQUENCE','string',56,False)
cmsrunsummary.insertColumn('STARTTIME','time stamp',6)
cmsrunsummary.insertColumn('STOPTIME','time stamp',6)
cmsrunsummary.insertColumn('EGEV','unsigned int')
cmsrunsummary.insertColumn('AMODETAG','string',28,False)
cmsrunsummary.insertColumn('FILLSCHEME','string',128,False)
            cmsrunsummary.insertColumn('NCOLLIDINGBUNCHES','unsigned int')
cmsrunsummary.setPrimaryKey('RUNNUM')
db.createTable(cmsrunsummary,withIdTable=False)
created.append(nameDealer.cmsrunsummaryTableName())
#
# This table exists in the old schema
#
if not schema.existsTable(nameDealer.trghltMapTableName()):
print('creating trghltmap table')
trghlt=coral.TableDescription()
trghlt.setName( nameDealer.trghltMapTableName() )
trghlt.insertColumn( 'HLTKEY','string',128,False )
trghlt.insertColumn( 'HLTPATHNAME','string',256,False )
trghlt.insertColumn( 'L1SEED','string' ,1024,False)
trghlt.setNotNullConstraint('HLTKEY',True)
trghlt.setNotNullConstraint('HLTPATHNAME',True)
trghlt.setNotNullConstraint('L1SEED',True)
db.createTable(trghlt,withIdTable=False)
created.append(nameDealer.trghltMapTableName())
if not schema.existsTable(nameDealer.trgdataTableName()):
print('creating trgdata table')
trgdataTab=coral.TableDescription()
trgdataTab.setName( nameDealer.trgdataTableName() )
trgdataTab.insertColumn( 'DATA_ID','unsigned long long')
trgdataTab.insertColumn( 'ENTRY_ID','unsigned long long')
trgdataTab.insertColumn( 'ENTRY_NAME','string',56,False)
trgdataTab.insertColumn( 'SOURCE', 'string',128,False)
trgdataTab.insertColumn( 'RUNNUM', 'unsigned int')
trgdataTab.insertColumn( 'BITZERONAME', 'string',56,False)
trgdataTab.insertColumn( 'BITNAMECLOB', 'string',6000)
trgdataTab.setPrimaryKey( 'DATA_ID' )
db.createTable(trgdataTab,withIdTable=True,withEntryTables=True,withRevMapTable=True)
created.append(nameDealer.trgdataTableName())
if not schema.existsTable(nameDealer.lstrgTableName()):
print('creating lstrg table')
lstrgTab=coral.TableDescription()
lstrgTab.setName( nameDealer.lstrgTableName() )
lstrgTab.insertColumn( 'DATA_ID','unsigned long long')
lstrgTab.insertColumn( 'RUNNUM', 'unsigned int')
lstrgTab.insertColumn( 'CMSLSNUM', 'unsigned int')
lstrgTab.insertColumn( 'DEADTIMECOUNT', 'unsigned long long')
lstrgTab.insertColumn( 'BITZEROCOUNT', 'unsigned int')
lstrgTab.insertColumn( 'BITZEROPRESCALE', 'unsigned int')
lstrgTab.insertColumn( 'DEADFRAC', 'float')
lstrgTab.insertColumn( 'PRESCALEBLOB', 'blob')
lstrgTab.insertColumn( 'TRGCOUNTBLOB', 'blob')
lstrgTab.setPrimaryKey( ('DATA_ID','CMSLSNUM') )
db.createTable(lstrgTab,withIdTable=False)
created.append( nameDealer.lstrgTableName() )
if not schema.existsTable(nameDealer.hltdataTableName()):
print('creating hltdata table')
hltdataTab=coral.TableDescription()
hltdataTab.setName( nameDealer.hltdataTableName() )
hltdataTab.insertColumn( 'DATA_ID','unsigned long long')
hltdataTab.insertColumn( 'ENTRY_ID','unsigned long long')
hltdataTab.insertColumn( 'ENTRY_NAME','string',56,False)
hltdataTab.insertColumn( 'RUNNUM', 'unsigned int')
hltdataTab.insertColumn( 'SOURCE', 'string',128,False)
hltdataTab.insertColumn( 'NPATH', 'unsigned int')
hltdataTab.insertColumn( 'PATHNAMECLOB', 'string',6000)
hltdataTab.setPrimaryKey( 'DATA_ID' )
db.createTable(hltdataTab,withIdTable=True,withEntryTables=True,withRevMapTable=True)
created.append(nameDealer.hltTableName())
if not schema.existsTable(nameDealer.lshltTableName()):
print('creating lshlt table')
lshltTab=coral.TableDescription()
lshltTab.setName( nameDealer.lshltTableName() )
lshltTab.insertColumn( 'DATA_ID','unsigned long long')
lshltTab.insertColumn( 'RUNNUM', 'unsigned int')
lshltTab.insertColumn( 'CMSLSNUM', 'unsigned int')
lshltTab.insertColumn( 'PRESCALEBLOB', 'blob')
lshltTab.insertColumn( 'HLTCOUNTBLOB', 'blob')
lshltTab.insertColumn( 'HLTACCEPTBLOB', 'blob')
            lshltTab.setPrimaryKey( ('DATA_ID','CMSLSNUM') )
            db.createTable(lshltTab,withIdTable=False)
created.append(nameDealer.lshltTableName())
if not schema.existsTable(nameDealer.lumivalidationTableName()):
print('creating lumivalidation table')
lumivalidation=coral.TableDescription()
lumivalidation.setName( nameDealer.lumivalidationTableName() )
lumivalidation.insertColumn( 'RUNNUM','unsigned int' )
lumivalidation.insertColumn( 'CMSLSNUM','unsigned int' )
lumivalidation.insertColumn( 'FLAG','string',28,False )
lumivalidation.insertColumn( 'COMMENT','string',1024,False )
lumivalidation.setPrimaryKey( ('RUNNUM','CMSLSNUM') )
lumivalidation.setNotNullConstraint('FLAG',True)
db.createTable(lumivalidation,withIdTable=False)
created.append(nameDealer.lumivalidationTableName())
return created
except :
raise
#=======================================================
#
# DROP
#
#=======================================================
def dropTables(schema,tablelist):
try:
db=dbUtil.dbUtil(schema)
for tablename in tablelist:
if tablename in [nameDealer.luminormTableName(),nameDealer.lumidataTableName(),nameDealer.trgdataTableName(),nameDealer.hltdataTableName()]:
db.dropTable( nameDealer.idTableName(tablename) )
db.dropTable( nameDealer.entryTableName(tablename) )
db.dropTable( nameDealer.revmapTableName(tablename) )
if tablename in [nameDealer.trgTableName(),nameDealer.lumisummaryTableName(),nameDealer.lumisummaryv2TableName(),nameDealer.lumidetailTableName(),nameDealer.hltTableName()]:
db.dropTable( nameDealer.idTableName(tablename) )
db.dropTable( tablename )
except :
raise
def createOldSchema(schema):
'''
create tables of lumidb1 if not exist
'''
try:
created=[]
db=dbUtil.dbUtil(schema)
if not schema.existsTable(nameDealer.lumivalidationTableName()):
lumivalidation=coral.TableDescription()
lumivalidation.setName( nameDealer.lumivalidationTableName() )
lumivalidation.insertColumn( 'RUNNUM','unsigned int' )
lumivalidation.insertColumn( 'CMSLSNUM','unsigned int' )
lumivalidation.insertColumn( 'FLAG','string',28,False )
lumivalidation.insertColumn( 'COMMENT','string',1024,False )
lumivalidation.setPrimaryKey(('RUNNUM','CMSLSNUM'))
lumivalidation.setNotNullConstraint('FLAG',True)
db.createTable(lumivalidation,withIdTable=False)
created.append(nameDealer.lumivalidationTableName())
if not schema.existsTable(nameDealer.cmsrunsummaryTableName()):
cmsrunsummary=coral.TableDescription()
cmsrunsummary.setName( nameDealer.cmsrunsummaryTableName() )
cmsrunsummary.insertColumn('RUNNUM','unsigned int')
cmsrunsummary.insertColumn('HLTKEY','string',128,False)
cmsrunsummary.insertColumn('FILLNUM','unsigned int')
cmsrunsummary.insertColumn('SEQUENCE','string',56,False)
cmsrunsummary.insertColumn('STARTTIME','time stamp',6)
cmsrunsummary.insertColumn('STOPTIME','time stamp',6)
cmsrunsummary.setPrimaryKey('RUNNUM')
cmsrunsummary.setNotNullConstraint('HLTKEY',True)
cmsrunsummary.setNotNullConstraint('FILLNUM',True)
cmsrunsummary.setNotNullConstraint('SEQUENCE',True)
cmsrunsummary.createIndex('cmsrunsummary_fillnum',('FILLNUM'))
cmsrunsummary.createIndex('cmsrunsummary_startime',('STARTTIME'))
db.createTable(cmsrunsummary,withIdTable=False)
created.append(nameDealer.cmsrunsummaryTableName())
if not schema.existsTable(nameDealer.lumisummaryTableName()):
summary=coral.TableDescription()
summary.setName( nameDealer.lumisummaryTableName() )
summary.insertColumn('LUMISUMMARY_ID','unsigned long long')
summary.insertColumn('RUNNUM','unsigned int')
summary.insertColumn('CMSLSNUM','unsigned int')
summary.insertColumn('LUMILSNUM','unsigned int')
summary.insertColumn('LUMIVERSION','string',28,False)
summary.insertColumn('DTNORM','float')
summary.insertColumn('LHCNORM','float')
summary.insertColumn('CMSALIVE','short')
summary.insertColumn('INSTLUMI','float')
summary.insertColumn('INSTLUMIERROR','float')
summary.insertColumn('INSTLUMIQUALITY','short')
summary.insertColumn('BEAMSTATUS','string',28,False)
summary.insertColumn('BEAMENERGY','float')
summary.insertColumn('NUMORBIT','unsigned int')
summary.insertColumn('STARTORBIT','unsigned int')
summary.insertColumn('LUMISECTIONQUALITY','short')
summary.insertColumn('CMSBXINDEXBLOB','blob')
summary.insertColumn('BEAMINTENSITYBLOB_1','blob')
summary.insertColumn('BEAMINTENSITYBLOB_2','blob')
summary.setPrimaryKey('LUMISUMMARY_ID')
summary.setNotNullConstraint('RUNNUM',True)
summary.setNotNullConstraint('CMSLSNUM',True)
summary.setNotNullConstraint('LUMILSNUM',True)
summary.setNotNullConstraint('CMSALIVE',True)
summary.setNotNullConstraint('LUMIVERSION',True)
summary.setNotNullConstraint('DTNORM',True)
summary.setNotNullConstraint('LHCNORM',True)
summary.setNotNullConstraint('INSTLUMI',True)
summary.setNotNullConstraint('INSTLUMIERROR',True)
summary.setNotNullConstraint('INSTLUMIQUALITY',True)
summary.setNotNullConstraint('STARTORBIT',True)
summary.setNotNullConstraint('NUMORBIT',True)
summary.setNotNullConstraint('LUMISECTIONQUALITY',True)
summary.setNotNullConstraint('BEAMENERGY',True)
summary.setNotNullConstraint('BEAMSTATUS',True)
summary.setUniqueConstraint(('RUNNUM','LUMIVERSION','LUMILSNUM'))
summary.createIndex('lumisummary_runnum',('RUNNUM'))
db.createTable(summary,withIdTable=True)
created.append(nameDealer.lumisummaryTableName())
if not schema.existsTable(nameDealer.lumidetailTableName()):
detail=coral.TableDescription()
detail.setName( nameDealer.lumidetailTableName() )
detail.insertColumn('LUMIDETAIL_ID','unsigned long long')
detail.insertColumn('LUMISUMMARY_ID','unsigned long long')
detail.insertColumn('BXLUMIVALUE','blob')
detail.insertColumn('BXLUMIERROR','blob')
detail.insertColumn('BXLUMIQUALITY','blob')
detail.insertColumn('ALGONAME','string',28,False)
detail.setPrimaryKey('LUMIDETAIL_ID')
detail.createForeignKey('DETAILSOURCE','LUMISUMMARY_ID',nameDealer.lumisummaryTableName(),'LUMISUMMARY_ID')
detail.setNotNullConstraint('BXLUMIVALUE',True)
detail.setNotNullConstraint('BXLUMIERROR',True)
detail.setNotNullConstraint('BXLUMIQUALITY',True)
detail.setNotNullConstraint('ALGONAME',True)
detail.setUniqueConstraint(('LUMISUMMARY_ID','ALGONAME'))
db.createTable(detail,withIdTable=True)
created.append(nameDealer.lumidetailTableName())
if not schema.existsTable(nameDealer.trgTableName()):
trg=coral.TableDescription()
trg.setName( nameDealer.trgTableName() )
trg.insertColumn('TRG_ID','unsigned long long')
trg.insertColumn('RUNNUM','unsigned int')
trg.insertColumn('CMSLSNUM','unsigned int')
trg.insertColumn('BITNUM','unsigned int')
trg.insertColumn('BITNAME','string',56,False)
trg.insertColumn('TRGCOUNT','unsigned int')
trg.insertColumn('DEADTIME','unsigned long long')
trg.insertColumn('PRESCALE','unsigned int')
trg.setNotNullConstraint('RUNNUM',True)
trg.setNotNullConstraint('CMSLSNUM',True)
trg.setNotNullConstraint('BITNUM',True)
trg.setNotNullConstraint('BITNAME',True)
trg.setNotNullConstraint('TRGCOUNT',True)
trg.setNotNullConstraint('DEADTIME',True)
trg.setNotNullConstraint('PRESCALE',True)
trg.setPrimaryKey('TRG_ID')
trg.createIndex('trg_runnum',('RUNNUM'))
db.createTable(trg,withIdTable=True)
created.append( nameDealer.trgTableName() )
if not schema.existsTable( nameDealer.hltTableName() ):
hlt=coral.TableDescription()
hlt.setName( nameDealer.hltTableName() )
hlt.insertColumn( 'HLT_ID','unsigned long long')
hlt.insertColumn( 'RUNNUM','unsigned int')
hlt.insertColumn( 'CMSLSNUM','unsigned int')
hlt.insertColumn( 'PATHNAME','string',256,False)
hlt.insertColumn( 'INPUTCOUNT','unsigned int')
hlt.insertColumn( 'ACCEPTCOUNT','unsigned int')
hlt.insertColumn( 'PRESCALE','unsigned int')
hlt.setPrimaryKey( 'HLT_ID' )
hlt.setNotNullConstraint('RUNNUM',True)
hlt.setNotNullConstraint('CMSLSNUM',True)
hlt.setNotNullConstraint('PATHNAME',True)
hlt.setNotNullConstraint('INPUTCOUNT',True)
hlt.setNotNullConstraint('ACCEPTCOUNT',True)
hlt.setNotNullConstraint('PRESCALE',True)
hlt.createIndex('hlt_runnum',('RUNNUM'))
db.createTable(hlt,withIdTable=True)
created.append( nameDealer.hltTableName() )
if not schema.existsTable( nameDealer.trghltMapTableName() ):
trghlt=coral.TableDescription()
trghlt.setName( nameDealer.trghltMapTableName() )
trghlt.insertColumn( 'HLTKEY','string',128,False )
trghlt.insertColumn( 'HLTPATHNAME','string',256,False )
            trghlt.insertColumn( 'L1SEED','string',1024,False )
trghlt.setNotNullConstraint('HLTKEY',True)
trghlt.setNotNullConstraint('HLTPATHNAME',True)
trghlt.setNotNullConstraint('L1SEED',True)
db.createTable(trghlt,withIdTable=False)
created.append( nameDealer.trghltMapTableName() )
return created
except:
raise
#=======================================================
#
# MODIFY
#
#=======================================================
def oldToNew(schema):
'''
modify old tables:cmsrunsummary
alter table cmsrunsummary add column(l1key string,egev unsigned int,amodetag string)
'''
try:
tableHandle=schema.tableHandle(nameDealer.cmsrunsummaryTableName())
tableHandle.schemaEditor().insertColumn('L1KEY','string',128,False)
tableHandle.schemaEditor().insertColumn('EGEV','unsigned int')
tableHandle.schemaEditor().insertColumn('AMODETAG','string',28,False)
createTables(schema)
except:
raise
def newToOld(schema):
try:
dropTables(schema,['REVISIONS','LUMINORMS','LUMIDATA','LUMISUMMARYV2','TRGDATA','LSTRG','HLTDATA','LSHLT'])
tableHandle=schema.tableHandle(nameDealer.cmsrunsummaryTableName())
ncol=tableHandle.description().numberOfColumns()
todrop=[]
for i in range(ncol):
colname=tableHandle.description().columnDescription(i).name()
if colname in ['L1KEY','EGEV','AMODETAG']:
todrop.append(colname)
for colname in todrop:
tableHandle.schemaEditor().dropColumn(colname)
except :
raise
#=======================================================
#
# TODO
#
#=======================================================
def createIndices(schema):
'''
'''
pass
#cmsrunsummary.createIndex('cmsrunsummary_fillnum',('FILLNUM'))
#cmsrunsummary.createIndex('cmsrunsummary_startime',('STARTTIME'))
def dropIndices(schema):
'''
'''
pass
def describeIndices(schema):
'''
'''
pass
def createFKConstraints(schema):
'''
'''
pass
def dropFKConstrains(schema):
'''
'''
pass
def createNULLConstraints(schema):
'''
'''
#cmsrunsummary.setNotNullConstraint('HLTKEY',True)
#cmsrunsummary.setNotNullConstraint('FILLNUM',True)
#cmsrunsummary.setNotNullConstraint('SEQUENCE',True)
pass
def dropNULLConstraints(schema):
'''
'''
pass
def createUniqueConstraints(schema):
'''
'''
try:
revtable=schema.tableHandle(nameDealer.revisionTableName())
revtable.schemaEditor().setUniqueConstraint('NAME','revisions_name_uc')
except:
raise
def dropUNIQUEConstraints(schema):
'''
'''
pass
def describe(schema):
'''
'''
pass
if __name__ == "__main__":
import sessionManager
myconstr='oracle://devdb10/cms_xiezhen_dev'
authpath='/afs/cern.ch/user/x/xiezhen'
#myconstr='sqlite_file:test.db'
svc=sessionManager.sessionManager(myconstr,authpath=authpath,debugON=False)
session=svc.openSession(isReadOnly=False,cpp2sqltype=[('unsigned int','NUMBER(10)'),('unsigned long long','NUMBER(20)')])
schema=session.nominalSchema()
session.transaction().start(False)
tables=createTables(schema)
if len(tables)==0:
dropTables(schema,nameDealer.schemaV2Tables())
else:
dropTables(schema,tables)
dropTables(schema,nameDealer.commonTables())
#createUniqueConstraints(schema)
#session.transaction().commit()
#print 'droped new '
session.transaction().start(False)
tables=createOldSchema(schema)
oldToNew(schema)
print('created old ',tables)
session.transaction().commit()
del session
| 48.854086
| 185
| 0.638445
|
e0b7c6d3468597491587b190221cbf6ee808efd5
| 132
|
py
|
Python
|
example/example/backends.py
|
gbozee/django-simple-social-login
|
65c3a6578b334d3b83807d480148ea44739f7c08
|
[
"MIT"
] | 3
|
2017-10-31T05:34:52.000Z
|
2018-10-17T06:18:25.000Z
|
example/example/backends.py
|
gbozee/django-simple-social-login
|
65c3a6578b334d3b83807d480148ea44739f7c08
|
[
"MIT"
] | 4
|
2020-02-11T21:48:09.000Z
|
2021-06-10T17:32:45.000Z
|
example/example/backends.py
|
gbozee/django-simple-social-login
|
65c3a6578b334d3b83807d480148ea44739f7c08
|
[
"MIT"
] | 3
|
2018-05-07T09:49:11.000Z
|
2019-12-14T04:51:07.000Z
|
class MyBackend(object):
def authenticate(self, request, email=None):
# Check the token and return a user.
pass
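# Hedged usage sketch (assumption, not part of the original example): a custom
# backend such as MyBackend is normally listed in settings and reached through
# django.contrib.auth.authenticate(); the dotted path below is hypothetical.
#
#   # settings.py
#   AUTHENTICATION_BACKENDS = ["example.backends.MyBackend"]
#
#   # in a view, after validating the social-login token
#   from django.contrib.auth import authenticate
#   user = authenticate(request, email="user@example.com")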
| 33
| 48
| 0.651515
|
1afda91c668470e066cc27c338b3395e5db4c66e
| 504
|
py
|
Python
|
flask_core/middleware/FilterMiddleware.py
|
secedu/flask-core
|
3d8def8280613825f070d150372f9513f7727b69
|
[
"Apache-2.0"
] | 3
|
2019-03-07T05:33:21.000Z
|
2019-04-22T22:29:34.000Z
|
flask_core/middleware/FilterMiddleware.py
|
secedu/flask-core
|
3d8def8280613825f070d150372f9513f7727b69
|
[
"Apache-2.0"
] | 23
|
2019-03-10T15:03:14.000Z
|
2019-05-16T11:15:22.000Z
|
flask_core/middleware/FilterMiddleware.py
|
secedu/flask-core
|
3d8def8280613825f070d150372f9513f7727b69
|
[
"Apache-2.0"
] | 3
|
2019-03-07T05:33:05.000Z
|
2021-02-17T16:39:18.000Z
|
#!/usr/bin/env python3
class FilterMiddleware(object):
def __init__(self, wsgi_app):
self.wsgi_app = wsgi_app
self.blacklist = ["sqlmap", "dirbuster"]
    def __call__(self, environ, start_response):
        try:
            # Short-circuit requests whose User-Agent matches a blacklisted scanner.
            if any(x for x in self.blacklist if x in environ["HTTP_USER_AGENT"].lower()):
                start_response("503 Service Unavailable", [])
                return [b"Something went wrong."]
        except KeyError:
            # No User-Agent header was supplied; treat it as a normal request.
            pass
        # Pass all other requests through to the wrapped WSGI application.
        return self.wsgi_app(environ, start_response)
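# Hedged usage sketch (assumption, not part of the original file): the
# middleware wraps any WSGI callable, for example a Flask application:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.wsgi_app = FilterMiddleware(app.wsgi_app)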
| 29.647059
| 91
| 0.593254
|
3b3d348ae8ff778061b1bcb70950fb0365316749
| 8,610
|
py
|
Python
|
rgw/v2/tests/nfs_ganesha/test_on_nfs_io.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | 6
|
2019-04-12T17:45:44.000Z
|
2021-09-14T19:59:05.000Z
|
rgw/v2/tests/nfs_ganesha/test_on_nfs_io.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | 111
|
2019-12-10T10:41:08.000Z
|
2022-03-31T11:42:30.000Z
|
rgw/v2/tests/nfs_ganesha/test_on_nfs_io.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | 23
|
2019-05-30T19:48:25.000Z
|
2022-03-24T17:07:19.000Z
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import argparse
import logging
import shutil
import time
import traceback
import v2.utils.utils as utils
import yaml
from v2.lib.exceptions import TestExecError
from v2.lib.nfs_ganesha.nfslib import DoIO
from v2.lib.nfs_ganesha.write_io_info import BasicIOInfoStructure, IOInfoInitialize
# from initialize import PrepNFSGanesha
from v2.tests.nfs_ganesha.initialize import PrepNFSGanesha
from v2.tests.nfs_ganesha.verify_on_s3 import ReadIOInfoOnS3
from v2.utils.log import configure_logging
from v2.utils.test_desc import AddTestInfo
SLEEP_TIME = 60
log = logging.getLogger()
def test_exec(rgw_user_info_file, config):
test_info = AddTestInfo("NFS Basic Ops")
test_info.started_info()
log.info("config:\n%s" % config["config"])
log.info("rgw_user_info_file: %s" % rgw_user_info_file)
io_config = config["config"]
io_op_config = io_config["io_op_config"]
log.info("io_op_config: %s" % io_op_config)
log.info("initiating nfs ganesha")
io_info_initialize = IOInfoInitialize()
basic_io_structure = BasicIOInfoStructure()
io_info_initialize.initialize(basic_io_structure.initial())
try:
nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)
mounted = nfs_ganesha.initialize()
if mounted is False:
raise TestExecError("mount failed")
log.info("authenticating rgw user")
mnt_point = nfs_ganesha.rgw_user_info["nfs_mnt_point"]
if (
nfs_ganesha.rgw_user_info["nfs_version"] == 4
and nfs_ganesha.rgw_user_info["Pseudo"] is not None
):
log.info("nfs version: 4")
log.info("adding Pseudo path to writable mount point")
mnt_point = os.path.join(mnt_point, nfs_ganesha.rgw_user_info["Pseudo"])
log.info("writable mount point with Pseudo: %s" % mnt_point)
if io_op_config.get("create", None) is True:
do_io = DoIO(nfs_ganesha.rgw_user_info, mnt_point)
# base dir creation
for bc in range(io_config["basedir_count"]):
basedir_name_to_create = utils.gen_bucket_name_from_userid(
nfs_ganesha.rgw_user_info["user_id"], rand_no=bc
)
log.info("creating basedir with name: %s" % basedir_name_to_create)
write = do_io.write("basedir", basedir_name_to_create)
if write is False:
raise TestExecError("write failed on mount point")
if io_config["subdir_count"] != 0:
for sd in range(io_config["subdir_count"]):
subdir_name_to_create = utils.gen_bucket_name_from_userid(
basedir_name_to_create + ".subdir", rand_no=sd
)
log.info(
"creating subdir with name: %s" % subdir_name_to_create
)
write = do_io.write(
"subdir",
os.path.join(basedir_name_to_create, subdir_name_to_create),
)
if write is False:
raise TestExecError("write failed on mount point")
if io_config["file_count"] != 0:
for fc in range(io_config["file_count"]):
file_name_to_create = utils.gen_bucket_name_from_userid(
basedir_name_to_create + ".file", rand_no=fc
)
log.info("creating file with name: %s" % file_name_to_create)
file_size = utils.get_file_size(
io_config["objects_size_range"]["min"],
io_config["objects_size_range"]["max"],
)
write = do_io.write(
"file",
os.path.join(basedir_name_to_create, file_name_to_create),
file_size,
)
if write is False:
raise TestExecError("write failed on mount point")
log.info("verification of IO will start after %s seconds" % SLEEP_TIME)
time.sleep(SLEEP_TIME)
log.info("starting IO verification on S3")
read_io_info_on_s3 = ReadIOInfoOnS3()
read_io_info_on_s3.yaml_fname = "io_info.yaml"
read_io_info_on_s3.initialize_verify_io()
bucket_verify = read_io_info_on_s3.verify_if_bucket_created()
if bucket_verify is False:
raise TestExecError("Bucket verification Failed")
log.info("Bucket verified, data intact")
read_io_info_on_s3.verify_if_objects_created()
log.info("objects verified, data intact")
log.info("verification completed, data intact")
if io_op_config.get("delete", None) is True:
log.info("performing delete operation")
# if you delete basedirs, objects and files under them will also be deleted
basedirs_list = read_io_info_on_s3.buckets
            for basedir_name in basedirs_list:
                shutil.rmtree(os.path.abspath(os.path.join(mnt_point, basedir_name)))
for basedir in basedirs_list:
if os.path.exists(
os.path.abspath(os.path.join(mnt_point, basedir))
):
raise TestExecError("basedir: %s not deleted" % basedir)
log.info("basedirs and subdirs deleted")
if io_op_config.get("move", None) is True:
for each_file in read_io_info_on_s3.objects:
if each_file["type"] == "file":
log.info("performing move operation on %s" % each_file["name"])
current_path = os.path.abspath(
os.path.join(
mnt_point, each_file["bucket"], each_file["name"]
)
)
new_path = os.path.abspath(
os.path.join(
mnt_point,
each_file["bucket"],
each_file["name"] + ".moved",
)
)
moved = utils.exec_shell_cmd(
"sudo mv %s %s" % (current_path, new_path)
)
if moved is False:
raise TestExecError("move failed for :%s" % current_path)
each_file["name"] = os.path.basename(new_path)
log.info("Verification will start after %s seconds" % SLEEP_TIME)
time.sleep(SLEEP_TIME)
log.info("starting verification for moved files")
read_io_info_on_s3.verify_if_objects_created()
log.info("objects verified after move operation, data intact")
test_info.success_status("test success")
    except TestExecError as e:
        # the more specific exception must be handled before the generic one,
        # otherwise this branch is unreachable
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        return 1
if __name__ == "__main__":
config = {}
test_info = AddTestInfo("nfs ganesha basic IO test and verification on rgw")
parser = argparse.ArgumentParser(description="NFS-Ganesha-RGW Automation")
parser.add_argument("-r", dest="rgw_user_info", help="RGW user info")
parser.add_argument("-c", dest="test_config", help="Test Configuration")
parser.add_argument(
"-log_level",
dest="log_level",
help="Set Log Level [DEBUG, INFO, WARNING, ERROR, CRITICAL]",
default="info",
)
args = parser.parse_args()
rgw_user_info_yaml = args.rgw_user_info
test_config_yaml = args.test_config
log_f_name = os.path.basename(os.path.splitext(test_config_yaml)[0])
configure_logging(f_name=log_f_name, set_level=args.log_level.upper())
with open(test_config_yaml, "r") as f:
doc = yaml.safe_load(f)
test_config = doc
test_exec(rgw_user_info_yaml, test_config)
| 43.484848
| 91
| 0.565273
|
f8098d69fe9e992fc0a4c6c819f048f03d79395d
| 22,663
|
py
|
Python
|
tms_ss/tms_ss_ibs/src/ibs.py
|
robotpilot/ros_tms
|
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
|
[
"BSD-3-Clause"
] | 54
|
2015-01-06T06:58:28.000Z
|
2021-05-02T07:49:37.000Z
|
tms_ss/tms_ss_ibs/src/ibs.py
|
robotpilot/ros_tms
|
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
|
[
"BSD-3-Clause"
] | 114
|
2015-01-07T06:42:21.000Z
|
2022-02-12T05:54:04.000Z
|
tms_ss/tms_ss_ibs/src/ibs.py
|
robotpilot/ros_tms
|
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
|
[
"BSD-3-Clause"
] | 24
|
2015-03-27T08:35:59.000Z
|
2020-06-08T13:05:31.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# ------------------------------------------------------------------------------
# @file : ibs.py
# @brief : Intelligent Board System
# @author : Akio Shigekane, Pyo
# @version: Ver1.1.1 (since 2012.00.00)
# @date : 2015.2.25
# ------------------------------------------------------------------------------
'''
@todo readjust threshold of GetWeightDiff
'''
import serial
import sys
import math
import time
import datetime
import subprocess
import rospy
import tf2_ros
import tf2_geometry_msgs
from tms_msg_db.msg import TmsdbStamped
from tms_msg_db.msg import Tmsdb
from functools import reduce
LC_MAX_SENSOR_NUM = 4
LC_GET_WEIGHT_CNT = 5
# LC_GET_WEIGHT_STABLE = 12
LC_GET_WEIGHT_STABLE = 60
MAX_OBJECT_NUM = 25  # total number of objects present in the environment
# fixed values defined by the device specification
TR3_STX = 0x02
TR3_ETX = 0x03
TR3_CR = 0x0D
TR3_ACK = 0x30
TR3_NACK = 0x31
TR3_UID_SEND = 0x49
TR3_ANT1 = 0x00
TR3_ANT2 = 0x01
TR3_ANT3 = 0x02
TR3_ANT4 = 0x03
TR3_ANT5 = 0x04
TR3_ANT6 = 0x05
TR3_ANT7 = 0x06
TR3_ANT8 = 0x07
TR3_ModeCommand = 0x00
TR3_ModeAutoscan = 0x01
TR3_ModeTriger = 0x02
TR3_ModePolling = 0x03
TR3_ModeEAS = 0x24
TR3_ModeInventory = 0x50
TR3_ModeRDLOOP = 0x58
TR3_UID_SIZE = 8
TR3_TAG_SIZE = 16
# user-configurable values
TR3_TAG_MAX = 40 # HardwareMAX: 200
TR3_MAX_COMMAND_SIZE = 30
TR3_USED_ANT_NUM = 2
IC_STAGE_NAME_SIZE = 100
IC_TAG_MAX = 20
IC_STAGES_MAX = 2
IC_OBJECT_IN = 1
IC_OBJECT_OUT = -1
IC_OBJECT_MOVE = 2
IC_OBJECT_STAY = 0
NONE = 0
EXIST = 1
LC_MAX_SENSOR_NUM = 4
D_COUT = sys.stdout.write  # debug output
rospy.init_node('ibs', anonymous=True)
tfBuffer = tf2_ros.Buffer(rospy.Duration(1200.0))
listener = tf2_ros.TransformListener(tfBuffer)
class CLoadCell(object):
def __init__(self, port):
self.__mPreSensorsWeight = [0] * LC_MAX_SENSOR_NUM
self.__mSensorPosX = [0.0] * LC_MAX_SENSOR_NUM
self.__mSensorPosY = [0.0] * LC_MAX_SENSOR_NUM
self.__mSensorNum = LC_MAX_SENSOR_NUM
self.__ser = serial.Serial(baudrate=115200, timeout=3)
self.__ser.port = port
print "OPENING: LoadCell(port:", port, ")"
self.__OpenPort()
print "OPENED: LoadCell(port:", port, ")"
self.__ClosePort()
print "CLOSED: LoadCell(port:", port, ")"
self.__ResetWeight()
    def __OpenPort(self):  # initialize serial communication
D_COUT("LoadCell: opening port...")
self.__ser.open()
D_COUT("\033[1K\r")
def __ClosePort(self):
D_COUT("LoadCell: closing port...")
self.__ser.close()
D_COUT("\033[1K\r")
    # get the weight reading from the specified sensor
def GetWeight(self, sensor_id, repeat=1):
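        # Send the sensor id over the serial link, read one line back, strip the
        # "OK"/quote wrapper, scale the raw count by 5 and average over `repeat` reads.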
self.__OpenPort()
buf = []
for _ in xrange(repeat):
            self.__ser.flushInput()
self.__ser.write(str(sensor_id))
tmp = self.__ser.readline()
# print tmp
buf.append(int(tmp.replace("O", "").replace("K", "").replace('"', "")) * 5)
self.__ClosePort()
return reduce(lambda x, y: x + y, buf) / len(buf)
def __ResetWeight(self, initial=[], num=10):
if initial: # if not empty
self.__mPreSensorsWeight = initial
return
self.__mPreSensorsWeight = [0] * self.__mSensorNum
for i in xrange(num):
for j in xrange(len(self.__mPreSensorsWeight)):
self.__mPreSensorsWeight[j] += self.GetWeight(j)
self.__mPreSensorsWeight = map(lambda x: x / num, self.__mPreSensorsWeight)
def SetSensorPos(self, sensor_num, x_list, y_list):
self.__mSensorNum = sensor_num
self.__mSensorPosX = tuple(x_list)
self.__mSensorPosY = tuple(y_list)
    # check whether the total weight changed (i.e. an object was added or removed)
    def GetWeightDiff(self, threshold=20):
        # wait until the readings stabilize
pre = [0] * self.__mSensorNum
buf = [[0 for i in range(LC_MAX_SENSOR_NUM)] for j in range(LC_GET_WEIGHT_CNT)]
for i in xrange(self.__mSensorNum):
pre[i] = self.GetWeight(i)
        cnt = 0  # iteration count
        while cnt < LC_GET_WEIGHT_CNT:
            # time.sleep(0.004)  # leave an interval of about 4 ms
time.sleep(0.02)
weight = 0
for i in xrange(self.__mSensorNum):
now = self.GetWeight(i, repeat=1)
weight += math.fabs(now - pre[i])
pre[i] = now
buf[cnt][i] = now
# D_COUT("weight :{0}".format(weight))
# D_COUT("\n")
# D_COUT("\033[1K\r")
if weight < LC_GET_WEIGHT_STABLE:
cnt += 1
else:
cnt = 0
        # output: average the buffered readings
pre = [0] * self.__mSensorNum
for i in xrange(LC_GET_WEIGHT_CNT):
for j in xrange(self.__mSensorNum):
pre[j] += buf[i][j]
pre = map(lambda x: x / LC_GET_WEIGHT_CNT, pre)
diffs = map(lambda x, y: x - y, pre, self.__mPreSensorsWeight)
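        # Estimate the (x, y) position of the load change as the centroid of the
        # sensor positions weighted by the absolute per-sensor weight differences.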
x = y = 0
weight = 0
for i in xrange(self.__mSensorNum):
x += self.__mSensorPosX[i] * math.fabs(diffs[i])
y += self.__mSensorPosY[i] * math.fabs(diffs[i])
weight += diffs[i]
if abs(weight) < threshold:
return 0, 0, 0, diffs
else:
self.__mPreSensorsWeight = pre
x /= math.fabs(weight)
y /= math.fabs(weight)
return weight, x, y, diffs
class CTR3(object):
    def __init__(self, port, AntennaNum):
        self.__mActiveAntenna = AntennaNum
self.__mUIDs = [list() for i in xrange(TR3_USED_ANT_NUM)]
self.__mCommand = [0] * TR3_MAX_COMMAND_SIZE
self.__mCommand[0] = TR3_STX
        self.__mCommand[1] = 0x00  # address
self.__ser = serial.Serial(baudrate=38400, timeout=3)
self.__ser.port = port
print "OPENING: TR3(port:", port, ")"
self.__OpenPort()
print "OPENED: TR3(port:", port, ")"
self.__ClosePort()
print "CLOSED: TR3(port:", port, ")"
self.__SetAntenna(self.__mActiveAntenna)
def __OpenPort(self):
D_COUT("TagReader: opening port...")
self.__ser.open()
D_COUT("\033[1K\r")
def __ClosePort(self):
D_COUT("TagReader: closing port...")
self.__ser.close()
D_COUT("\033[1K\r")
    # select the active antenna
    def __SetAntenna(self, AN):
        self.__mCommand[2] = 0x4E  # command
        self.__mCommand[3] = 0x02  # data length
        self.__mCommand[4] = 0x9C  # command detail
        self.__mCommand[5] = AN  # antenna number
self.__OpenPort()
self.AddChecksum()
self.__ser.write("".join(map(chr, self.__mCommand)))
buf = map(ord, self.__ser.read(size=9))
if buf[2] != TR3_ACK:
print "TR3: SendCommandError . SetAntenna"
self.__ser.read(size=100)
return -1
# self.__mActiveAntenna = buf[5] # TODO: get true active antenna number
self.__ClosePort()
return buf[5]
    # turn the antenna power ON
    # TODO: not sure this method works correctly
    def __AntennaPowerON(self):
        self.__SetAntenna(self.__mActiveAntenna)
        self.__mCommand[2] = 0x4E  # command
        self.__mCommand[3] = 0x02  # data length
        self.__mCommand[4] = 0x9E  # command detail
        self.__mCommand[5] = 0x01  # power ON
self.__OpenPort()
self.AddChecksum()
self.__ser.write("".join(map(chr, self.__mCommand)))
buf = map(ord, self.__ser.read(size=9))
if buf[2] != TR3_ACK:
buf = self.__ser.read(size=100)
print "TR3: SendCommandError . AntennaPowerON"
return False
self.__ClosePort()
return True
    # turn the antenna power OFF
    def __AntennaPowerOFF(self):  # unsigned long num
        # TODO: the power cannot really be switched OFF, so switch the active antenna
        # instead; leaving it ON puts noise on the load cell ADC
        self.__SetAntenna(self.__mActiveAntenna + 1)
        # TODO: the original program used the code below, but it does not work and was
        # probably never tested.
        # self.__mCommand[2] = 0x4E  # command
        # self.__mCommand[3] = 0x02  # data length
        # self.__mCommand[4] = 0x9E  # command detail
        # self.__mCommand[5] = 0x00  # power OFF
# self.__OpenPort()
# self.AddChecksum()
# self.__ser.write("".join(map(chr, self.__mCommand)))
# buf = [chr(0)] * 100
# buf = map(ord, self.__ser.read(size=9))
# if buf[2] != TR3_ACK:
# print "TR3: SendCommandError . AntennaPowerOFF"
# return False
# self.__ClosePort()
# return True
    # print all tag UIDs currently seen by each antenna
def PrintTagUIDs(self):
for i in xrange(TR3_USED_ANT_NUM):
print "\n.. ANTENNA ", i + 1, " .."
for num, j in enumerate(self.__mUIDs[i]):
print "{0:>3}.{1}".format(num + 1, j)
    # read the tags currently in range
    def Inventory2(self):
        del self.__mUIDs[self.__mActiveAntenna][:]
        # unless the antenna is changed, already-read UIDs are not returned; only new UIDs are
        self.__mCommand[2] = 0x78  # command
        self.__mCommand[3] = 0x03  # data length
        self.__mCommand[4] = 0xF0  # command detail
        self.__mCommand[5] = 0x00  # anti-collision enabled
        self.__mCommand[6] = 0x01  # output format: number of read tags + UID data
self.__OpenPort()
self.AddChecksum()
self.__ser.write("".join(map(chr, self.__mCommand)))
buf = map(ord, self.__ser.read(size=9))
if buf[2] != TR3_ACK:
print "TR3: SendCommandError . Inventory2"
time.sleep(0.1)
self.__ser.read(size=TR3_TAG_SIZE * TR3_TAG_MAX)
return -1
        tag_num = buf[5]  # number of tags to read
        # read the tag data
for i in xrange(tag_num):
hexs = [chr(0)] * 17
buf = map(ord, self.__ser.read(size=TR3_TAG_SIZE))
hexs = "{0:0>2X}{1:0>2X}{2:0>2X}{3:0>2X}{4:0>2X}{5:0>2X}{6:0>2X}{7:0>2X}".format(
buf[12], buf[11], buf[10], buf[9], buf[8], buf[7], buf[6], buf[5])
# print hexs
self.__mUIDs[self.__mActiveAntenna].append(hexs)
self.__ClosePort()
# print self.__mUIDs[self.__mActiveAntenna]
return tag_num
    # communication helper function
def AddChecksum(self):
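        # Frame layout: STX, address, command, length, data..., ETX, checksum, CR.
        # The checksum is the low byte (mod 256) of the sum of all bytes up to and including ETX.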
num = self.__mCommand[3] + 5
self.__mCommand[num - 1] = TR3_ETX
self.__mCommand[num + 1] = TR3_CR
self.__mCommand[num] = 0x00
for i in xrange(num):
self.__mCommand[num] += self.__mCommand[i]
self.__mCommand[num] %= 256
return num + 2
def GetTagDiff(self, diffUID, AN):
diffUID = str()
# print self.__mUIDs, self.__mActiveAntenna
preUIDs = list(self.__mUIDs[self.__mActiveAntenna])
self.__AntennaPowerON()
if self.Inventory2() == -1:
self.__AntennaPowerOFF()
return 0, diffUID
self.__AntennaPowerOFF()
        # in the current UIDs but not in the previous ones => IDs of newly added objects
        # type: [str]
        increase = list(set(self.__mUIDs[self.__mActiveAntenna]) - set(preUIDs))
        # print set(self.__mUIDs[self.__mActiveAntenna]), set(preUIDs)
        # in the previous UIDs but not in the current ones => IDs of removed objects
        # type: [str]
        decrease = list(set(preUIDs) - set(self.__mUIDs[self.__mActiveAntenna]))
        # no change
if (len(increase) == 0) and (len(decrease) == 0):
return 0, diffUID
        # one object added
        if (len(increase) == 1) and (len(decrease) == 0):
            diffUID = increase[0]
            return 1, diffUID
        # one object removed
        if (len(increase) == 0) and (len(decrease) == 1):
            diffUID = decrease[0]
            return -1, diffUID
        # @TODO: maybe an unreachable and wrong branch sequence.
        # multiple objects added/removed at once (detect them one at a time)
if len(increase) >= 1:
self.__mUIDs[self.__mActiveAntenna].sort()
diffUID = increase[0]
return 1, diffUID
if len(decrease) >= 1:
self.__mUIDs[self.__mActiveAntenna].sort()
diffUID = decrease[0]
return -1, diffUID
return 0, diffUID
class CTagOBJ(object):
def __init__(self):
self.mUID = ""
self.mWeight = 0
self.mDiffs = [0] * LC_MAX_SENSOR_NUM
self.mX = 0.0
self.mY = 0.0
self.mName = ""
self.mComment = ""
self.mUID = ""
class CIntelCab(object):
def __init__(self, lc_port="/dev/ttyACM0", lc_xpos=(0.0, 0.0, 0.0, 0.0), lc_ypos=(0.0, 0.0, 0.0, 0.0),
tr_port="/dev/ttyUSB0", tr_antenna=TR3_ANT1):
self.cLoadCell = CLoadCell(lc_port)
self.cLoadCell.SetSensorPos(len(lc_xpos), lc_xpos, lc_ypos)
self.cTR3 = CTR3(tr_port, tr_antenna)
self.mName = "\0"
self.TagObjList = list()
def PrintObjInfo(self):
print "\n{0::>20}::::::::::".format(self.mName)
for index, cObj in enumerate(self.TagObjList):
print "{0:>3}: UID->".format(index + 1),
print cObj.mUID,
print " Weight={0:>4} X={1:.3f} Y={2:.3f}".format(cObj.mWeight, cObj.mX, cObj.mY),
print "<{0}:{1}>".format(cObj.mName, cObj.mComment)
def UpdateObj(self):
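        # Correlate RFID tag events with load-cell weight changes:
        #   new tag seen and weight increased -> IC_OBJECT_IN (object stored)
        #   tag disappeared                   -> IC_OBJECT_OUT (object taken out)
        #   weight dropped, then rose again   -> IC_OBJECT_MOVE (object moved inside)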
# init static variables
if not hasattr(self, "_CIntelCab__cObjIn"):
self.__cObjIn = CTagOBJ()
if not hasattr(self, "_CIntelCab__cObjOut"):
self.__cObjOut = CTagOBJ()
if not hasattr(self, "_CIntelCab__InOutTag"):
self.__InOutTag = 0
if not hasattr(self, "_CIntelCab__InOutLC"):
self.__InOutLC = 0
cObj = CTagOBJ()
cInOut = CTagOBJ()
value = IC_OBJECT_STAY
        # check for tag addition/removal
(inout, cObj.mUID) = self.cTR3.GetTagDiff("", 0)
# print "GetTagDiff: ", inout, cObj.mUID
        # tag count increased
        if inout > 0:
            self.__InOutTag = 1
            self.__cObjIn = cObj
        # tag count decreased: an object was taken out
elif inout < 0:
for i in xrange(len(self.TagObjList)):
if self.TagObjList[i].mUID == cObj.mUID:
del(self.TagObjList[i])
self.__InOutLC = 0
break
self.__InOutTag = 0
cInOut = cObj
value = IC_OBJECT_OUT
        # check the load cells for a weight change
cObj.mWeight, cObj.mX, cObj.mY, cObj.mDiffs = self.cLoadCell.GetWeightDiff()
print "mWeight:{0} InOutLC:{1}".format(cObj.mWeight, self.__InOutLC),
if (cObj.mWeight > 0) and (self.__InOutTag > 0):
            # object stored
cObj.mUID = self.__cObjIn.mUID
self.TagObjList.append(cObj)
self.__InOutTag = 0
self.__InOutLC = 0
cInOut = cObj
value = IC_OBJECT_IN
elif (cObj.mWeight > 0) and (self.__InOutLC < 0):
            # object moved within the cabinet
cnt = TR3_TAG_MAX
for i in xrange(len(self.TagObjList)):
if self.TagObjList[i].mUID == self.__cObjOut.mUID:
cnt = i
break
if cnt != TR3_TAG_MAX:
self.TagObjList[cnt] = cObj
self.TagObjList[cnt].mUID = self.__cObjOut.mUID
self.TagObjList[cnt].mName = self.__cObjOut.mName
self.TagObjList[cnt].mComment = self.__cObjOut.mComment
self.__InOutLC = 0
cInOut = self.TagObjList[cnt]
value = IC_OBJECT_MOVE
else:
            # an untagged object was stored
            pass
        # object lifted
if cObj.mWeight < 0:
comp = 5000
cnt = TR3_TAG_MAX
for i in xrange(len(self.TagObjList)):
sum = 0
for j in xrange(LC_MAX_SENSOR_NUM):
sum += abs(abs(self.TagObjList[i].mDiffs[j]) - abs(cObj.mDiffs[j]))
if sum < comp:
comp = sum
cnt = i
if cnt != TR3_TAG_MAX:
self.__cObjOut = self.TagObjList[cnt]
self.__InOutLC = -1
return value, cInOut
def getWorldFramePos(x, y):
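    # Convert a sensor-local (x, y) reading to the world frame: add the configured
    # offsets, then transform the point from `frame_id` into "world_link" via tf2.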
global tfBuffer
offset_x = rospy.get_param('~offset_x')
offset_y = rospy.get_param('~offset_y')
offset_z = rospy.get_param('~offset_z')
frame_id = rospy.get_param('~frame_id')
world = "world_link"
pos = tf2_geometry_msgs.PointStamped()
pos.header.stamp = rospy.Time()
pos.header.frame_id = frame_id
pos.point.x = x + offset_x
pos.point.y = y + offset_y
pos.point.z = offset_z
while not tfBuffer.can_transform(pos.header.frame_id, world, pos.header.stamp):
pass
ret = tfBuffer.transform(pos, world, timeout=rospy.Duration(1.0))
return ret
def main():
print "Hello World"
rfidValue = dict()
rfidValue["E00401004E17F97A"] = {"id": 7001, "name": "chipstar_red"}
rfidValue["E00401004E180E50"] = {"id": 7002, "name": "chipstar_orange"}
rfidValue["E00401004E180E58"] = {"id": 7003, "name": "chipstar_green"}
rfidValue["E00401004E180E60"] = {"id": 7004, "name": "greentea_bottle"}
rfidValue["E00401004E180E68"] = {"id": 7005, "name": "soukentea_bottle"}
rfidValue["E00401004E180EA0"] = {"id": 7006, "name": "cancoffee"}
rfidValue["E00401004E180EA8"] = {"id": 7007, "name": "seasoner_bottle"}
rfidValue["E00401004E181C88"] = {"id": 7008, "name": "dispenser"}
rfidValue["E00401004E181C87"] = {"id": 7009, "name": "soysauce_bottle_black"}
rfidValue["E00401004E181C7F"] = {"id": 7010, "name": "soysauce_bottle_blue"}
rfidValue["E00401004E181C77"] = {"id": 7011, "name": "soysauce_bottle_white"}
rfidValue["E00401004E181C3F"] = {"id": 7012, "name": "pepper_bottle_black"}
rfidValue["E00401004E181C37"] = {"id": 7013, "name": "pepper_bottle_red"}
rfidValue["E00401004E180E47"] = {"id": 7014, "name": "sake_bottle"}
rfidValue["E00401004E180E3F"] = {"id": 7015, "name": "teapot"}
rfidValue["E00401004E180E37"] = {"id": 7016, "name": "chawan"}
rfidValue["E00401004E1805BD"] = {"id": 7017, "name": "teacup1"}
rfidValue["E00401004E180585"] = {"id": 7018, "name": "teacup2"}
rfidValue["E00401004E18057D"] = {"id": 7019, "name": "cup1"}
rfidValue["E00401004E17EF3F"] = {"id": 7020, "name": "cup2"}
rfidValue["E00401004E17EF37"] = {"id": 7021, "name": "mugcup"}
rfidValue["E00401004E17EF2F"] = {"id": 7022, "name": "remote"}
rfidValue["E00401004E17EF27"] = {"id": 7023, "name": "book_red"}
rfidValue["E00401004E17EEEF"] = {"id": 7024, "name": "book_blue"}
rfidValue["E00401004E17EEE7"] = {"id": 7025, "name": "dish"}
# init ROS
# rospy.init_node('ibs', anonymous=True)
db_pub = rospy.Publisher('tms_db_data', TmsdbStamped, queue_size=10)
    # check that the required rosparams are set
if not rospy.has_param('~idSensor'):
print "ros param 'idSensor' isn't exist"
if not rospy.has_param('~idPlace'):
print "ros param 'idPlace' isn't exist"
if not rospy.has_param('~offset_x'):
print "ros param 'offset_x' isn't exist"
if not rospy.has_param('~offset_y'):
print "ros param 'offset_y' isn't exist"
if not rospy.has_param('~offset_z'):
print "ros param 'offset_z' isn't exist"
if not rospy.has_param('~frame_id'):
print "ros param 'frame_id' isn't exist"
if not rospy.has_param('~loadcell_points/x'):
print "ros param 'loadcell_points/x' isn't exist"
if not rospy.has_param('~loadcell_points/y'):
print "ros param 'loadcell_points/y' isn't exist"
    # read the rosparams
idSensor = rospy.get_param('~idSensor')
idPlace = rospy.get_param('~idPlace')
PORT_LC0 = rospy.get_param("~PORT_LC0", "/dev/ttyACM0")
PORT_TR = rospy.get_param("~PORT_TR", "/dev/ttyUSB0")
xpos0 = rospy.get_param('~loadcell_points/x', (0.0, 1000.0, 0.0, 1000.0))
ypos0 = rospy.get_param('~loadcell_points/y', (0.0, 0.0, 1000.0, 1000.0))
    # get access rights to the virtual COM ports
cmd_chmod = "sudo -S chmod a+rw " + PORT_LC0
print cmd_chmod + "\n", subprocess.check_output(cmd_chmod.split(" "))
cmd_chmod = "sudo -S chmod a+rw " + PORT_TR
print cmd_chmod + "\n", subprocess.check_output(cmd_chmod.split(" "))
# xpos0 = (16.0, 407.0, 16.0, 407.0)
# ypos0 = (16.0, 16.0, 244.0, 244.0)
cIntelCab = CIntelCab(lc_port=PORT_LC0,
lc_xpos=xpos0,
lc_ypos=ypos0,
tr_port=PORT_TR,
tr_antenna=TR3_ANT1,
)
    # the first call takes a while, so run it once here in advance
cIntelCab.UpdateObj()
    # start measuring
change_flag = False
index = 0
print "\nSTART"
r = rospy.Rate(10)
    while not rospy.is_shutdown():  # vector initialization
r.sleep()
# D_COUT(datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")+"\n")
# cObj: CTagOBJ type
state, cObj = cIntelCab.UpdateObj()
# print "state:", state
if state == IC_OBJECT_STAY:
change_flag = False
elif state == IC_OBJECT_IN:
# Beep(2500,50)
print "\n\n IN : ",
index = int(len(cIntelCab.TagObjList) - 1)
cIntelCab.TagObjList[index].mName = cObj.mName
cIntelCab.TagObjList[index].mComment = cObj.mComment
change_flag = True
elif state == IC_OBJECT_MOVE:
# Beep(2500,50)
print "\n\nMOVE: ",
change_flag = True
elif state == IC_OBJECT_OUT:
# Beep(2500,50); Sleep(50); Beep(2500,50)
print "\n\n OUT: ",
change_flag = True
else:
change_flag = False
if change_flag:
change_flag = False
            # re-initialize every time and publish values updated only for the objects inside the cabinet
msg = TmsdbStamped()
msg.header.frame_id = "world_link"
msg.header.stamp = rospy.get_rostime() + rospy.Duration(9 * 60 * 60)
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
for tag, v in sorted(rfidValue.items(), key=lambda x: x[1]["id"]):
time.sleep(0.001)
tmp_db = Tmsdb()
tmp_db.time = now
tmp_db.id = v["id"]
tmp_db.name = v["name"]
tmp_db.place = idPlace
tmp_db.sensor = idSensor
tmp_db.tag = tag
exist_rfids = [rfid.mUID for rfid in cIntelCab.TagObjList]
                if tag in exist_rfids:  # present in the cabinet
tmp_db.state = EXIST
world_pos = getWorldFramePos(cObj.mX, cObj.mY)
tmp_db.x = world_pos.point.x
tmp_db.y = world_pos.point.y
tmp_db.z = world_pos.point.z
tmp_db.weight = cObj.mWeight
                else:  # not present in the cabinet
tmp_db.state = NONE
tmp_db.x = -1.0
tmp_db.y = -1.0
tmp_db.z = -1.0
msg.tmsdb.append(tmp_db)
cIntelCab.PrintObjInfo()
db_pub.publish(msg)
return 0
if __name__ == '__main__':
main()
| 34.812596
| 106
| 0.565989
|
0f137a52d6dc5ea03fb3a06456053365d1e4ed21
| 5,085
|
py
|
Python
|
tests/benchmark/context/test_images.py
|
kambiz-aghaiepour/rally
|
be708bacf0bc898a9538b9b6cb0ba4e1c015c1f2
|
[
"Apache-2.0"
] | 1
|
2020-09-09T19:01:41.000Z
|
2020-09-09T19:01:41.000Z
|
tests/benchmark/context/test_images.py
|
kambiz-aghaiepour/rally
|
be708bacf0bc898a9538b9b6cb0ba4e1c015c1f2
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark/context/test_images.py
|
kambiz-aghaiepour/rally
|
be708bacf0bc898a9538b9b6cb0ba4e1c015c1f2
|
[
"Apache-2.0"
] | 1
|
2020-09-09T19:01:43.000Z
|
2020-09-09T19:01:43.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import jsonschema
import mock
from rally.benchmark.context import images
from rally import exceptions
from tests import fakes
from tests import test
CTX = "rally.benchmark.context"
SCN = "rally.benchmark.scenarios"
class ImageGeneratorTestCase(test.TestCase):
def test_init(self):
context = {}
context["task"] = mock.MagicMock()
context["config"] = {
"images": {
"image_url": "mock_url",
"image_type": "qcow2",
"image_container": "bare",
"images_per_tenant": 4,
}
}
new_context = copy.deepcopy(context)
new_context["images"] = []
images.ImageGenerator(context)
self.assertEqual(new_context, context)
def test_init_validation(self):
context = {}
context["task"] = mock.MagicMock()
context["config"] = {
"images": {
"image_url": "mock_url"
}
}
self.assertRaises(jsonschema.ValidationError,
images.ImageGenerator.validate, context)
@mock.patch("%s.glance.utils.GlanceScenario._create_image" % SCN,
return_value=fakes.FakeImage(id="uuid"))
@mock.patch("%s.images.osclients" % CTX)
def test_setup(self, mock_osclients, mock_image_create):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
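        # Expected result: 2 tenants with 5 users each, and every tenant ends up
        # with a list of 5 identical fake image ids created via the mocked Glance call.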
image_list = ["uuid"] * 5
image_key = [{'image_id': image_list, 'endpoint': 'endpoint',
'tenant_id': i} for i in range(2)]
user_key = [{'id': i, 'tenant_id': j, 'endpoint': 'endpoint'}
for j in range(2)
for i in range(5)]
real_context = {
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 5,
"concurrent": 10,
},
"images": {
"image_url": "mock_url",
"image_type": "qcow2",
"image_container": "bare",
"images_per_tenant": 5,
}
},
"admin": {
"endpoint": mock.MagicMock()
},
"task": mock.MagicMock(),
"users": user_key,
}
new_context = copy.deepcopy(real_context)
new_context["images"] = image_key
images_ctx = images.ImageGenerator(real_context)
images_ctx.setup()
self.assertEqual(new_context, real_context)
@mock.patch("%s.images.osclients" % CTX)
@mock.patch("%s.cleanup.utils.delete_glance_resources" % CTX)
def test_cleanup(self, mock_image_remover, mock_osclients):
image_list = ["uuid"] * 5
image_key = [{'image_id': image_list, 'endpoint': 'endpoint',
'tenant_id': i} for i in range(2)]
user_key = [{'id': i, 'tenant_id': j, 'endpoint': 'endpoint'}
for j in range(2)
for i in range(5)]
context = {
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 5,
"concurrent": 10,
},
"images": {
"image_url": "mock_url",
"image_type": "qcow2",
"image_container": "bare",
"images_per_tenant": 5,
}
},
"admin": {
"endpoint": mock.MagicMock()
},
"task": mock.MagicMock(),
"users": user_key,
"images": image_key,
}
images_ctx = images.ImageGenerator(context)
images_ctx.cleanup()
self.assertEqual(2, len(mock_image_remover.mock_calls))
mock_image_remover.side_effect = Exception('failed_deletion')
self.assertRaises(exceptions.ImageCleanUpException, images_ctx.cleanup)
def test_validate_semantic(self):
users = [fakes.FakeClients()]
images.ImageGenerator.validate_semantic(None, None, users, None)
@mock.patch("%s.images.osclients.Clients.glance" % CTX)
    def test_validate_semantic_unavailable(self, mock_glance):
mock_glance.side_effect = Exception("list error")
self.assertRaises(exceptions.InvalidScenarioArgument,
images.ImageGenerator.validate_semantic, None, None,
None, None)
| 33.019481
| 79
| 0.549853
|
3c6c02d4afe0732203d8d74effab019986daf0ea
| 33,486
|
py
|
Python
|
utils/trainer.py
|
guochengqian/KPConv-PyTorch
|
4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa
|
[
"MIT"
] | 2
|
2021-08-04T17:15:04.000Z
|
2021-12-06T14:37:46.000Z
|
utils/trainer.py
|
guochengqian/KPConv-PyTorch-DeepGCN
|
4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa
|
[
"MIT"
] | null | null | null |
utils/trainer.py
|
guochengqian/KPConv-PyTorch-DeepGCN
|
4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa
|
[
"MIT"
] | null | null | null |
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling the training of any model
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import torch
import torch.nn as nn
import numpy as np
import pickle
import os
from os import makedirs, remove
from os.path import exists, join
import time
import sys
# PLY reader
from utils.ply import read_ply, write_ply
# Metrics
from utils.metrics import IoU_from_confusions, fast_confusion
from utils.config import Config
from sklearn.neighbors import KDTree
from models.blocks import KPConv
# ----------------------------------------------------------------------------------------------------------------------
#
# Trainer Class
# \*******************/
#
class ModelTrainer:
# Initialization methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, net, config, chkp_path=None, finetune=False, on_gpu=True):
"""
Initialize training parameters and reload previous model for restore/finetune
:param net: network object
:param config: configuration object
:param chkp_path: path to the checkpoint that needs to be loaded (None for new training)
:param finetune: finetune from checkpoint (True) or restore training from checkpoint (False)
:param on_gpu: Train on GPU or CPU
"""
############
# Parameters
############
# Epoch index
self.epoch = 0
self.step = 0
# Optimizer with specific learning rate for deformable KPConv
deform_params = [v for k, v in net.named_parameters() if 'offset' in k]
other_params = [v for k, v in net.named_parameters() if 'offset' not in k]
deform_lr = config.learning_rate * config.deform_lr_factor
self.optimizer = torch.optim.SGD([{'params': other_params},
{'params': deform_params, 'lr': deform_lr}],
lr=config.learning_rate,
momentum=config.momentum,
weight_decay=config.weight_decay)
# Choose to train on CPU or GPU
if on_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
net.to(self.device)
##########################
# Load previous checkpoint
##########################
if (chkp_path is not None):
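            # finetune: reload only the model weights (keep the fresh optimizer and epoch);
            # restore: also reload the optimizer state and the epoch counter.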
if finetune:
checkpoint = torch.load(chkp_path)
net.load_state_dict(checkpoint['model_state_dict'])
net.train()
print("Model restored and ready for finetuning.")
else:
checkpoint = torch.load(chkp_path)
net.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.epoch = checkpoint['epoch']
net.train()
print("Model and training state restored.")
# Path of the result folder
if config.saving:
if config.saving_path is None:
timestamp = time.strftime('%Y%m%d-%H%M%S')
config.saving_path = 'results/Log_{}_{}'.format(config.model, timestamp)
if not exists(config.saving_path):
makedirs(config.saving_path)
print('creating folder at {}'.format(config.saving_path))
config.save()
return
# Training main method
# ------------------------------------------------------------------------------------------------------------------
def train(self, net, training_loader, val_loader, config):
"""
Train the model on a particular dataset.
"""
################
# Initialization
################
if config.saving:
# Training log file
with open(join(config.saving_path, 'training.txt'), "w") as file:
file.write('epochs steps out_loss offset_loss train_accuracy time\n')
# Killing file (simply delete this file when you want to stop the training)
PID_file = join(config.saving_path, 'running_PID.txt')
if not exists(PID_file):
with open(PID_file, "w") as file:
file.write('Launched with PyCharm')
# Checkpoints directory
checkpoint_directory = join(config.saving_path, 'checkpoints')
if not exists(checkpoint_directory):
makedirs(checkpoint_directory)
else:
checkpoint_directory = None
PID_file = None
# Loop variables
t0 = time.time()
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
# Start training loop
for epoch in range(config.max_epoch):
# Remove File for kill signal
            if epoch == config.max_epoch - 1 and PID_file is not None and exists(PID_file):
remove(PID_file)
self.step = 0
for batch in training_loader:
# Check kill signal (running_PID.txt deleted)
if config.saving and not exists(PID_file):
continue
##################
# Processing batch
##################
# New time
t = t[-1:]
t += [time.time()]
if 'cuda' in self.device.type:
batch.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass
outputs = net(batch, config)
loss = net.loss(outputs, batch.labels)
acc = net.accuracy(outputs, batch.labels)
t += [time.time()]
# Backward + optimize
loss.backward()
if config.grad_clip_norm > 0:
#torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm)
torch.nn.utils.clip_grad_value_(net.parameters(), config.grad_clip_norm)
self.optimizer.step()
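                # wait for the GPU to finish so the timings below measure real work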
torch.cuda.synchronize(self.device)
t += [time.time()]
# Average timing
if self.step < 2:
mean_dt = np.array(t[1:]) - np.array(t[:-1])
else:
mean_dt = 0.9 * mean_dt + 0.1 * (np.array(t[1:]) - np.array(t[:-1]))
# Console display (only one per second)
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})'
print(message.format(self.epoch, self.step,
loss.item(),
100*acc,
1000 * mean_dt[0],
1000 * mean_dt[1],
1000 * mean_dt[2]))
# Log file
if config.saving:
with open(join(config.saving_path, 'training.txt'), "a") as file:
message = '{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n'
file.write(message.format(self.epoch,
self.step,
net.output_loss,
net.reg_loss,
acc,
t[-1] - t0))
self.step += 1
##############
# End of epoch
##############
# Check kill signal (running_PID.txt deleted)
if config.saving and not exists(PID_file):
break
# Update learning rate
if self.epoch in config.lr_decays:
for param_group in self.optimizer.param_groups:
param_group['lr'] *= config.lr_decays[self.epoch]
# Update epoch
self.epoch += 1
# Saving
if config.saving:
# Get current state dict
save_dict = {'epoch': self.epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'saving_path': config.saving_path}
# Save current state of the network (for restoring purposes)
checkpoint_path = join(checkpoint_directory, 'current_chkp.tar')
torch.save(save_dict, checkpoint_path)
# Save checkpoints occasionally
if (self.epoch + 1) % config.checkpoint_gap == 0:
checkpoint_path = join(checkpoint_directory, 'chkp_{:04d}.tar'.format(self.epoch + 1))
torch.save(save_dict, checkpoint_path)
# Validation
net.eval()
self.validation(net, val_loader, config)
net.train()
print('Finished Training')
return
# Validation methods
# ------------------------------------------------------------------------------------------------------------------
def validation(self, net, val_loader, config: Config):
if config.dataset_task == 'classification':
self.object_classification_validation(net, val_loader, config)
elif config.dataset_task == 'segmentation':
self.object_segmentation_validation(net, val_loader, config)
elif config.dataset_task == 'cloud_segmentation':
self.cloud_segmentation_validation(net, val_loader, config)
elif config.dataset_task == 'slam_segmentation':
self.slam_segmentation_validation(net, val_loader, config)
else:
raise ValueError('No validation method implemented for this network type')
def object_classification_validation(self, net, val_loader, config):
"""
Perform a round of validation and show/save results
:param net: network object
:param val_loader: data loader for validation set
:param config: configuration object
"""
############
# Initialize
############
        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
val_smooth = 0.95
# Number of classes predicted by the model
nc_model = config.num_classes
softmax = torch.nn.Softmax(1)
# Initialize global prediction over all models
if not hasattr(self, 'val_probs'):
self.val_probs = np.zeros((val_loader.dataset.num_models, nc_model))
#####################
# Network predictions
#####################
probs = []
targets = []
obj_inds = []
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
# Start validation loop
with torch.no_grad():
for batch in val_loader:
# New time
t = t[-1:]
t += [time.time()]
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
# Get probs and labels
probs += [softmax(outputs).cpu().detach().numpy()]
targets += [batch.labels.cpu().numpy()]
obj_inds += [batch.model_inds.cpu().numpy()]
torch.cuda.synchronize(self.device)
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(100 * len(obj_inds) / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
# Stack all validation predictions
probs = np.vstack(probs)
targets = np.hstack(targets)
obj_inds = np.hstack(obj_inds)
###################
# Voting validation
###################
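        # Exponentially smooth each model's class probabilities across successive
        # validation passes (val_smooth = 0.95); repeated "votes" stabilise the prediction.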
self.val_probs[obj_inds] = val_smooth * self.val_probs[obj_inds] + (1-val_smooth) * probs
############
# Confusions
############
validation_labels = np.array(val_loader.dataset.label_values)
# Compute classification results
C1 = fast_confusion(targets,
np.argmax(probs, axis=1),
validation_labels)
# Compute votes confusion
C2 = fast_confusion(val_loader.dataset.input_labels,
np.argmax(self.val_probs, axis=1),
validation_labels)
        # Saving (optional)
if config.saving:
print("Save confusions")
conf_list = [C1, C2]
file_list = ['val_confs.txt', 'vote_confs.txt']
for conf, conf_file in zip(conf_list, file_list):
test_file = join(config.saving_path, conf_file)
if exists(test_file):
with open(test_file, "a") as text_file:
for line in conf:
for value in line:
text_file.write('%d ' % value)
text_file.write('\n')
else:
with open(test_file, "w") as text_file:
for line in conf:
for value in line:
text_file.write('%d ' % value)
text_file.write('\n')
val_ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
vote_ACC = 100 * np.sum(np.diag(C2)) / (np.sum(C2) + 1e-6)
print('Accuracies : val = {:.1f}% / vote = {:.1f}%'.format(val_ACC, vote_ACC))
return C1
def cloud_segmentation_validation(self, net, val_loader, config, debug=False):
"""
Validation method for cloud segmentation models
"""
############
# Initialize
############
t0 = time.time()
        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
val_smooth = 0.95
softmax = torch.nn.Softmax(1)
# Do not validate if dataset has no validation cloud
if val_loader.dataset.validation_split not in val_loader.dataset.all_splits:
return
# Number of classes including ignored labels
nc_tot = val_loader.dataset.num_classes
# Number of classes predicted by the model
nc_model = config.num_classes
#print(nc_tot)
#print(nc_model)
# Initiate global prediction over validation clouds
if not hasattr(self, 'validation_probs'):
self.validation_probs = [np.zeros((l.shape[0], nc_model))
for l in val_loader.dataset.input_labels]
self.val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0
for label_value in val_loader.dataset.label_values:
if label_value not in val_loader.dataset.ignored_labels:
self.val_proportions[i] = np.sum([np.sum(labels == label_value)
for labels in val_loader.dataset.validation_labels])
i += 1
#####################
# Network predictions
#####################
predictions = []
targets = []
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
t1 = time.time()
# Start validation loop
with torch.no_grad():
for i, batch in enumerate(val_loader):
# New time
t = t[-1:]
t += [time.time()]
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
# Get probs and labels
stacked_probs = softmax(outputs).cpu().detach().numpy()
labels = batch.labels.cpu().numpy()
lengths = batch.lengths[0].cpu().numpy()
in_inds = batch.input_inds.cpu().numpy()
cloud_inds = batch.cloud_inds.cpu().numpy()
torch.cuda.synchronize(self.device)
# Get predictions and labels per instance
# ***************************************
i0 = 0
for b_i, length in enumerate(lengths):
# Get prediction
target = labels[i0:i0 + length]
probs = stacked_probs[i0:i0 + length]
inds = in_inds[i0:i0 + length]
c_i = cloud_inds[b_i]
# Update current probs in whole cloud
self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \
+ (1 - val_smooth) * probs
# Stack all prediction for this epoch
predictions.append(probs)
targets.append(target)
i0 += length
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
t2 = time.time()
# Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (probs, truth) in enumerate(zip(predictions, targets)):
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(val_loader.dataset.label_values):
if label_value in val_loader.dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1)
# Predicted labels
preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)]
# Confusions
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32)
t3 = time.time()
# Sum all confusions
C = np.sum(Confs, axis=0).astype(np.float32)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
if label_value in val_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
# Balance with real validation proportions
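        # Rescale each row of C so the class proportions of the sub-sampled batches
        # match the proportions measured over the full validation clouds.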
C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
t4 = time.time()
# Objects IoU
IoUs = IoU_from_confusions(C)
t5 = time.time()
        # Saving (optional)
if config.saving:
# Name of saving file
test_file = join(config.saving_path, 'val_IoUs.txt')
# Line to write:
line = ''
for IoU in IoUs:
line += '{:.3f} '.format(IoU)
line = line + '\n'
# Write in file
if exists(test_file):
with open(test_file, "a") as text_file:
text_file.write(line)
else:
with open(test_file, "w") as text_file:
text_file.write(line)
# Save potentials
pot_path = join(config.saving_path, 'potentials')
if not exists(pot_path):
makedirs(pot_path)
files = val_loader.dataset.files
for i, file_path in enumerate(files):
pot_points = np.array(val_loader.dataset.pot_trees[i].data, copy=False)
cloud_name = file_path.split('/')[-1]
pot_name = join(pot_path, cloud_name)
pots = val_loader.dataset.potentials[i].numpy().astype(np.float32)
write_ply(pot_name,
[pot_points.astype(np.float32), pots],
['x', 'y', 'z', 'pots'])
t6 = time.time()
# Print instance mean
mIoU = 100 * np.mean(IoUs)
print('{:s} mean IoU = {:.1f}%'.format(config.dataset, mIoU))
# Save predicted cloud occasionally
if config.saving and (self.epoch + 1) % config.checkpoint_gap == 0:
val_path = join(config.saving_path, 'val_preds_{:d}'.format(self.epoch + 1))
if not exists(val_path):
makedirs(val_path)
files = val_loader.dataset.files
for i, file_path in enumerate(files):
# Get points
points = val_loader.dataset.load_evaluation_points(file_path)
# Get probs on our own ply points
sub_probs = self.validation_probs[i]
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(val_loader.dataset.label_values):
if label_value in val_loader.dataset.ignored_labels:
sub_probs = np.insert(sub_probs, l_ind, 0, axis=1)
# Get the predicted labels
sub_preds = val_loader.dataset.label_values[np.argmax(sub_probs, axis=1).astype(np.int32)]
# Reproject preds on the evaluations points
preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(np.int32)
# Path of saved validation file
cloud_name = file_path.split('/')[-1]
val_name = join(val_path, cloud_name)
# Save file
labels = val_loader.dataset.validation_labels[i].astype(np.int32)
write_ply(val_name,
[points, preds, labels],
['x', 'y', 'z', 'preds', 'class'])
# Display timings
t7 = time.time()
if debug:
print('\n************************\n')
print('Validation timings:')
print('Init ...... {:.1f}s'.format(t1 - t0))
print('Loop ...... {:.1f}s'.format(t2 - t1))
print('Confs ..... {:.1f}s'.format(t3 - t2))
print('Confs bis . {:.1f}s'.format(t4 - t3))
print('IoU ....... {:.1f}s'.format(t5 - t4))
print('Save1 ..... {:.1f}s'.format(t6 - t5))
print('Save2 ..... {:.1f}s'.format(t7 - t6))
print('\n************************\n')
return
def slam_segmentation_validation(self, net, val_loader, config, debug=True):
"""
Validation method for slam segmentation models
"""
############
# Initialize
############
t0 = time.time()
# Do not validate if dataset has no validation cloud
if val_loader is None:
return
        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
val_smooth = 0.95
softmax = torch.nn.Softmax(1)
# Create folder for validation predictions
        if not exists(join(config.saving_path, 'val_preds')):
makedirs(join(config.saving_path, 'val_preds'))
# initiate the dataset validation containers
val_loader.dataset.val_points = []
val_loader.dataset.val_labels = []
# Number of classes including ignored labels
nc_tot = val_loader.dataset.num_classes
#####################
# Network predictions
#####################
predictions = []
targets = []
inds = []
val_i = 0
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
t1 = time.time()
# Start validation loop
with torch.no_grad():
for i, batch in enumerate(val_loader):
# New time
t = t[-1:]
t += [time.time()]
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
# Get probs and labels
stk_probs = softmax(outputs).cpu().detach().numpy()
lengths = batch.lengths[0].cpu().numpy()
f_inds = batch.frame_inds.cpu().numpy()
r_inds_list = batch.reproj_inds
r_mask_list = batch.reproj_masks
labels_list = batch.val_labels
torch.cuda.synchronize(self.device)
# Get predictions and labels per instance
# ***************************************
i0 = 0
for b_i, length in enumerate(lengths):
# Get prediction
probs = stk_probs[i0:i0 + length]
proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i]
frame_labels = labels_list[b_i]
s_ind = f_inds[b_i, 0]
f_ind = f_inds[b_i, 1]
# Project predictions on the frame points
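                    # probs were computed on the sub-sampled input points; proj_inds
                    # (precomputed reprojection indices) map each full-frame point back
                    # onto one of those sub-sampled points.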
proj_probs = probs[proj_inds]
# Safe check if only one point:
if proj_probs.ndim < 2:
proj_probs = np.expand_dims(proj_probs, 0)
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(val_loader.dataset.label_values):
if label_value in val_loader.dataset.ignored_labels:
proj_probs = np.insert(proj_probs, l_ind, 0, axis=1)
# Predicted labels
preds = val_loader.dataset.label_values[np.argmax(proj_probs, axis=1)]
# Save predictions in a binary file
filename = '{:s}_{:07d}.npy'.format(val_loader.dataset.sequences[s_ind], f_ind)
filepath = join(config.saving_path, 'val_preds', filename)
if exists(filepath):
frame_preds = np.load(filepath)
else:
frame_preds = np.zeros(frame_labels.shape, dtype=np.uint8)
frame_preds[proj_mask] = preds.astype(np.uint8)
np.save(filepath, frame_preds)
# Save some of the frame pots
if f_ind % 20 == 0:
seq_path = join(val_loader.dataset.path, 'sequences', val_loader.dataset.sequences[s_ind])
velo_file = join(seq_path, 'velodyne', val_loader.dataset.frames[s_ind][f_ind] + '.bin')
frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4))
write_ply(filepath[:-4] + '_pots.ply',
[frame_points[:, :3], frame_labels, frame_preds],
['x', 'y', 'z', 'gt', 'pre'])
# Update validation confusions
frame_C = fast_confusion(frame_labels,
frame_preds.astype(np.int32),
val_loader.dataset.label_values)
val_loader.dataset.val_confs[s_ind][f_ind, :, :] = frame_C
# Stack all prediction for this epoch
predictions += [preds]
targets += [frame_labels[proj_mask]]
inds += [f_inds[b_i, :]]
val_i += 1
i0 += length
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
t2 = time.time()
# Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (preds, truth) in enumerate(zip(predictions, targets)):
# Confusions
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32)
t3 = time.time()
#######################################
# Results on this subpart of validation
#######################################
# Sum all confusions
C = np.sum(Confs, axis=0).astype(np.float32)
# Balance with real validation proportions
C *= np.expand_dims(val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
if label_value in val_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
# Objects IoU
IoUs = IoU_from_confusions(C)
#####################################
# Results on the whole validation set
#####################################
t4 = time.time()
# Sum all validation confusions
C_tot = [np.sum(seq_C, axis=0) for seq_C in val_loader.dataset.val_confs if len(seq_C) > 0]
C_tot = np.sum(np.stack(C_tot, axis=0), axis=0)
if debug:
s = '\n'
for cc in C_tot:
for c in cc:
s += '{:8.1f} '.format(c)
s += '\n'
print(s)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
if label_value in val_loader.dataset.ignored_labels:
C_tot = np.delete(C_tot, l_ind, axis=0)
C_tot = np.delete(C_tot, l_ind, axis=1)
# Objects IoU
val_IoUs = IoU_from_confusions(C_tot)
t5 = time.time()
        # Saving (optional)
if config.saving:
IoU_list = [IoUs, val_IoUs]
file_list = ['subpart_IoUs.txt', 'val_IoUs.txt']
for IoUs_to_save, IoU_file in zip(IoU_list, file_list):
# Name of saving file
test_file = join(config.saving_path, IoU_file)
# Line to write:
line = ''
for IoU in IoUs_to_save:
line += '{:.3f} '.format(IoU)
line = line + '\n'
# Write in file
if exists(test_file):
with open(test_file, "a") as text_file:
text_file.write(line)
else:
with open(test_file, "w") as text_file:
text_file.write(line)
# Print instance mean
mIoU = 100 * np.mean(IoUs)
print('{:s} : subpart mIoU = {:.1f} %'.format(config.dataset, mIoU))
mIoU = 100 * np.mean(val_IoUs)
print('{:s} : val mIoU = {:.1f} %'.format(config.dataset, mIoU))
t6 = time.time()
# Display timings
if debug:
print('\n************************\n')
print('Validation timings:')
print('Init ...... {:.1f}s'.format(t1 - t0))
print('Loop ...... {:.1f}s'.format(t2 - t1))
print('Confs ..... {:.1f}s'.format(t3 - t2))
print('IoU1 ...... {:.1f}s'.format(t4 - t3))
print('IoU2 ...... {:.1f}s'.format(t5 - t4))
print('Save ...... {:.1f}s'.format(t6 - t5))
print('\n************************\n')
return
| 35.69936
| 120
| 0.48032
|
61154d9feb90b7517b4d2d9c4c451bb6a89af52d
| 125
|
py
|
Python
|
Algorithms/Edge/scale-free.py
|
Kanavoy/UODS
|
2da38b749e721b051aeaa6a7bcb3a921aeb5a09c
|
[
"MIT"
] | null | null | null |
Algorithms/Edge/scale-free.py
|
Kanavoy/UODS
|
2da38b749e721b051aeaa6a7bcb3a921aeb5a09c
|
[
"MIT"
] | null | null | null |
Algorithms/Edge/scale-free.py
|
Kanavoy/UODS
|
2da38b749e721b051aeaa6a7bcb3a921aeb5a09c
|
[
"MIT"
] | null | null | null |
def make_graph(opts):
from networkx import barabasi_albert_graph
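    # barabasi_albert_graph(n, m) grows a scale-free graph by preferential
    # attachment, connecting each new node to m existing nodes.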
return barabasi_albert_graph(opts.graph.n, opts.graph.e)
| 41.666667
| 57
| 0.824
|
055f631b74e5b51f9483f6ceb2f8212567d0ded9
| 23,172
|
py
|
Python
|
modules/gans/dcgan.py
|
Matesxs/GAN-Playground
|
20c8f4bf491fd175883d0d812185b336960329e9
|
[
"MIT"
] | 2
|
2020-09-02T21:08:07.000Z
|
2021-12-09T23:03:37.000Z
|
modules/gans/dcgan.py
|
Matesxs/GAN-Playground
|
20c8f4bf491fd175883d0d812185b336960329e9
|
[
"MIT"
] | null | null | null |
modules/gans/dcgan.py
|
Matesxs/GAN-Playground
|
20c8f4bf491fd175883d0d812185b336960329e9
|
[
"MIT"
] | 1
|
2020-12-15T15:32:10.000Z
|
2020-12-15T15:32:10.000Z
|
import os
import numpy as np
from keras.optimizers import Adam, Optimizer
from keras.models import Model
from keras.layers import Input, Dense
from keras.initializers import RandomNormal
from keras.utils import plot_model
from keras.engine.network import Network
import keras.backend as K
from PIL import Image
from cv2 import cv2 as cv
import random
import time
from colorama import Fore
from collections import deque
from typing import Union
import json
from statistics import mean
import imagesize
from multiprocessing.pool import ThreadPool
from ..utils.batch_maker import BatchMaker
from ..models import discriminator_models_spreadsheet, generator_models_spreadsheet
from ..keras_extensions.custom_tensorboard import TensorBoardCustom
from ..utils.helpers import time_to_format, get_paths_of_files_from_path
class DCGAN:
  CONTROL_THRESHOLD = 100_000  # Threshold after which we start testing the training process
AGREGATE_STAT_INTERVAL = 1_000 # Interval of saving data
GRADIENT_CHECK_INTERVAL = 10_000 # Interval of checking norm gradient value of combined model
CHECKPOINT_SAVE_INTERVAL = 1_000 # Interval of saving checkpoint
def __init__(self, dataset_path:str,
gen_mod_name: str, disc_mod_name: str,
latent_dim:int,
training_progress_save_path:str,
testing_dataset_path:str=None,
generator_optimizer:Optimizer=Adam(0.0002, 0.5), discriminator_optimizer:Optimizer=Adam(0.0002, 0.5),
discriminator_label_noise:float=None, discriminator_label_noise_decay:float=None, discriminator_label_noise_min:float=0.001,
batch_size: int = 32, buffered_batches:int=20,
generator_weights:Union[str, None]=None, discriminator_weights:Union[str, None]=None,
start_episode:int=0, load_from_checkpoint:bool=False,
check_dataset:bool=True, num_of_loading_workers:int=8):
self.disc_mod_name = disc_mod_name
self.gen_mod_name = gen_mod_name
self.generator_optimizer = generator_optimizer
self.latent_dim = latent_dim
assert self.latent_dim > 0, Fore.RED + "Invalid latent dim" + Fore.RESET
self.batch_size = batch_size
assert self.batch_size > 0, Fore.RED + "Invalid batch size" + Fore.RESET
self.discriminator_label_noise = discriminator_label_noise
self.discriminator_label_noise_decay = discriminator_label_noise_decay
self.discriminator_label_noise_min = discriminator_label_noise_min
self.progress_image_dim = (16, 9)
if start_episode < 0: start_episode = 0
self.episode_counter = start_episode
# Initialize training data folder and logging
self.training_progress_save_path = training_progress_save_path
self.training_progress_save_path = os.path.join(self.training_progress_save_path, f"{self.gen_mod_name}__{self.disc_mod_name}")
self.tensorboard = TensorBoardCustom(log_dir=os.path.join(self.training_progress_save_path, "logs"))
# Create array of input image paths
self.train_data = get_paths_of_files_from_path(dataset_path, only_files=True)
assert self.train_data, Fore.RED + "Training dataset is not loaded" + Fore.RESET
self.testing_data = None
if testing_dataset_path:
self.testing_data = get_paths_of_files_from_path(testing_dataset_path)
assert self.testing_data, Fore.RED + "Testing dataset is not loaded" + Fore.RESET
# Load one image to get shape of it
tmp_image = cv.imread(self.train_data[0])
self.image_shape = tmp_image.shape
self.image_channels = self.image_shape[2]
# Check image size validity
if self.image_shape[0] < 4 or self.image_shape[1] < 4: raise Exception("Images too small, min size (4, 4)")
# Check validity of whole datasets
if check_dataset:
self.validate_dataset()
# Define static vars
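    # static_noise is a fixed batch of latent vectors reused for every progress image,
    # so successive snapshots show the generator evolving on the same inputs.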
if os.path.exists(f"{self.training_progress_save_path}/static_noise.npy"):
self.static_noise = np.load(f"{self.training_progress_save_path}/static_noise.npy")
if self.static_noise.shape[0] != (self.progress_image_dim[0] * self.progress_image_dim[1]):
print(Fore.YELLOW + "Progress image dim changed, restarting static noise!" + Fore.RESET)
os.remove(f"{self.training_progress_save_path}/static_noise.npy")
self.static_noise = np.random.normal(0.0, 1.0, size=(self.progress_image_dim[0] * self.progress_image_dim[1], self.latent_dim))
else:
self.static_noise = np.random.normal(0.0, 1.0, size=(self.progress_image_dim[0] * self.progress_image_dim[1], self.latent_dim))
self.kernel_initializer = RandomNormal(stddev=0.02)
# Load checkpoint
self.initiated = False
loaded_gen_weights_path = None
loaded_disc_weights_path = None
if load_from_checkpoint:
loaded_gen_weights_path, loaded_disc_weights_path = self.load_checkpoint()
# Create batchmaker and start it
self.batch_maker = BatchMaker(self.train_data, self.batch_size, buffered_batches=buffered_batches, num_of_loading_workers=num_of_loading_workers)
self.testing_batchmaker = None
if self.testing_data:
self.testing_batchmaker = BatchMaker(self.testing_data, self.batch_size, buffered_batches=buffered_batches, num_of_loading_workers=num_of_loading_workers)
self.testing_batchmaker.start()
#################################
### Create discriminator ###
#################################
self.discriminator = self.build_discriminator(disc_mod_name)
self.discriminator.compile(loss="binary_crossentropy", optimizer=discriminator_optimizer)
#################################
### Create generator ###
#################################
self.generator = self.build_generator(gen_mod_name)
if self.generator.output_shape[1:] != self.image_shape: raise Exception("Invalid image input size for this generator model")
#################################
### Create combined generator ###
#################################
noise_input = Input(shape=(self.latent_dim,), name="noise_input")
gen_images = self.generator(noise_input)
# Create frozen version of discriminator
frozen_discriminator = Network(self.discriminator.inputs, self.discriminator.outputs, name="frozen_discriminator")
frozen_discriminator.trainable = False
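    # The frozen copy shares weights with self.discriminator but is marked non-trainable,
    # so generator updates through the combined model cannot change the discriminator,
    # while the standalone discriminator model remains trainable for its own step.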
    # Discriminator takes images and determines validity
valid = frozen_discriminator(gen_images)
# Combine models
# Train generator to fool discriminator
self.combined_generator_model = Model(noise_input, valid, name="dcgan_model")
self.combined_generator_model.compile(loss="binary_crossentropy", optimizer=self.generator_optimizer)
# Print all summaries
print("\nDiscriminator Summary:")
self.discriminator.summary()
print("\nGenerator Summary:")
self.generator.summary()
print("\nGAN Summary")
self.combined_generator_model.summary()
# Load weights from checkpoint
try:
if loaded_gen_weights_path: self.generator.load_weights(loaded_gen_weights_path)
except:
print(Fore.YELLOW + "Failed to load generator weights from checkpoint" + Fore.RESET)
try:
if loaded_disc_weights_path: self.discriminator.load_weights(loaded_disc_weights_path)
except:
print(Fore.YELLOW + "Failed to load discriminator weights from checkpoint" + Fore.RESET)
# Load weights from param and override checkpoint weights
if generator_weights: self.generator.load_weights(generator_weights)
if discriminator_weights: self.discriminator.load_weights(discriminator_weights)
# Function for creating gradient generator
def gradient_norm_generator(self):
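    # Builds a K.function that returns the global L2 norm of the combined model's
    # loss gradients w.r.t. all trainable weights (used for the periodic gradient-norm check).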
grads = K.gradients(self.combined_generator_model.total_loss, self.combined_generator_model.trainable_weights)
summed_squares = [K.sum(K.square(g)) for g in grads]
norm = K.sqrt(sum(summed_squares))
inputs = self.combined_generator_model._feed_inputs + self.combined_generator_model._feed_targets + self.combined_generator_model._feed_sample_weights
func = K.function(inputs, [norm])
return func
# Check if datasets have consistent shapes
def validate_dataset(self):
def check_image(image_path):
im_shape = imagesize.get(image_path)
if im_shape[0] != self.image_shape[0] or im_shape[1] != self.image_shape[1]:
return False
return True
print(Fore.BLUE + "Checking dataset validity" + Fore.RESET)
with ThreadPool(processes=8) as p:
res = p.map(check_image, self.train_data)
if not all(res): raise Exception("Inconsistent training dataset")
if self.testing_data:
res = p.map(check_image, self.testing_data)
if not all(res): raise Exception("Inconsistent testing dataset")
print(Fore.BLUE + "Dataset valid" + Fore.RESET)
# Create generator based on template selected by name
def build_generator(self, model_name:str):
noise = Input(shape=(self.latent_dim,))
try:
m = getattr(generator_models_spreadsheet, model_name)(noise, self.image_shape, self.image_channels, self.kernel_initializer)
except Exception as e:
raise Exception(f"Generator model not found!\n{e}")
return Model(noise, m, name="generator_model")
  # Create discriminator based on template selected by name
def build_discriminator(self, model_name:str, classification:bool=True):
img = Input(shape=self.image_shape)
try:
m = getattr(discriminator_models_spreadsheet, model_name)(img, self.kernel_initializer)
except Exception as e:
raise Exception(f"Discriminator model not found!\n{e}")
if classification:
m = Dense(1, activation="sigmoid")(m)
return Model(img, m, name="discriminator_model")
def train(self, target_episode:int,
feed_prev_gen_batch:bool=False, feed_old_perc_amount:float=0.2,
progress_images_save_interval:int=None, save_raw_progress_images:bool=True, weights_save_interval:int=None,
discriminator_smooth_real_labels:bool=False, discriminator_smooth_fake_labels:bool=False,
generator_smooth_labels:bool=False):
# Function for adding random noise to labels (flipping them)
def noising_labels(labels: np.ndarray, noise_ammount:float=0.01):
array = np.zeros(labels.shape)
for idx in range(labels.shape[0]):
if random.random() < noise_ammount:
array[idx] = 1 - labels[idx]
if array[idx] < 0: array[idx] = -array[idx]
else:
array[idx] = labels[idx]
      return array  # return the (possibly) noised copy, not the untouched input labels
# Function for replacing new generated images with old generated images
def replace_random_images(orig_images: np.ndarray, repl_images: deque, perc_ammount:float=0.20):
repl_images = np.array(repl_images)
for idx in range(orig_images.shape[0]):
if random.random() < perc_ammount:
orig_images[idx] = repl_images[random.randint(0, repl_images.shape[0] - 1)]
return orig_images
# Check arguments and input data
assert target_episode > 0, Fore.RED + "Invalid number of epochs" + Fore.RESET
if progress_images_save_interval:
assert progress_images_save_interval <= target_episode, Fore.RED + "Invalid progress save interval" + Fore.RESET
if weights_save_interval:
assert weights_save_interval <= target_episode, Fore.RED + "Invalid weights save interval" + Fore.RESET
# Calculate epochs to go
end_episode = target_episode
target_episode = target_episode - self.episode_counter
assert target_episode > 0, Fore.CYAN + "Training is already finished" + Fore.RESET
# Save noise for progress consistency
if progress_images_save_interval is not None:
if not os.path.exists(self.training_progress_save_path): os.makedirs(self.training_progress_save_path)
np.save(f"{self.training_progress_save_path}/static_noise.npy", self.static_noise)
# Training variables
prev_gen_images = deque(maxlen=3*self.batch_size)
get_gradients = self.gradient_norm_generator()
epochs_time_history = deque(maxlen=self.AGREGATE_STAT_INTERVAL * 50)
# Save starting kernels and biases
if not self.initiated:
self.__save_imgs(save_raw_progress_images)
self.tensorboard.log_kernels_and_biases(self.generator)
self.save_checkpoint()
print(Fore.GREEN + f"Starting training on episode {self.episode_counter} for {target_episode} episodes" + Fore.RESET)
for _ in range(target_episode):
ep_start = time.time()
### Train Discriminator ###
# Select batch of valid images
imgs = self.batch_maker.get_batch()
# Sample noise and generate new images
gen_imgs = self.generator.predict(np.random.normal(0.0, 1.0, (self.batch_size, self.latent_dim)))
# Train discriminator (real as ones and fake as zeros)
if discriminator_smooth_real_labels:
disc_real_labels = np.random.uniform(0.8, 1.0, size=(self.batch_size, 1))
else:
disc_real_labels = np.ones(shape=(self.batch_size, 1))
if discriminator_smooth_fake_labels:
disc_fake_labels = np.random.uniform(0, 0.2, size=(self.batch_size, 1))
else:
disc_fake_labels = np.zeros(shape=(self.batch_size, 1))
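      # (Added note: drawing real labels from [0.8, 1.0] and fake labels from [0.0, 0.2] is the
      # usual label-smoothing trick that keeps the discriminator from becoming overconfident
      # and tends to stabilise GAN training.)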
if feed_prev_gen_batch:
if len(prev_gen_images) > 0:
tmp_imgs = replace_random_images(gen_imgs, prev_gen_images, feed_old_perc_amount)
prev_gen_images += deque(gen_imgs)
gen_imgs = tmp_imgs
else:
prev_gen_images += deque(gen_imgs)
# Adding random noise to discriminator labels
if self.discriminator_label_noise and self.discriminator_label_noise > 0:
disc_real_labels = noising_labels(disc_real_labels, self.discriminator_label_noise / 2)
disc_fake_labels = noising_labels(disc_fake_labels, self.discriminator_label_noise / 2)
self.discriminator.trainable = True
disc_real_loss = self.discriminator.train_on_batch(imgs, disc_real_labels)
disc_fake_loss = self.discriminator.train_on_batch(gen_imgs, disc_fake_labels)
### Train Generator ###
# Train generator (wants discriminator to recognize fake images as valid)
if generator_smooth_labels:
gen_labels = np.random.uniform(0.8, 1.0, size=(self.batch_size, 1))
else:
gen_labels = np.ones(shape=(self.batch_size, 1))
self.discriminator.trainable = False
gan_loss = self.combined_generator_model.train_on_batch(np.random.normal(0.0, 1.0, (self.batch_size, self.latent_dim)), gen_labels)
self.episode_counter += 1
self.tensorboard.step = self.episode_counter
self.tensorboard.update_stats(disc_real_loss=disc_real_loss, disc_fake_loss=disc_fake_loss, gan_loss=gan_loss, disc_label_noise=self.discriminator_label_noise if self.discriminator_label_noise else 0)
# Decay label noise
if self.discriminator_label_noise and self.discriminator_label_noise_decay:
self.discriminator_label_noise = max([self.discriminator_label_noise_min, (self.discriminator_label_noise * self.discriminator_label_noise_decay)])
if (self.discriminator_label_noise_min == 0) and (self.discriminator_label_noise != 0) and (self.discriminator_label_noise < 0.001):
self.discriminator_label_noise = 0
      # Save stats and print them to console
if self.episode_counter % self.AGREGATE_STAT_INTERVAL == 0:
self.tensorboard.log_kernels_and_biases(self.generator)
# Change color of log according to state of training
print(Fore.GREEN + f"{self.episode_counter}/{end_episode}, Remaining: {time_to_format(mean(epochs_time_history) * (end_episode - self.episode_counter))}\t\t[D-R loss: {round(float(disc_real_loss), 5)}, D-F loss: {round(float(disc_fake_loss), 5)}] [G loss: {round(float(gan_loss), 5)}] - Epsilon: {round(self.discriminator_label_noise, 4) if self.discriminator_label_noise else 0}" + Fore.RESET)
# Save progress
if self.training_progress_save_path is not None and progress_images_save_interval is not None and self.episode_counter % progress_images_save_interval == 0:
self.__save_imgs(save_raw_progress_images)
# Save weights of models
if weights_save_interval is not None and self.episode_counter % weights_save_interval == 0:
self.__save_weights()
# Save checkpoint
if self.episode_counter % self.CHECKPOINT_SAVE_INTERVAL == 0:
self.save_checkpoint()
print(Fore.BLUE + "Checkpoint created" + Fore.RESET)
if self.episode_counter % self.GRADIENT_CHECK_INTERVAL == 0:
# Generate evaluation noise and labels
eval_noise = np.random.normal(0.0, 1.0, (self.batch_size, self.latent_dim))
eval_labels = np.ones(shape=(self.batch_size, 1))
# Create gradient function and evaluate based on eval noise and labels
norm_gradient = get_gradients([eval_noise, eval_labels, np.ones(len(eval_labels))])[0]
# Check norm gradient
if norm_gradient > 100 and self.episode_counter > self.CONTROL_THRESHOLD:
print(Fore.RED + f"Current generator norm gradient: {norm_gradient}")
print("Gradient too high!" + Fore.RESET)
if input("Do you want exit training?\n") == "y": return
elif norm_gradient < 0.2 and self.episode_counter > self.CONTROL_THRESHOLD:
print(Fore.RED + f"Current generator norm gradient: {norm_gradient}")
print("Gradient vanished!" + Fore.RESET)
if input("Do you want exit training?\n") == "y": return
else:
print(Fore.BLUE + f"Current generator norm gradient: {norm_gradient}" + Fore.RESET)
# Change seed
np.random.seed(None)
random.seed()
epochs_time_history.append(time.time() - ep_start)
# Shutdown helper threads
print(Fore.GREEN + "Training Complete - Waiting for other threads to finish" + Fore.RESET)
    if self.testing_batchmaker: self.testing_batchmaker.terminate()  # use the public terminate() (as with batch_maker below); setting __terminate from outside is name-mangled and never reaches the batchmaker
self.batch_maker.terminate()
self.save_checkpoint()
self.__save_weights()
self.batch_maker.join()
if self.testing_batchmaker: self.testing_batchmaker.join()
print(Fore.GREEN + "All threads finished" + Fore.RESET)
# Function for saving progress images
def __save_imgs(self, save_raw_progress_images:bool=True):
if not os.path.exists(self.training_progress_save_path + "/progress_images"): os.makedirs(self.training_progress_save_path + "/progress_images")
gen_imgs = self.generator.predict(self.static_noise)
# Rescale images 0 to 255
gen_imgs = (0.5 * gen_imgs + 0.5) * 255
final_image = np.zeros(shape=(self.image_shape[0] * self.progress_image_dim[1], self.image_shape[1] * self.progress_image_dim[0], self.image_channels)).astype(np.float32)
cnt = 0
for i in range(self.progress_image_dim[1]):
for j in range(self.progress_image_dim[0]):
if self.image_channels == 3:
final_image[self.image_shape[0] * i:self.image_shape[0] * (i + 1), self.image_shape[1] * j:self.image_shape[1] * (j + 1), :] = gen_imgs[cnt]
else:
final_image[self.image_shape[0] * i:self.image_shape[0] * (i + 1), self.image_shape[1] * j:self.image_shape[1] * (j + 1), 0] = gen_imgs[cnt, :, :, 0]
cnt += 1
final_image = cv.cvtColor(final_image, cv.COLOR_RGB2BGR)
if save_raw_progress_images:
cv.imwrite(f"{self.training_progress_save_path}/progress_images/{self.episode_counter}.png", final_image)
self.tensorboard.write_image(np.reshape(cv.cvtColor(final_image, cv.COLOR_BGR2RGB) / 255, (-1, final_image.shape[0], final_image.shape[1], final_image.shape[2])).astype(np.float32))
def save_models_structure_images(self):
save_path = self.training_progress_save_path + "/model_structures"
if not os.path.exists(save_path): os.makedirs(save_path)
plot_model(self.combined_generator_model, os.path.join(save_path, "combined.png"), expand_nested=True, show_shapes=True)
plot_model(self.generator, os.path.join(save_path, "generator.png"), expand_nested=True, show_shapes=True)
plot_model(self.discriminator, os.path.join(save_path, "discriminator.png"), expand_nested=True, show_shapes=True)
def load_checkpoint(self):
checkpoint_base_path = os.path.join(self.training_progress_save_path, "checkpoint")
if not os.path.exists(os.path.join(checkpoint_base_path, "checkpoint_data.json")): return None, None
with open(os.path.join(checkpoint_base_path, "checkpoint_data.json"), "rb") as f:
data = json.load(f)
if data:
self.episode_counter = int(data["episode"])
if data["disc_label_noise"]:
self.discriminator_label_noise = float(data["disc_label_noise"])
self.initiated = True
return data["gen_path"], data["disc_path"]
return None, None
def save_checkpoint(self):
checkpoint_base_path = os.path.join(self.training_progress_save_path, "checkpoint")
if not os.path.exists(checkpoint_base_path): os.makedirs(checkpoint_base_path)
gen_path = f"{checkpoint_base_path}/generator_{self.gen_mod_name}.h5"
disc_path = f"{checkpoint_base_path}/discriminator_{self.disc_mod_name}.h5"
if os.path.exists(gen_path): os.rename(gen_path, f"{checkpoint_base_path}/generator_{self.gen_mod_name}.h5.lock")
if os.path.exists(disc_path): os.rename(disc_path, f"{checkpoint_base_path}/discriminator_{self.disc_mod_name}.h5.lock")
self.generator.save_weights(gen_path)
self.discriminator.save_weights(disc_path)
if os.path.exists(f"{checkpoint_base_path}/generator_{self.gen_mod_name}.h5.lock"): os.remove(f"{checkpoint_base_path}/generator_{self.gen_mod_name}.h5.lock")
if os.path.exists(f"{checkpoint_base_path}/discriminator_{self.disc_mod_name}.h5.lock"): os.remove(f"{checkpoint_base_path}/discriminator_{self.disc_mod_name}.h5.lock")
data = {
"episode": self.episode_counter,
"gen_path": gen_path,
"disc_path": disc_path,
"disc_label_noise": self.discriminator_label_noise
}
with open(os.path.join(checkpoint_base_path, "checkpoint_data.json"), "w", encoding='utf-8') as f:
json.dump(data, f)
def __save_weights(self):
save_dir = self.training_progress_save_path + "/weights/" + str(self.episode_counter)
if not os.path.exists(save_dir): os.makedirs(save_dir)
self.generator.save_weights(f"{save_dir}/generator_{self.gen_mod_name}.h5")
self.discriminator.save_weights(f"{save_dir}/discriminator_{self.disc_mod_name}.h5")
def make_progress_gif(self, frame_duration:int=16):
if not os.path.exists(self.training_progress_save_path): os.makedirs(self.training_progress_save_path)
if not os.path.exists(self.training_progress_save_path + "/progress_images"): return
frames = []
img_file_names = os.listdir(self.training_progress_save_path + "/progress_images")
for im_file in img_file_names:
if os.path.isfile(self.training_progress_save_path + "/progress_images/" + im_file):
frames.append(Image.open(self.training_progress_save_path + "/progress_images/" + im_file))
if len(frames) > 2:
frames[0].save(f"{self.training_progress_save_path}/progress_gif.gif", format="GIF", append_images=frames[1:], save_all=True, optimize=False, duration=frame_duration, loop=0)
| 48.174636
| 402
| 0.726739
|
50b47cc3cad5250e6aa8aee1ccb58867ae764069
| 2,491
|
py
|
Python
|
luxai2021/game/replay.py
|
legend-of-zyda/LuxPythonEnvGym
|
7d818b5943dad1b7fae3c66b612aae93c743bd0e
|
[
"MIT"
] | 61
|
2021-08-23T00:13:10.000Z
|
2022-03-26T13:11:57.000Z
|
luxai2021/game/replay.py
|
legend-of-zyda/LuxPythonEnvGym
|
7d818b5943dad1b7fae3c66b612aae93c743bd0e
|
[
"MIT"
] | 36
|
2021-08-25T03:32:29.000Z
|
2021-11-20T05:15:29.000Z
|
luxai2021/game/replay.py
|
legend-of-zyda/LuxPythonEnvGym
|
7d818b5943dad1b7fae3c66b612aae93c743bd0e
|
[
"MIT"
] | 28
|
2021-09-03T22:43:18.000Z
|
2022-01-24T14:57:18.000Z
|
import json
from luxai2021.game.actions import Action
from typing import List
from .constants import Constants
class Replay:
"""
Implements saving of replays. Loosely mirrors '/src/Replay/index.ts'
"""
def __init__(self, game, file:str, stateful:bool=False):
"""
        Creates a replay writer targeting the given file. When stateful is
        set, the whole serialized map/state is stored each turn in addition
        to the actions taken.
        Args:
            game (Game): The game instance this replay records.
            file (str): Path of the target replay file to write.
            stateful (bool, optional): Store the full serialized state each
                turn as well as the actions. Defaults to False.
"""
self.replayFilePath = None
self.file = file
self.stateful = stateful
self.clear(game)
def clear(self, game):
self.data = {
'seed' : 0,
'mapType' : Constants.MAP_TYPES.RANDOM,
'teamDetails' : [{"name":"Agent0","tournamentID":""},{"name":"Agent1","tournamentID":""}],
'allCommands' : [], # Array<Array<str>>;
'version' : "3.1.0", #string;
"results":{"ranks":[{"rank":1,"agentID":0},{"rank":2,"agentID":1}],"replayFile":"replays\\1632799860645_ADAt9Ktkv2za.json"},
}
if "seed" in game.configs:
self.data["seed"] = game.configs["seed"]
if self.stateful:
self.data['stateful'] = [] #Array<SerializedState>;
def add_actions(self, game, actions: List[Action]) -> None:
"""
Adds the specified commands to the replay.
Args:
            game (Game): The game instance the actions belong to.
            actions (List[Action]): Actions to serialize and add as commands.
"""
commands = []
for action in actions:
commands.append(
{
"command" : action.to_message(self),
"agentID" : action.team,
}
)
self.data["allCommands"].append(commands)
def add_state(self, game) -> None:
"""
Write this state.
Args:
game (Game): [description]
"""
if self.stateful:
state = game.to_state_object()
self.data['stateful'].append(state)
def write(self, game) -> None:
"""
Write this replay to the specified target file.
"""
self.data['width'] = game.map.width
self.data['height'] = game.map.height
with open(self.file, "w") as o:
# Write the replay file
json.dump(self.data, o)
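# A minimal usage sketch (hypothetical: the surrounding game loop, the `game` object and the
# `turn_actions` list are not shown here; the Game class lives elsewhere in luxai2021.game):
#
#   replay = Replay(game, "replay.json", stateful=True)
#   replay.add_state(game)                  # optionally snapshot the full state this turn
#   replay.add_actions(game, turn_actions)  # record the commands issued this turn
#   replay.write(game)                      # flush everything to replay.json at the end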
| 30.378049
| 136
| 0.527499
|
28467c933fbdc44707f1ead8368d88195c05b38f
| 257
|
py
|
Python
|
openks/mm/graph/__init__.py
|
lzy1203/OpenKS
|
6a22fac7550e248e43d5b55ef39b1fe67c4b4b68
|
[
"Apache-2.0"
] | null | null | null |
openks/mm/graph/__init__.py
|
lzy1203/OpenKS
|
6a22fac7550e248e43d5b55ef39b1fe67c4b4b68
|
[
"Apache-2.0"
] | null | null | null |
openks/mm/graph/__init__.py
|
lzy1203/OpenKS
|
6a22fac7550e248e43d5b55ef39b1fe67c4b4b68
|
[
"Apache-2.0"
] | 2
|
2021-11-18T06:55:55.000Z
|
2021-12-29T15:21:07.000Z
|
from .graph import MMGraph
from .schema import (Entity, Relation, Schema, SchemaSet, load_schema,
load_schemas)
from .schema_impl import (HasEntity, ImageEntity, ImageViewEntity, Interact,
SemanticallySimilar)
| 42.833333
| 76
| 0.673152
|
dea10791c9565960f977930ca1a654816041177e
| 6,719
|
py
|
Python
|
mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | 2
|
2021-07-08T13:10:42.000Z
|
2021-11-08T02:48:57.000Z
|
mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Explainer with modified ReLU."""
import mindspore.nn as nn
import mindspore.ops.operations as op
from mindspore.explainer._utils import (
unify_inputs,
unify_targets,
)
from .backprop_utils import GradNet, get_bp_weights
from .gradient import Gradient
class ModifiedReLU(Gradient):
"""Basic class for modified ReLU explanation."""
def __init__(self, network, use_relu_backprop=False):
super(ModifiedReLU, self).__init__(network)
self.use_relu_backprop = use_relu_backprop
self._hook_relu_backward()
self._grad_net = GradNet(self._backward_model)
def __call__(self, inputs, targets):
"""
Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop".
Args:
inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Returns:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
"""
self._verify_data(inputs, targets)
inputs = unify_inputs(inputs)
targets = unify_targets(targets)
weights = get_bp_weights(self._backward_model, inputs, targets)
gradients = self._grad_net(*inputs, weights)
saliency = self._aggregation_fn(gradients)
return saliency
def _hook_relu_backward(self):
"""Set backward hook for ReLU layers."""
for _, cell in self._backward_model.cells_and_names():
if isinstance(cell, nn.ReLU):
cell.register_backward_hook(self._backward_hook)
def _backward_hook(self, _, grad_inputs, grad_outputs):
"""Hook function for ReLU layers."""
inputs = grad_inputs if self.use_relu_backprop else grad_outputs
relu = op.ReLU()
if isinstance(inputs, tuple):
return relu(*inputs)
return relu(inputs)
class Deconvolution(ModifiedReLU):
"""
Deconvolution explanation.
Deconvolution method is a modified version of Gradient method. For the original ReLU operation in the network to be
    explained, Deconvolution modifies the propagation rule from directly backpropagating gradients to backpropagating
positive gradients.
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations. To use `Deconvolution`, the `ReLU` operations in the network must be implemented with
`mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
correct.
Args:
network (Cell): The black-box model to be explained.
Inputs:
- **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
- **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Outputs:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore.explainer.explanation import Deconvolution
>>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
>>> net = LeNet5(10, num_channel=3)
>>> deconvolution = Deconvolution(net)
>>> # parse data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
>>> label = 5
>>> saliency = deconvolution(inputs, label)
>>> print(saliency.shape)
(1, 1, 32, 32)
"""
def __init__(self, network):
super(Deconvolution, self).__init__(network, use_relu_backprop=True)
class GuidedBackprop(ModifiedReLU):
"""
Guided-Backpropagation explanation.
Guided-Backpropagation method is an extension of Gradient method. On top of the original ReLU operation in the
network to be explained, Guided-Backpropagation introduces another ReLU operation to filter out the negative
gradients during backpropagation.
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations. To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with
`mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
correct.
Args:
network (Cell): The black-box model to be explained.
Inputs:
- **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
- **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Outputs:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore.explainer.explanation import GuidedBackprop
>>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
>>> net = LeNet5(10, num_channel=3)
>>> gbp = GuidedBackprop(net)
>>> # feed data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
>>> label = 5
>>> saliency = gbp(inputs, label)
>>> print(saliency.shape)
(1, 1, 32, 32)
"""
def __init__(self, network):
super(GuidedBackprop, self).__init__(network, use_relu_backprop=False)
| 41.220859
| 119
| 0.659622
|
9130200ba462c9fe12efb79390abe5da8549a5e8
| 7,369
|
py
|
Python
|
package/scripts/common.py
|
lijufeng2016/ambari-hue-service
|
a9c30cb9a78c59a94ec2a2d667c7597c75c3aad7
|
[
"Apache-2.0"
] | 17
|
2020-03-10T06:57:20.000Z
|
2021-11-27T03:22:04.000Z
|
package/scripts/common.py
|
tokings/ambari-hue-service
|
a9c30cb9a78c59a94ec2a2d667c7597c75c3aad7
|
[
"Apache-2.0"
] | null | null | null |
package/scripts/common.py
|
tokings/ambari-hue-service
|
a9c30cb9a78c59a94ec2a2d667c7597c75c3aad7
|
[
"Apache-2.0"
] | 17
|
2020-04-12T02:17:10.000Z
|
2021-11-27T03:22:05.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, pwd, grp, signal, time
from resource_management import *
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, Directory, File
from resource_management.core.shell import call
from resource_management.core.system import System
from resource_management.libraries.functions.default import default
def setup_user():
"""
Creates Hue user home directory and sets up the correct ownership.
"""
__create_hue_user()
__set_home_dir_ownership()
def __create_hue_user():
import params
try:
grp.getgrnam(params.hue_group)
except KeyError:
Logger.info(format("Creating group '{params.hue_group}' for Hue Service"))
Group(
group_name = params.hue_group,
ignore_failures = True
)
try:
pwd.getpwnam(params.hue_user)
except KeyError:
Logger.info(format("Creating user '{params.hue_user}' for Hue Service"))
User(
username = params.hue_user,
groups = [params.hue_group],
ignore_failures = True
)
def __set_home_dir_ownership():
  """
  Updates the Hue user home directory to be owned by hue:hue.
  """
  import params
if not os.path.exists("/home/{0}".format(params.hue_user)):
Directory(params.hue_local_home_dir,
mode=0700,
cd_access='a',
owner=params.hue_user,
group=params.hue_group,
create_parents=True
)
def download_hue():
import params
"""
Download Hue to the installation directory
"""
Execute('wget -O hue.tgz {0}'.format(params.download_url))
Execute('tar -zxvf hue.tgz -C {0} && rm -f hue.tgz'.format(params.hue_install_dir))
# Ensure all Hue files owned by hue
Execute('mv {0}-* {0}'.format(params.hue_dir))
Execute('chown -R {0}:{1} {2}'.format(params.hue_user,params.hue_group,params.hue_dir))
Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
# Execute('make {0}/apps'.format(params.hue_dir))
Logger.info("Hue Service is installed")
def add_hdfs_configuration(if_ranger=False, security_enabled=False):
import params
services_configurations = {}
services_configurations['core-site'] = {}
services_configurations['core-site']['hadoop.proxyuser.hue.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hue.hosts'] = '*'
services_configurations['hdfs-site'] = {}
services_configurations['hdfs-site']['dfs.namenode.acls.enabled'] = 'true'
if params.hue_hbase_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.hbase.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hbase.hosts'] = '*'
if params.hue_hive_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.hive.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hive.hosts'] = '*'
if params.hue_spark_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.spark.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.spark.hosts'] = '*'
if params.hue_oozie_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.oozie.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.oozie.hosts'] = '*'
if params.dfs_ha_enabled:
services_configurations['core-site']['hadoop.proxyuser.httpfs.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.httpfs.hosts'] = '*'
#services_configurations['httpfs-site'] = {}
#services_configurations['httpfs-site']['httpfs.proxyuser.hue.groups'] = '*'
#services_configurations['httpfs-site']['httpfs.proxyuser.hue.hosts'] = '*'
if security_enabled:
services_configurations['core-site']['hadoop.proxyuser.HTTP.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.HTTP.hosts'] = '*'
services_configurations['core-site']['hue.kerberos.principal.shortname'] = 'hue'
add_configurations(services_configurations)
def add_hbase_configuration(if_ranger=False, security_enabled=False):
import params
services_configurations = {}
services_configurations['hbase-site'] = {}
if if_ranger:
services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
if security_enabled:
services_configurations['hbase-site']['hbase.thrift.security.qop'] = 'auth'
services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
services_configurations['hbase-site']['hbase.thrift.kerberos.principal'] = params.HTTP_principal
services_configurations['hbase-site']['hbase.thrift.keytab.file'] = params.HTTP_keytab
services_configurations['hbase-site']['hbase.rpc.engine'] = 'org.apache.hadoop.hbase.ipc.SecureRpcEngine'
add_configurations(services_configurations)
def add_hive_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['hive-site'] = {}
services_configurations['hive-site']['hive.security.authorization.sqlstd.confwhitelist.append'] = 'hive.server2.logging.operation.verbose'
# services_configurations['webhcat-site'] = {}
# services_configurations['webhcat-site']['webhcat.proxyuser.hue.groups'] = '*'
# services_configurations['webhcat-site']['webhcat.proxyuser.hue.hosts'] = '*'
if if_ranger:
services_configurations['hive-site']['hive.server2.enable.impersonation'] = 'true'
add_configurations(services_configurations)
def add_oozie_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['oozie-site'] = {}
services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.groups'] = '*'
services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.hosts'] = '*'
add_configurations(services_configurations)
def add_spark_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['livy-conf'] = {}
services_configurations['livy-conf']['livy.server.csrf_protection.enabled'] = 'false'
add_configurations(services_configurations)
def add_configurations(services_configurations):
"""
Run the script file to add configurations
#/var/lib/ambari-server/resources/scripts/configs.sh set ambari-server-host \
cluster_name core-site "hadoop.proxyuser.hbase.hosts" "*"
services_configurations:{'configuration file1':{'key1':'value1','key2':'value2',...},
'configuration file2':{'key1':'value1','key2':'value2',...}
...}
"""
import params
  if isinstance(services_configurations, dict):
    for key1, value1 in services_configurations.items():
      if isinstance(value1, dict):
        for key2, value2 in value1.items():
          cmd = format(params.service_packagedir + "/files/configs.sh set " + params.ambari_server_hostname + " " + params.cluster_name + " " + key1 + " '" + key2 + "' '" + value2 + "'")
          Execute(cmd)
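# A minimal usage sketch (the values simply mirror what add_hdfs_configuration assembles above):
#
#   add_configurations({'core-site': {'hadoop.proxyuser.hue.hosts': '*',
#                                     'hadoop.proxyuser.hue.groups': '*'}})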
| 45.770186
| 185
| 0.7214
|
ee5226d75b57931207b0a079895f1d3650ab1949
| 3,529
|
py
|
Python
|
test/data_processing/test_find_best_worst_lists.py
|
0xProject/p2p_incentives
|
ce69926eb3d003fb2767651df9486556c0e20ab6
|
[
"Apache-2.0"
] | 3
|
2020-03-11T19:42:48.000Z
|
2021-04-01T21:09:05.000Z
|
test/data_processing/test_find_best_worst_lists.py
|
0xProject/p2p_incentives
|
ce69926eb3d003fb2767651df9486556c0e20ab6
|
[
"Apache-2.0"
] | null | null | null |
test/data_processing/test_find_best_worst_lists.py
|
0xProject/p2p_incentives
|
ce69926eb3d003fb2767651df9486556c0e20ab6
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains unit tests of find_best_worst_lists().
"""
from typing import List, Tuple
import pytest
from data_processing import find_best_worst_lists
from data_types import BestAndWorstLists, InvalidInputError, SpreadingRatio
from .__init__ import RATIO_LIST
# test normal cases
CASES_BEST_WORST_LISTS: List[Tuple[List[SpreadingRatio], BestAndWorstLists]] = [
# tuples: (input, output)
(
[RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[2]],
BestAndWorstLists(best=RATIO_LIST[2], worst=RATIO_LIST[0]),
),
(
[RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[2], RATIO_LIST[3]],
BestAndWorstLists(best=RATIO_LIST[2], worst=RATIO_LIST[0]),
),
(
[RATIO_LIST[3], RATIO_LIST[4], RATIO_LIST[5]],
BestAndWorstLists(best=RATIO_LIST[4], worst=RATIO_LIST[4]),
),
(
[RATIO_LIST[4], RATIO_LIST[5], RATIO_LIST[6]],
BestAndWorstLists(best=RATIO_LIST[6], worst=RATIO_LIST[4]),
),
(
[RATIO_LIST[5], RATIO_LIST[7], RATIO_LIST[8]],
BestAndWorstLists(best=RATIO_LIST[5], worst=RATIO_LIST[8]),
),
(
[RATIO_LIST[3], RATIO_LIST[9], RATIO_LIST[10], RATIO_LIST[11]],
BestAndWorstLists(best=RATIO_LIST[10], worst=RATIO_LIST[11]),
),
(
[
RATIO_LIST[0],
RATIO_LIST[1],
RATIO_LIST[2],
RATIO_LIST[3],
RATIO_LIST[4],
RATIO_LIST[5],
RATIO_LIST[6],
RATIO_LIST[7],
RATIO_LIST[8],
RATIO_LIST[9],
RATIO_LIST[10],
],
BestAndWorstLists(best=RATIO_LIST[6], worst=RATIO_LIST[0]),
),
]
@pytest.mark.parametrize("ratio_list, expected_output", CASES_BEST_WORST_LISTS)
def test_find_best_worst_lists__normal(
ratio_list: List[SpreadingRatio], expected_output: BestAndWorstLists
) -> None:
"""
This function tests find_best_worst_lists in normal cases
:param ratio_list: list of SpreadingRatio instances
:param expected_output: an instance of BestAndWorstLists
:return: None
"""
actual_output: BestAndWorstLists = find_best_worst_lists(ratio_list)
for idx in range(2):
assert len(expected_output[idx]) == len(actual_output[idx])
        for value_idx in range(len(expected_output[idx])):
if isinstance(expected_output[idx][value_idx], float):
assert actual_output[idx][value_idx] == pytest.approx(
expected_output[idx][value_idx]
)
else: # this is a None
assert expected_output[idx][value_idx] is actual_output[idx][value_idx]
# test exceptions
def test_find_best_worst_lists__all_none() -> None:
"""
This function tests find_best_worst_lists when every element is None.
:return: None
"""
with pytest.raises(ValueError, match="All entries are None."):
find_best_worst_lists([RATIO_LIST[3], RATIO_LIST[12], RATIO_LIST[13]])
def test_find_best_worst_lists__empty_input() -> None:
"""
This function tests find_best_worst_lists when the input is empty.
:return: None
"""
with pytest.raises(InvalidInputError):
find_best_worst_lists([])
def test_find_best_worst_lists__different_length() -> None:
"""
This function tests find_best_worst_lists when the input length varies.
:return: None
"""
with pytest.raises(ValueError, match="Input lists are of different length."):
find_best_worst_lists([RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[14]])
| 32.376147
| 87
| 0.656277
|
3df1dc2d84ffc08534cc0b90bdd5c95b22ff164e
| 13,482
|
py
|
Python
|
env/env/lib/python3.6/site-packages/dominate/dom_tag.py
|
Aimee-pacy/NEWS
|
2b8afa2af5e5fe5ce5e91c43bcbb7c518606a43f
|
[
"Unlicense"
] | null | null | null |
env/env/lib/python3.6/site-packages/dominate/dom_tag.py
|
Aimee-pacy/NEWS
|
2b8afa2af5e5fe5ce5e91c43bcbb7c518606a43f
|
[
"Unlicense"
] | null | null | null |
env/env/lib/python3.6/site-packages/dominate/dom_tag.py
|
Aimee-pacy/NEWS
|
2b8afa2af5e5fe5ce5e91c43bcbb7c518606a43f
|
[
"Unlicense"
] | null | null | null |
__license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
# pylint: disable=bad-indentation, bad-whitespace, missing-docstring
import copy
import numbers
from collections import defaultdict, namedtuple
from functools import wraps
import threading
try:
# Python 3
from collections.abc import Callable
except ImportError:
# Python 2.7
from collections import Callable
try:
basestring = basestring
except NameError: # py3
basestring = str
unicode = str
try:
import greenlet
except ImportError:
greenlet = None
def _get_thread_context():
context = [threading.current_thread()]
if greenlet:
context.append(greenlet.getcurrent())
return hash(tuple(context))
class dom_tag(object):
is_single = False # Tag does not require matching end tag (ex. <hr/>)
is_pretty = True # Text inside the tag should be left as-is (ex. <pre>)
# otherwise, text will be escaped() and whitespace may be
# modified
is_inline = False
def __new__(_cls, *args, **kwargs):
'''
    Check if a bare tag is being used as a decorator
(called with a single function arg).
decorate the function and return
'''
if len(args) == 1 and isinstance(args[0], Callable) \
and not isinstance(args[0], dom_tag) and not kwargs:
wrapped = args[0]
@wraps(wrapped)
def f(*args, **kwargs):
with _cls() as _tag:
return wrapped(*args, **kwargs) or _tag
return f
return object.__new__(_cls)
def __init__(self, *args, **kwargs):
'''
Creates a new tag. Child tags should be passed as arguments and attributes
should be passed as keyword arguments.
There is a non-rendering attribute which controls how the tag renders:
* `__inline` - Boolean value. If True renders all children tags on the same
line.
'''
self.attributes = {}
self.children = []
self.parent = None
self.document = None
# Does not insert newlines on all children if True (recursive attribute)
self.is_inline = kwargs.pop('__inline', self.is_inline)
self.is_pretty = kwargs.pop('__pretty', self.is_pretty)
#Add child elements
if args:
self.add(*args)
for attr, value in kwargs.items():
self.set_attribute(*type(self).clean_pair(attr, value))
self._ctx = None
self._add_to_ctx()
# context manager
frame = namedtuple('frame', ['tag', 'items', 'used'])
# stack of frames
_with_contexts = defaultdict(list)
def _add_to_ctx(self):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
self._ctx = stack[-1]
stack[-1].items.append(self)
def __enter__(self):
stack = dom_tag._with_contexts[_get_thread_context()]
stack.append(dom_tag.frame(self, [], set()))
return self
def __exit__(self, type, value, traceback):
thread_id = _get_thread_context()
stack = dom_tag._with_contexts[thread_id]
frame = stack.pop()
for item in frame.items:
if item in frame.used: continue
self.add(item)
if not stack:
del dom_tag._with_contexts[thread_id]
def __call__(self, func):
'''
tag instance is being used as a decorator.
wrap func to make a copy of this tag
'''
# remove decorator from its context so it doesn't
# get added in where it was defined
if self._ctx:
self._ctx.used.add(self)
@wraps(func)
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
return f
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
__setitem__ = set_attribute
def delete_attribute(self, key):
if isinstance(key, int):
del self.children[key:key+1]
else:
del self.attributes[key]
__delitem__ = delete_attribute
def setdocument(self, doc):
'''
Creates a reference to the parent document to allow for partial-tree
validation.
'''
# assume that a document is correct in the subtree
if self.document != doc:
self.document = doc
for i in self.children:
if not isinstance(i, dom_tag): return
i.setdocument(doc)
def add(self, *args):
'''
Add new child tags.
'''
for obj in args:
if isinstance(obj, numbers.Number):
# Convert to string so we fall into next if block
obj = str(obj)
if isinstance(obj, basestring):
obj = escape(obj)
self.children.append(obj)
elif isinstance(obj, dom_tag):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
stack[-1].used.add(obj)
self.children.append(obj)
obj.parent = self
obj.setdocument(self.document)
elif isinstance(obj, dict):
for attr, value in obj.items():
self.set_attribute(*dom_tag.clean_pair(attr, value))
elif hasattr(obj, '__iter__'):
for subobj in obj:
self.add(subobj)
else: # wtf is it?
raise ValueError('%r not a tag or string.' % obj)
if len(args) == 1:
return args[0]
return args
def add_raw_string(self, s):
self.children.append(s)
def remove(self, obj):
self.children.remove(obj)
def clear(self):
for i in self.children:
if isinstance(i, dom_tag) and i.parent is self:
i.parent = None
self.children = []
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
# Stupid workaround since we can not use dom_tag in the method declaration
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results
def __getitem__(self, key):
'''
Returns the stored value of the specified attribute or child
(if it exists).
'''
if isinstance(key, int):
# Children are accessed using integers
try:
return object.__getattribute__(self, 'children')[key]
except KeyError:
raise IndexError('Child with index "%s" does not exist.' % key)
elif isinstance(key, basestring):
# Attributes are accessed using strings
try:
return object.__getattribute__(self, 'attributes')[key]
except KeyError:
raise AttributeError('Attribute "%s" does not exist.' % key)
else:
raise TypeError('Only integer and string types are valid for accessing '
'child tags and attributes, respectively.')
__getattr__ = __getitem__
def __len__(self):
'''
Number of child elements.
'''
return len(self.children)
def __bool__(self):
'''
Hack for "if x" and __len__
'''
return True
__nonzero__ = __bool__
def __iter__(self):
'''
Iterates over child elements.
'''
return self.children.__iter__()
def __contains__(self, item):
'''
Checks recursively if item is in children tree.
Accepts both a string and a class.
'''
return bool(self.get(item))
def __iadd__(self, obj):
'''
Reflexive binary addition simply adds tag as a child.
'''
self.add(obj)
return self
# String and unicode representations are the same as render()
def __unicode__(self):
return self.render()
__str__ = __unicode__
def render(self, indent=' ', pretty=True, xhtml=False):
data = self._render([], 0, indent, pretty, xhtml)
return u''.join(data)
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
pretty = pretty and self.is_pretty
name = getattr(self, 'tagname', type(self).__name__)
# Workaround for python keywords and standard classes/methods
# (del, object, input)
if name[-1] == '_':
name = name[:-1]
# open tag
sb.append('<')
sb.append(name)
for attribute, value in sorted(self.attributes.items()):
if value is not False: # False values must be omitted completely
sb.append(' %s="%s"' % (attribute, escape(unicode(value), True)))
sb.append(' />' if self.is_single and xhtml else '>')
if not self.is_single:
inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * indent_level)
# close tag
sb.append('</')
sb.append(name)
sb.append('>')
return sb
def _render_children(self, sb, indent_level, indent_str, pretty, xhtml):
inline = True
for child in self.children:
if isinstance(child, dom_tag):
if pretty and not child.is_inline:
inline = False
sb.append('\n')
sb.append(indent_str * indent_level)
child._render(sb, indent_level, indent_str, pretty, xhtml)
else:
sb.append(unicode(child))
return inline
def __repr__(self):
name = '%s.%s' % (self.__module__, type(self).__name__)
attributes_len = len(self.attributes)
attributes = '%s attribute' % attributes_len
if attributes_len != 1: attributes += 's'
children_len = len(self.children)
children = '%s child' % children_len
if children_len != 1: children += 'ren'
return '<%s at %x: %s, %s>' % (name, id(self), attributes, children)
@staticmethod
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')])
if attribute in set(['http_equiv']) or special_prefix:
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
@classmethod
def clean_pair(cls, attribute, value):
'''
This will call `clean_attribute` on the attribute and also allows for the
creation of boolean attributes.
Ex. input(selected=True) is equivalent to input(selected="selected")
'''
attribute = cls.clean_attribute(attribute)
# Check for boolean attributes
# (i.e. selected=True becomes selected="selected")
if value is True:
value = attribute
# Ignore `if value is False`: this is filtered out in render()
return (attribute, value)
_get_current_none = object()
def get_current(default=_get_current_none):
'''
get the current tag being used as a with context or decorated function.
if no context is active, raises ValueError, or returns the default, if provided
'''
h = _get_thread_context()
ctx = dom_tag._with_contexts.get(h, None)
if ctx:
return ctx[-1].tag
if default is _get_current_none:
raise ValueError('no current context')
return default
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
c = get_current()
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
c.set_attribute(*dom_tag.clean_pair(attr, value))
# escape() is used in render
from .util import escape
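# A minimal usage sketch of the context-manager / attr() machinery defined above
# (it assumes the concrete tag classes from dominate.tags, e.g. div and p, which are
# defined elsewhere in the package):
#
#   from dominate.tags import div, p
#   with div() as container:        # the div becomes the current tag context
#       attr(cls="panel")           # sets class="panel" on the open div
#       p("hello")                  # p is added as a child of the div on context exit
#   print(container.render())      # pretty-prints <div class="panel"> containing <p>hello</p>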
| 27.683778
| 86
| 0.624017
|
0db1e7c2da388460c11e0d76ac177ae888d444b7
| 445
|
py
|
Python
|
Unlock Pattern/unlock.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | 2
|
2021-04-13T12:56:30.000Z
|
2022-03-21T16:46:58.000Z
|
Unlock Pattern/unlock.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | null | null | null |
Unlock Pattern/unlock.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | 1
|
2021-11-14T14:06:46.000Z
|
2021-11-14T14:06:46.000Z
|
"""https://open.kattis.com/problems/unlockpattern"""
matrix = [[0] * 3 for _ in range(3)]  # build independent rows instead of aliasing one row three times
points = {}
# compute the distance between two points
def dist(P, Q):
x1, y1 = P
x2, y2 = Q
return ((x1 - x2)**2 + (y1 - y2)**2)**0.5
for i in range(3):
matrix[i] = list(map(int, input().split()))
for j in range(3):
points[matrix[i][j]] = (i, j)
total = 0
for i in range(1, 9):
total += dist(points[i], points[i + 1])
print(total)
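# Quick sanity check (not part of the problem statement): for the grid
#   1 2 3
#   4 5 6
#   7 8 9
# the pattern 1->2->...->9 makes six unit moves plus two moves of length sqrt(5)
# (3->4 and 6->7), so the printed total is 6 + 2*sqrt(5) ~= 10.4721.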
| 19.347826
| 52
| 0.550562
|
e4620a45e117fc47b18e1d718fcd7a1a9e0a0dbc
| 2,048
|
py
|
Python
|
backend/products/models.py
|
crowdbotics-apps/royal-cloud-33498
|
4ac1701e6af7db4b3b7393b73fc0af80fc313b3b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/products/models.py
|
crowdbotics-apps/royal-cloud-33498
|
4ac1701e6af7db4b3b7393b73fc0af80fc313b3b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/products/models.py
|
crowdbotics-apps/royal-cloud-33498
|
4ac1701e6af7db4b3b7393b73fc0af80fc313b3b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import models
from home.constants import PRODUCT_TYPE, SIZE_VARIANCE
from home.models import UUIDModel
from django.contrib.postgres.fields import ArrayField, JSONField
class Brand(UUIDModel):
"""
A data representation of the brands used by the Admin
"""
name = models.CharField(max_length=150)
class Category(UUIDModel):
"""
A data representation of the various product categories
"""
name = models.CharField(max_length=150)
class Meta:
verbose_name_plural = "Categories"
class Product(UUIDModel):
"""
    A data representation of the products available for sale
"""
brand = models.ForeignKey(Brand,
on_delete=models.SET_NULL,
related_name='products',
blank=True,
null=True)
category = models.ForeignKey(Category,
on_delete=models.SET_NULL,
related_name='products',
blank=True,
null=True)
per_pack_price = models.DecimalField(max_digits=7, decimal_places=2)
per_item_price = models.DecimalField(max_digits=7, decimal_places=2)
styles = ArrayField(models.CharField(max_length=32, blank=True), null=True, blank=True)
upload_date = models.DateField(auto_now_add=True)
half_pack_available = models.BooleanField(default=False)
half_pack_orders = JSONField(blank=True, default=dict)
half_pack_styles = ArrayField(models.CharField(max_length=32, blank=True), null=True, blank=True)
size_variance = models.CharField(choices=SIZE_VARIANCE, max_length=32, default='2S 2M 2L')
type = models.CharField(choices=PRODUCT_TYPE, max_length=32)
class Photo(UUIDModel):
"""
A data representation of the multiple photos in a product
"""
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='photos')
image = models.ImageField(upload_to='products/images')
| 37.236364
| 101
| 0.654785
|
163c253cb88d76dac1eba0789e91effdfa4fc8ce
| 2,443
|
py
|
Python
|
tests/parsers/sqlite_plugins/mac_notificationcenter.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/parsers/sqlite_plugins/mac_notificationcenter.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/parsers/sqlite_plugins/mac_notificationcenter.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MacOS Notification Center plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import mac_notificationcenter
from tests.parsers.sqlite_plugins import test_lib
class MacNotificationCenterTest(test_lib.SQLitePluginTestCase):
"""Tests for the MacOS Notification Center plugin."""
def testProcess(self):
"""Tests the Process function on a MacOS Notification Center db."""
plugin = mac_notificationcenter.MacNotificationCenterPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['mac_notificationcenter.db'], plugin)
self.assertEqual(storage_writer.number_of_events, 6)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# TODO: look into rounding differences between date_time and timestamp
expected_event_values = {
'body': 'KeePassXC can now be run',
'bundle_name': 'com.google.santagui',
'data_type': 'mac:notificationcenter:db',
'date_time': '2018-05-02 10:59:18.930155',
'presented': 1,
'timestamp': '2018-05-02 10:59:18.930156',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'title': 'Santa'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'body': 'Drive File Stream is loading your files…',
'bundle_name': 'com.google.drivefs',
'data_type': 'mac:notificationcenter:db',
'date_time': '2018-05-02 11:13:21.531085',
'presented': 1,
'timestamp': '2018-05-02 11:13:21.531085',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'title': 'Drive File Stream'}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
expected_event_values = {
'body': 'PyCharm can now be run',
'bundle_name': 'com.google.santagui',
'data_type': 'mac:notificationcenter:db',
'date_time': '2018-05-16 16:38:04.686079',
'presented': 1,
'timestamp': '2018-05-16 16:38:04.686080',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'title': 'Santa'}
self.CheckEventValues(storage_writer, events[5], expected_event_values)
if __name__ == '__main__':
unittest.main()
| 35.405797
| 75
| 0.695047
|
3d3703414d7d286fc159fb1ec34e80da6f68528e
| 43,981
|
py
|
Python
|
p3/reinforcement/reinforcementTestClasses.py
|
patrickmcgrory/cs188
|
e5c5995ad187e8edfaf5446dd40e84497461ae90
|
[
"BSD-3-Clause"
] | 22
|
2016-03-31T23:04:51.000Z
|
2021-11-06T08:45:56.000Z
|
p3/reinforcement/reinforcementTestClasses.py
|
naderm/cs188
|
e5c5995ad187e8edfaf5446dd40e84497461ae90
|
[
"BSD-3-Clause"
] | null | null | null |
p3/reinforcement/reinforcementTestClasses.py
|
naderm/cs188
|
e5c5995ad187e8edfaf5446dd40e84497461ae90
|
[
"BSD-3-Clause"
] | 39
|
2015-04-12T12:07:06.000Z
|
2021-12-01T21:55:04.000Z
|
# reinforcementTestClasses.py
# ---------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import testClasses
import random, math, traceback, sys, os
import layout, textDisplay, pacman, gridworld
import time
from util import Counter, TimeoutFunction, FixedRandom
from collections import defaultdict
from pprint import PrettyPrinter
from hashlib import sha1
pp = PrettyPrinter()
VERBOSE = False
import gridworld
LIVINGREWARD = -0.1
NOISE = 0.2
class ValueIterationTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ValueIterationTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
iterations = int(testDict['valueIterations'])
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
maxPreIterations = 10
self.numsIterationsForDisplay = range(min(iterations, maxPreIterations))
self.testOutFile = testDict['test_out_file']
if maxPreIterations < iterations:
self.numsIterationsForDisplay.append(iterations)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsIterationsForDisplay:
checkPolicy = (n == self.numsIterationsForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNIterations(grades, moduleDict, solutionDict, n, checkPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNIterations(self, grades, moduleDict, solutionDict, n, checkPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = ''
valuesKey = "values_k_%d" % n
if self.comparePrettyValues(valuesPretty, solutionDict[valuesKey]):
fileOutString += "Values at iteration %d are correct.\n" % n
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
else:
testPass = False
outString = "Values at iteration %d are NOT correct.\n" % n
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, solutionDict[valuesKey])
stdOutString += outString
fileOutString += outString
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action %s are correct.\n" % (n, action)
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action %s are NOT correct.\n" % (n, action)
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkPolicy:
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct.\n"
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
policyPretty = ''
actions = []
for n in self.numsIterationsForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('values_k_%d' % n, valuesPretty))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
handle.write(self.prettyValueSolutionString('actions', '\n'.join(actions) + '\n'))
return True
def runAgent(self, moduleDict, numIterations):
agent = moduleDict['valueIterationAgents'].ValueIterationAgent(self.grid, discount=self.discount, iterations=numIterations)
states = self.grid.getStates()
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.getValue(state)
policy[state] = agent.computeActionFromValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
if not qValues.has_key(action):
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.computeQValueFromValues(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class ApproximateQLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ApproximateQLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.extractor = 'IdentityExtractor'
if 'extractor' in testDict:
self.extractor = testDict['extractor']
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n):
testPass = True
qValuesPretty, weights, actions, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n" % lastExperience
weightsKey = 'weights_k_%d' % n
if weights == eval(solutionDict[weightsKey]):
fileOutString += "Weights at iteration %d are correct." % n
fileOutString += " Student/correct solution:\n\n%s\n\n" % pp.pformat(weights)
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
for n in self.numsExperiencesForDisplay:
qValuesPretty, weights, actions, _ = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('weights_k_%d' % n, pp.pformat(weights)))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].ApproximateQAgent(extractor=self.extractor, **self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
qValues = {}
weights = agent.getWeights()
for state in states:
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
if not qValues.has_key(action):
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (qValuesPretty, weights, actions, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class QLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(QLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
checkValuesAndPolicy = (n == self.numsExperiencesForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n, checkValuesAndPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n, checkValuesAndPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n\n" % lastExperience
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkValuesAndPolicy:
if not self.comparePrettyValues(valuesPretty, solutionDict['values']):
testPass = False
outString = "Values are NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('values', valuesPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('values', solutionDict['values'])
stdOutString += outString
fileOutString += outString
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
valuesPretty = ''
policyPretty = ''
for n in self.numsExperiencesForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty, _ = self.runAgent(moduleDict, n)
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('values', valuesPretty))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.computeValueFromQValues(state)
policy[state] = agent.computeActionFromQValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
if not qValues.has_key(action):
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class EpsilonGreedyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EpsilonGreedyTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.numExperiences = int(testDict['numExperiences'])
self.numIterations = int(testDict['iterations'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
def execute(self, grades, moduleDict, solutionDict):
if self.testEpsilonGreedy(moduleDict):
return self.testPass(grades)
else:
return self.testFail(grades)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
def runAgent(self, moduleDict):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
for i in range(self.numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
agent.update(startState, action, endState, reward)
return agent
def testEpsilonGreedy(self, moduleDict, tolerance=0.025):
agent = self.runAgent(moduleDict)
for state in self.grid.getStates():
numLegalActions = len(agent.getLegalActions(state))
if numLegalActions <= 1:
continue
numGreedyChoices = 0
optimalAction = agent.computeActionFromQValues(state)
for iteration in range(self.numIterations):
# assume that their computeActionFromQValues implementation is correct (q4 tests this)
if agent.getAction(state) == optimalAction:
numGreedyChoices += 1
# e = epsilon, g = # greedy actions, n = numIterations, k = numLegalActions
# g = n * [(1-e) + e/k] -> e = (n - g) / (n - n/k)
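            # Worked example with hypothetical numbers: for e = 0.2, k = 4 and
            # n = 100 trials, the expected greedy count is g = 100 * [(1 - 0.2) + 0.2/4] = 85,
            # so the recovered epsilon is (100 - 85) / (100 - 100/4) = 15/75 = 0.2.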
empiricalEpsilonNumerator = self.numIterations - numGreedyChoices
empiricalEpsilonDenominator = self.numIterations - self.numIterations / float(numLegalActions)
empiricalEpsilon = empiricalEpsilonNumerator / empiricalEpsilonDenominator
error = abs(empiricalEpsilon - self.epsilon)
if error > tolerance:
self.addMessage("Epsilon-greedy action selection is not correct.")
self.addMessage("Actual epsilon = %f; student empirical epsilon = %f; error = %f > tolerance = %f" % (self.epsilon, empiricalEpsilon, error, tolerance))
return False
return True
### q6
class Question6Test(testClasses.TestCase):
def __init__(self, question, testDict):
super(Question6Test, self).__init__(question, testDict)
def execute(self, grades, moduleDict, solutionDict):
studentSolution = moduleDict['analysis'].question6()
studentSolution = str(studentSolution).strip().lower()
hashedSolution = sha1(studentSolution).hexdigest()
if hashedSolution == '46729c96bb1e4081fdc81a8ff74b3e5db8fba415':
return self.testPass(grades)
else:
self.addMessage("Solution is not correct.")
self.addMessage(" Student solution: %s" % (studentSolution,))
return self.testFail(grades)
def writeSolution(self, moduleDict, filePath):
        with open(filePath, 'w') as handle:
            handle.write('# This is the solution file for %s.\n' % self.path)
            handle.write('# File intentionally blank.\n')
return True
### q7/q8
### =====
## Average wins of a pacman agent
class EvalAgentTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EvalAgentTest, self).__init__(question, testDict)
self.pacmanParams = testDict['pacmanParams']
self.scoreMinimum = int(testDict['scoreMinimum']) if 'scoreMinimum' in testDict else None
self.nonTimeoutMinimum = int(testDict['nonTimeoutMinimum']) if 'nonTimeoutMinimum' in testDict else None
self.winsMinimum = int(testDict['winsMinimum']) if 'winsMinimum' in testDict else None
self.scoreThresholds = [int(s) for s in testDict.get('scoreThresholds','').split()]
self.nonTimeoutThresholds = [int(s) for s in testDict.get('nonTimeoutThresholds','').split()]
self.winsThresholds = [int(s) for s in testDict.get('winsThresholds','').split()]
self.maxPoints = sum([len(t) for t in [self.scoreThresholds, self.nonTimeoutThresholds, self.winsThresholds]])
def execute(self, grades, moduleDict, solutionDict):
self.addMessage('Grading agent using command: python pacman.py %s'% (self.pacmanParams,))
startTime = time.time()
games = pacman.runGames(** pacman.readCommand(self.pacmanParams.split(' ')))
totalTime = time.time() - startTime
numGames = len(games)
stats = {'time': totalTime, 'wins': [g.state.isWin() for g in games].count(True),
'games': games, 'scores': [g.state.getScore() for g in games],
'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
averageScore = sum(stats['scores']) / float(len(stats['scores']))
nonTimeouts = numGames - stats['timeouts']
wins = stats['wins']
def gradeThreshold(value, minimum, thresholds, name):
points = 0
passed = (minimum == None) or (value >= minimum)
if passed:
for t in thresholds:
if value >= t:
points += 1
return (passed, points, value, minimum, thresholds, name)
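        # Worked example with hypothetical numbers: averageScore = 1200 with
        # scoreMinimum = 1000 and scoreThresholds = [1000, 1200, 1500] passes
        # and earns 2 of 3 points (1200 >= 1000 and 1200 >= 1200, but < 1500).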
results = [gradeThreshold(averageScore, self.scoreMinimum, self.scoreThresholds, "average score"),
gradeThreshold(nonTimeouts, self.nonTimeoutMinimum, self.nonTimeoutThresholds, "games not timed out"),
gradeThreshold(wins, self.winsMinimum, self.winsThresholds, "wins")]
totalPoints = 0
for passed, points, value, minimum, thresholds, name in results:
if minimum == None and len(thresholds)==0:
continue
# print passed, points, value, minimum, thresholds, name
totalPoints += points
if not passed:
assert points == 0
self.addMessage("%s %s (fail: below minimum value %s)" % (value, name, minimum))
else:
self.addMessage("%s %s (%s of %s points)" % (value, name, points, len(thresholds)))
if minimum != None:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: fail" % (minimum,))
if len(thresholds)==0 or minimum != thresholds[0]:
self.addMessage(" >= %s: 0 points" % (minimum,))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
elif len(thresholds) > 0:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: 0 points" % (thresholds[0],))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
if any([not passed for passed, _, _, _, _, _ in results]):
totalPoints = 0
return self.testPartial(grades, totalPoints, self.maxPoints)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
### q2/q3
### =====
## For each parameter setting, compute the optimal policy, see if it satisfies some properties
def followPath(policy, start, numSteps=100):
state = start
path = []
for i in range(numSteps):
if state not in policy:
break
action = policy[state]
path.append("(%s,%s)" % state)
if action == 'north': nextState = state[0],state[1]+1
if action == 'south': nextState = state[0],state[1]-1
if action == 'east': nextState = state[0]+1,state[1]
if action == 'west': nextState = state[0]-1,state[1]
if action == 'exit' or action == None:
path.append('TERMINAL_STATE')
break
state = nextState
return path
def parseGrid(string):
grid = [[entry.strip() for entry in line.split()] for line in string.split('\n')]
for row in grid:
for x, col in enumerate(row):
try:
col = int(col)
except:
pass
if col == "_":
col = ' '
row[x] = col
return gridworld.makeGrid(grid)
def computePolicy(moduleDict, grid, discount):
valueIterator = moduleDict['valueIterationAgents'].ValueIterationAgent(grid, discount=discount)
policy = {}
for state in grid.getStates():
policy[state] = valueIterator.computeActionFromValues(state)
return policy
class GridPolicyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(GridPolicyTest, self).__init__(question, testDict)
# Function in module in analysis that returns (discount, noise)
self.parameterFn = testDict['parameterFn']
self.question2 = testDict.get('question2', 'false').lower() == 'true'
# GridWorld specification
# _ is empty space
# numbers are terminal states with that value
# # is a wall
# S is a start state
#
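        # Example (hypothetical) grid in this format, with start state S,
        # a wall #, and terminal states with rewards 1 and -1:
        #    _    _    _    1
        #    _    #    _   -1
        #    S    _    _    _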
self.gridText = testDict['grid']
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.gridName = testDict['gridName']
# Policy specification
# _ policy choice not checked
# N, E, S, W policy action must be north, east, south, west
#
self.policy = parseGrid(testDict['policy'])
# State the most probable path must visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathVisits = testDict.get('pathVisits', None)
# State the most probable path must not visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathNotVisits = testDict.get('pathNotVisits', None)
def execute(self, grades, moduleDict, solutionDict):
if not hasattr(moduleDict['analysis'], self.parameterFn):
self.addMessage('Method not implemented: analysis.%s' % (self.parameterFn,))
return self.testFail(grades)
result = getattr(moduleDict['analysis'], self.parameterFn)()
if type(result) == str and result.lower()[0:3] == "not":
self.addMessage('Actually, it is possible!')
return self.testFail(grades)
if self.question2:
livingReward = None
try:
discount, noise = result
discount = float(discount)
noise = float(noise)
except:
self.addMessage('Did not return a (discount, noise) pair; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
if discount != 0.9 and noise != 0.2:
self.addMessage('Must change either the discount or the noise, not both. Returned (discount, noise) = %s' % (result,))
return self.testFail(grades)
else:
try:
discount, noise, livingReward = result
discount = float(discount)
noise = float(noise)
livingReward = float(livingReward)
except:
self.addMessage('Did not return a (discount, noise, living reward) triple; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
self.grid.setNoise(noise)
if livingReward != None:
self.grid.setLivingReward(livingReward)
start = self.grid.getStartState()
policy = computePolicy(moduleDict, self.grid, discount)
## check policy
actionMap = {'N': 'north', 'E': 'east', 'S': 'south', 'W': 'west', 'X': 'exit'}
width, height = self.policy.width, self.policy.height
policyPassed = True
for x in range(width):
for y in range(height):
if self.policy[x][y] in actionMap and policy[(x,y)] != actionMap[self.policy[x][y]]:
differPoint = (x,y)
policyPassed = False
if not policyPassed:
self.addMessage('Policy not correct.')
self.addMessage(' Student policy at %s: %s' % (differPoint, policy[differPoint]))
self.addMessage(' Correct policy at %s: %s' % (differPoint, actionMap[self.policy[differPoint[0]][differPoint[1]]]))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where the policy is not defined (e.g. walls)")
self.addMessage(' Correct policy specification:')
self.printPolicy(self.policy, True)
self.addMessage(" Legend: N,S,E,W for states in which the student policy must move north etc,")
self.addMessage(" _ for states where it doesn't matter what the student policy does.")
self.printGridworld()
return self.testFail(grades)
## check path
path = followPath(policy, self.grid.getStartState())
if self.pathVisits != None and self.pathVisits not in path:
self.addMessage('Policy does not visit state %s when moving without noise.' % (self.pathVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
if self.pathNotVisits != None and self.pathNotVisits in path:
self.addMessage('Policy visits state %s when moving without noise.' % (self.pathNotVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
return self.testPass(grades)
def printGridworld(self):
self.addMessage(' Gridworld:')
for line in self.gridText.split('\n'):
self.addMessage(' ' + line)
self.addMessage(' Legend: # wall, _ empty, S start, numbers terminal states with that reward.')
def printPolicy(self, policy, policyTypeIsGrid):
if policyTypeIsGrid:
legend = {'N': 'N', 'E': 'E', 'S': 'S', 'W': 'W', ' ': '_'}
else:
legend = {'north': 'N', 'east': 'E', 'south': 'S', 'west': 'W', 'exit': 'X', '.': '.', ' ': '_'}
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
if policyTypeIsGrid:
self.addMessage(" %s" % (" ".join([legend[policy[x][y]] for x in range(self.grid.grid.width)]),))
else:
self.addMessage(" %s" % (" ".join([legend[policy.get((x,y), '.')] for x in range(self.grid.grid.width)]),))
# for state in sorted(self.grid.getStates()):
# if state != 'TERMINAL_STATE':
# self.addMessage(' (%s,%s) %s' % (state[0], state[1], policy[state]))
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
| 47.49568 | 168 | 0.606512 |
7a0ed20b77169e7e734645c290a85fad606ed59d | 2,424 | py | Python |
server/mahjong_portal/sitemap.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | ["MIT"] | null | null | null |
server/mahjong_portal/sitemap.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | ["MIT"] | null | null | null |
server/mahjong_portal/sitemap.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | ["MIT"] | null | null | null |
from django.contrib.sitemaps import Sitemap
from django.urls import reverse
from club.models import Club
from player.models import Player
from rating.models import Rating
from tournament.models import Tournament
class BaseSitemap(Sitemap):
i18n = True
protocol = 'https'
priority = 1
class StaticSitemap(BaseSitemap):
changefreq = 'weekly'
def items(self):
return ['club_list', 'about']
def location(self, item):
return reverse(item)
class TournamentListSitemap(BaseSitemap):
changefreq = 'monthly'
def items(self):
return [2018, 2017, 2016, 2015, 2014, 2013]
def location(self, item):
return reverse('tournament_list', kwargs={'year': item})
class EMATournamentListSitemap(TournamentListSitemap):
changefreq = 'monthly'
def location(self, item):
return reverse('tournament_ema_list', kwargs={'year': item, 'tournament_type': 'EMA'})
class TournamentSitemap(BaseSitemap):
changefreq = 'monthly'
def items(self):
return Tournament.public.filter(is_upcoming=False).order_by('-end_date')
def location(self, obj):
return reverse('tournament_details', kwargs={'slug': obj.slug})
def lastmod(self, obj):
return obj.end_date
class TournamentAnnouncementSitemap(BaseSitemap):
changefreq = 'weekly'
def items(self):
return Tournament.public.filter(is_upcoming=True).order_by('-end_date')
def location(self, obj):
return reverse('tournament_announcement', kwargs={'slug': obj.slug})
def lastmod(self, obj):
return obj.updated_on
class ClubSitemap(BaseSitemap):
changefreq = 'weekly'
def items(self):
return Club.objects.all()
def location(self, obj):
return reverse('club_details', kwargs={'slug': obj.slug})
def lastmod(self, obj):
return obj.updated_on
class PlayerSitemap(BaseSitemap):
changefreq = 'weekly'
def items(self):
return Player.objects.all()
def location(self, obj):
return reverse('player_details', kwargs={'slug': obj.slug})
def lastmod(self, obj):
return obj.updated_on
class RatingSitemap(BaseSitemap):
changefreq = 'weekly'
def items(self):
return Rating.objects.all()
def location(self, obj):
return reverse('rating', kwargs={'slug': obj.slug})
def lastmod(self, obj):
return obj.updated_on
| 22.867925 | 94 | 0.673267 |
f94cdd78930b080aaba8b8b6616753c9bf0d3383 | 6,188 | py | Python |
telerivet/scheduledmessage.py | Telerivet/telerivet-python-client | c98eb1ce038ca93b6229d964fdb21e921b59f835 | ["MIT"] | 7 | 2015-06-16T08:36:10.000Z | 2020-07-29T04:35:52.000Z |
telerivet/scheduledmessage.py | Telerivet/telerivet-python-client | c98eb1ce038ca93b6229d964fdb21e921b59f835 | ["MIT"] | 5 | 2017-05-26T08:56:48.000Z | 2020-08-13T01:14:23.000Z |
telerivet/scheduledmessage.py | Telerivet/telerivet-python-client | c98eb1ce038ca93b6229d964fdb21e921b59f835 | ["MIT"] | 10 | 2015-09-01T17:02:41.000Z | 2019-10-21T15:28:55.000Z |
from .entity import Entity
class ScheduledMessage(Entity):
"""
Represents a scheduled message within Telerivet.
Fields:
- id (string, max 34 characters)
* ID of the scheduled message
* Read-only
- content
* Text content of the scheduled message
* Read-only
- rrule
* Recurrence rule for recurring scheduled messages, e.g. 'FREQ=MONTHLY' or
'FREQ=WEEKLY;INTERVAL=2'; see <https://tools.ietf.org/html/rfc2445#section-4.3.10>
* Read-only
- timezone_id
* Timezone ID used to compute times for recurring messages; see
<http://en.wikipedia.org/wiki/List_of_tz_database_time_zones>
* Read-only
- recipients (array of objects)
* List of recipients. Each recipient is an object with a string `type` property, which
may be `"phone_number"`, `"group"`, or `"filter"`.
If the type is `"phone_number"`, the `phone_number` property will
be set to the recipient's phone number.
If the type is `"group"`, the `group_id` property will be set to
the ID of the group, and the `group_name` property will be set to the name of the
group.
If the type is `"filter"`, the `filter_type` property (string) and
`filter_params` property (object) describe the filter used to send the broadcast. (API
clients should not rely on a particular value or format of the `filter_type` or
`filter_params` properties, as they may change without notice.)
* Read-only
- recipients_str
* A string with a human readable description of the first few recipients (possibly
truncated)
* Read-only
- group_id
* ID of the group to send the message to (null if the recipient is an individual
contact, or if there are multiple recipients)
* Read-only
- contact_id
* ID of the contact to send the message to (null if the recipient is a group, or if
there are multiple recipients)
* Read-only
- to_number
* Phone number to send the message to (null if the recipient is a group, or if there
are multiple recipients)
* Read-only
- route_id
* ID of the phone or route the message will be sent from
* Read-only
- service_id (string, max 34 characters)
* The service associated with this message (for voice calls, the service defines the
call flow)
* Read-only
- audio_url
* For voice calls, the URL of an MP3 file to play when the contact answers the call
* Read-only
- tts_lang
* For voice calls, the language of the text-to-speech voice
* Allowed values: en-US, en-GB, en-GB-WLS, en-AU, en-IN, da-DK, nl-NL, fr-FR, fr-CA,
de-DE, is-IS, it-IT, pl-PL, pt-BR, pt-PT, ru-RU, es-ES, es-US, sv-SE
* Read-only
- tts_voice
* For voice calls, the text-to-speech voice
* Allowed values: female, male
* Read-only
- message_type
* Type of scheduled message
* Allowed values: sms, mms, ussd, call, service
* Read-only
- time_created (UNIX timestamp)
* Time the scheduled message was created in Telerivet
* Read-only
- start_time (UNIX timestamp)
* The time that the message will be sent (or first sent for recurring messages)
* Read-only
- end_time (UNIX timestamp)
* Time after which a recurring message will stop (not applicable to non-recurring
scheduled messages)
* Read-only
- prev_time (UNIX timestamp)
* The most recent time that Telerivet has sent this scheduled message (null if it has
never been sent)
* Read-only
- next_time (UNIX timestamp)
* The next upcoming time that Telerivet will send this scheduled message (null if it
will not be sent again)
* Read-only
- occurrences (int)
* Number of times this scheduled message has already been sent
* Read-only
- is_template (bool)
* Set to true if Telerivet will render variables like [[contact.name]] in the message
content, false otherwise
* Read-only
- track_clicks (boolean)
* If true, URLs in the message content will automatically be replaced with unique
short URLs
* Read-only
- media (array)
* For text messages containing media files, this is an array of objects with the
properties `url`, `type` (MIME type), `filename`, and `size` (file size in bytes).
Unknown properties are null. This property is undefined for messages that do not
contain media files. Note: For files uploaded via the Telerivet web app, the URL is
temporary and may not be valid for more than 1 day.
* Read-only
- vars (dict)
* Custom variables stored for this scheduled message (copied to Message when sent)
* Updatable via API
- label_ids (array)
* IDs of labels to add to the Message
* Read-only
- project_id
* ID of the project this scheduled message belongs to
* Read-only
"""
def save(self):
"""
Saves any fields or custom variables that have changed for this scheduled message.
"""
super(ScheduledMessage, self).save()
def delete(self):
"""
Cancels this scheduled message.
"""
self._api.doRequest("DELETE", self.getBaseApiPath())
def getBaseApiPath(self):
return "/projects/%(project_id)s/scheduled/%(id)s" % {'project_id': self.project_id, 'id': self.id}
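    # Illustrative note (hypothetical IDs, not from the Telerivet docs): for a
    # scheduled message with project_id 'PJ0123' and id 'SM4567',
    # getBaseApiPath() returns '/projects/PJ0123/scheduled/SM4567', which is
    # the path that delete() issues its DELETE request against.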
| 37.277108 | 108 | 0.577085 |
3199b4b95cb68f665ae235dca677913cce6ac21f | 1,226 | py | Python |
consulting/views.py | redsolution/django-consulting | e651322140b017c84171aae23fc608119d31e674 | ["Apache-2.0"] | null | null | null |
consulting/views.py | redsolution/django-consulting | e651322140b017c84171aae23fc608119d31e674 | ["Apache-2.0"] | null | null | null |
consulting/views.py | redsolution/django-consulting | e651322140b017c84171aae23fc608119d31e674 | ["Apache-2.0"] | null | null | null |
from consulting.forms import AskQuestionForm
from consulting.models import Topic, Question
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.generic import list_detail, create_update
def index(request):
queryset = Topic.objects.published()
return list_detail.object_list(request, queryset)
def topic(request, slug):
topic = get_object_or_404(Topic, slug=slug)
# Form workaround
form = AskQuestionForm(initial={'topic': topic})
if request.method == 'POST':
form = AskQuestionForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(topic.get_absolute_url())
else:
form = AskQuestionForm(instance=Question(topic=topic))
extra_context = {'form': form}
return list_detail.object_detail(request, Topic.objects.published(),
slug=slug, extra_context=extra_context)
def question(request, slug, object_id):
return list_detail.object_detail(request, Question.objects.published(), object_id)
| 37.151515 | 86 | 0.754486 |
69d2cfe70d3f23301800233f2196d7280f3eea35 | 2,635 | py | Python |
Widen/LC764_Largest_Plus_Sign.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null |
Widen/LC764_Largest_Plus_Sign.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null |
Widen/LC764_Largest_Plus_Sign.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null |
# 764. Largest Plus Sign
# In a 2D grid from (0, 0) to (N-1, N-1), every cell contains a 1, except those cells in the given list mines which are 0. What is the largest axis-aligned plus sign of 1s contained in the grid? Return the order of the plus sign. If there is none, return 0.
# An "axis-aligned plus sign of 1s of order k" has some center grid[x][y] = 1 along with 4 arms of length k-1 going up, down, left, and right, and made of 1s. This is demonstrated in the diagrams below. Note that there could be 0s or 1s beyond the arms of the plus sign, only the relevant area of the plus sign is checked for 1s.
# Runtime: 3484 ms, faster than 31.20% of Python3 online submissions for Largest Plus Sign.
# Memory Usage: 50.1 MB, less than 16.67% of Python3 online submissions for Largest Plus Sign.
from typing import List

class Solution:
def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int:
grid = [[1 for _ in range(N)] for _ in range(N)]
for x, y in mines:
grid[x][y] = 0
search = [[[0, 0, 0, 0] for _ in range(N)] for _ in range(N)]
# search[i][0] -- how many 1's on the left
# search[i][1] -- how many 1's on the right
# search[i][2] -- how many 1's from above
# search[i][3] -- how many 1's from below
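        # e.g. for a (hypothetical) grid row [1, 1, 0, 1] the "left" counts end
        # up as [1, 2, 0, 1]: they grow along runs of 1's and reset to 0 at a mine.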
# initialize
for i in range(N):
if grid[i][0] == 1:
search[i][0][0] = 1
if grid[i][-1] == 1:
search[i][-1][1] = 1
if grid[0][i] == 1:
search[0][i][2] = 1
if grid[-1][i] == 1:
search[-1][i][3] = 1
res = 0
for i in range(N):
for j in range(N):
if grid[i][j] == 1:
res = 1
break
# update left and above
for i in range(1, N-1):
for j in range(1, N-1):
if grid[i][j] == 0:
continue
search[i][j][0] = search[i][j-1][0] + 1
search[i][j][2] = search[i-1][j][2] + 1
# update right and below
for i in range(N-2, 0, -1):
for j in range(N-2, 0, -1):
if grid[i][j] == 0:
continue
search[i][j][1] = search[i][j+1][1] + 1
search[i][j][3] = search[i+1][j][3] + 1
for i in range(N):
for j in range(N):
curr_res = min(search[i][j])
if curr_res == 0:
continue
res = max(res, curr_res)
return res
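# Hypothetical usage sketch (not part of the original submission). The call
# below is the standard LeetCode sample: a 5x5 grid with a single mine at
# (4, 2), whose largest axis-aligned plus sign has order 2.
if __name__ == "__main__":
    print(Solution().orderOfLargestPlusSign(5, [[4, 2]]))  # expected: 2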
| 42.5 | 329 | 0.488425 |
57011bcc0e8243c3f6cbd53aef8a1341e0428142 | 1,962 | py | Python |
app/rooms/forms.py | ShinJam/airbnb-clone | 5f79c6ff38d268e41e1b99ba1b1ccca65da746a3 | ["MIT"] | null | null | null |
app/rooms/forms.py | ShinJam/airbnb-clone | 5f79c6ff38d268e41e1b99ba1b1ccca65da746a3 | ["MIT"] | 10 | 2020-03-03T16:15:51.000Z | 2022-02-10T09:25:15.000Z |
app/rooms/forms.py | ShinJam/airbnb-clone | 5f79c6ff38d268e41e1b99ba1b1ccca65da746a3 | ["MIT"] | null | null | null |
from django import forms
from django_countries.fields import CountryField
from . import models
class SearchForm(forms.Form):
city = forms.CharField(initial="Anywhere")
country = CountryField(default="KR").formfield()
room_type = forms.ModelChoiceField(
required=False, empty_label="Any kind", queryset=models.RoomType.objects.all()
)
price = forms.IntegerField(required=False)
guests = forms.IntegerField(required=False)
bedrooms = forms.IntegerField(required=False)
beds = forms.IntegerField(required=False)
baths = forms.IntegerField(required=False)
instant_book = forms.BooleanField(required=False)
superhost = forms.BooleanField(required=False)
amenities = forms.ModelMultipleChoiceField(
required=False,
queryset=models.Amenity.objects.all(),
widget=forms.CheckboxSelectMultiple,
)
facilities = forms.ModelMultipleChoiceField(
required=False,
queryset=models.Facility.objects.all(),
widget=forms.CheckboxSelectMultiple,
)
class CreatePhotoForm(forms.ModelForm):
class Meta:
model = models.Photo
fields = ("caption", "file")
def save(self, pk, *args, **kwargs):
photo = super().save(commit=False)
room = models.Room.objects.get(pk=pk)
photo.room = room
photo.save()
class CreateRoomForm(forms.ModelForm):
class Meta:
model = models.Room
fields = (
"name",
"description",
"country",
"city",
"price",
"address",
"guests",
"beds",
"bedrooms",
"baths",
"check_in",
"check_out",
"instant_book",
"room_type",
"amenities",
"facilities",
"house_rules",
)
def save(self, *args, **kwargs):
room = super().save(commit=False)
return room
| 28.434783 | 86 | 0.601427 |
79f6d2f138e958c92dcc27a6541aebcbfeadf439 | 12,372 | py | Python |
neutron/agent/metadata/driver.py | mcadariu/neutron | 35494af5a25efb8b314941ab85b44923654f6acc | ["Apache-2.0"] | 1 | 2018-07-04T07:59:31.000Z | 2018-07-04T07:59:31.000Z |
neutron/agent/metadata/driver.py | ljzjohnson/neutron | d78664321482c15981a09642985a540195e754e3 | ["Apache-2.0"] | null | null | null |
neutron/agent/metadata/driver.py | ljzjohnson/neutron | d78664321482c15981a09642985a540195e754e3 | ["Apache-2.0"] | 1 | 2018-08-28T17:13:16.000Z | 2018-08-28T17:13:16.000Z |
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import grp
import os
import pwd
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.common import constants
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
METADATA_SERVICE_NAME = 'metadata-proxy'
PROXY_CONFIG_DIR = "ns-metadata-proxy"
_HAPROXY_CONFIG_TEMPLATE = """
global
log /dev/log local0 %(log_level)s
log-tag %(log_tag)s
user %(user)s
group %(group)s
maxconn 1024
pidfile %(pidfile)s
daemon
defaults
log global
mode http
option httplog
option dontlognull
option http-server-close
option forwardfor
retries 3
timeout http-request 30s
timeout connect 30s
timeout client 32s
timeout server 32s
timeout http-keep-alive 30s
listen listener
bind 0.0.0.0:%(port)s
server metadata %(unix_socket_path)s
http-request add-header X-Neutron-%(res_type)s-ID %(res_id)s
"""
class InvalidUserOrGroupException(Exception):
pass
class HaproxyConfigurator(object):
def __init__(self, network_id, router_id, unix_socket_path, port, user,
group, state_path, pid_file):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
raise exceptions.NetworkIdOrRouterIdRequiredError()
self.port = port
self.user = user
self.group = group
self.state_path = state_path
self.unix_socket_path = unix_socket_path
self.pidfile = pid_file
self.log_level = (
'debug' if logging.is_debug_enabled(cfg.CONF) else 'info')
# log-tag will cause entries to have the string pre-pended, so use
# the uuid haproxy will be started with. Additionally, if it
# starts with "haproxy" then things will get logged to
# /var/log/haproxy.log on Debian distros, instead of to syslog.
uuid = network_id or router_id
self.log_tag = "haproxy-" + METADATA_SERVICE_NAME + "-" + uuid
def create_config_file(self):
"""Create the config file for haproxy."""
# Need to convert uid/gid into username/group
try:
username = pwd.getpwuid(int(self.user)).pw_name
except (ValueError, KeyError):
try:
username = pwd.getpwnam(self.user).pw_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid user/uid: '%s'") % self.user)
try:
groupname = grp.getgrgid(int(self.group)).gr_name
except (ValueError, KeyError):
try:
groupname = grp.getgrnam(self.group).gr_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid group/gid: '%s'") % self.group)
cfg_info = {
'port': self.port,
'unix_socket_path': self.unix_socket_path,
'user': username,
'group': groupname,
'pidfile': self.pidfile,
'log_level': self.log_level,
'log_tag': self.log_tag
}
if self.network_id:
cfg_info['res_type'] = 'Network'
cfg_info['res_id'] = self.network_id
else:
cfg_info['res_type'] = 'Router'
cfg_info['res_id'] = self.router_id
haproxy_cfg = _HAPROXY_CONFIG_TEMPLATE % cfg_info
LOG.debug("haproxy_cfg = %s", haproxy_cfg)
cfg_dir = self.get_config_path(self.state_path)
# uuid has to be included somewhere in the command line so that it can
# be tracked by process_monitor.
self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id'])
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
with open(self.cfg_path, "w") as cfg_file:
cfg_file.write(haproxy_cfg)
@staticmethod
def get_config_path(state_path):
return os.path.join(state_path or cfg.CONF.state_path,
PROXY_CONFIG_DIR)
@staticmethod
def cleanup_config_file(uuid, state_path):
"""Delete config file created when metadata proxy was spawned."""
# Delete config file if it exists
cfg_path = os.path.join(
HaproxyConfigurator.get_config_path(state_path),
"%s.conf" % uuid)
try:
os.unlink(cfg_path)
except OSError as ex:
# It can happen that this function is called but metadata proxy
# was never spawned so its config file won't exist
if ex.errno != errno.ENOENT:
raise
class MetadataDriver(object):
monitors = {}
def __init__(self, l3_agent):
self.metadata_port = l3_agent.conf.metadata_port
self.metadata_access_mark = l3_agent.conf.metadata_access_mark
registry.subscribe(
after_router_added, resources.ROUTER, events.AFTER_CREATE)
registry.subscribe(
after_router_updated, resources.ROUTER, events.AFTER_UPDATE)
registry.subscribe(
before_router_removed, resources.ROUTER, events.BEFORE_DELETE)
@classmethod
def metadata_filter_rules(cls, port, mark):
return [('INPUT', '-m mark --mark %s/%s -j ACCEPT' %
(mark, constants.ROUTER_MARK_MASK)),
('INPUT', '-p tcp -m tcp --dport %s '
'-j DROP' % port)]
@classmethod
def metadata_nat_rules(cls, port):
return [('PREROUTING', '-d 169.254.169.254/32 '
'-i %(interface_name)s '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-ports %(port)s' %
{'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
'port': port})]
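    # Illustrative example (assumed values, not defined in this module): with
    # metadata port 9697 and an internal device prefix of 'qr-', the rule above
    # expands to ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ -p tcp -m tcp '
    # '--dport 80 -j REDIRECT --to-ports 9697').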
@classmethod
def metadata_checksum_rules(cls, port):
return [('POSTROUTING', '-o %(interface_name)s '
'-p tcp -m tcp --sport %(port)s -j CHECKSUM '
'--checksum-fill' %
{'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
'port': port})]
@classmethod
def _get_metadata_proxy_user_group(cls, conf):
user = conf.metadata_proxy_user or str(os.geteuid())
group = conf.metadata_proxy_group or str(os.getegid())
return user, group
@classmethod
def _get_metadata_proxy_callback(cls, port, conf, network_id=None,
router_id=None):
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group = (
cls._get_metadata_proxy_user_group(conf))
haproxy = HaproxyConfigurator(network_id,
router_id,
metadata_proxy_socket,
port,
user,
group,
conf.state_path,
pid_file)
haproxy.create_config_file()
proxy_cmd = ['haproxy',
'-f', haproxy.cfg_path]
return proxy_cmd
return callback
@classmethod
def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
network_id=None, router_id=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
port, conf, network_id=network_id, router_id=router_id)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
# TODO(dalvarez): Remove in Q cycle. This will kill running instances
# of old ns-metadata-proxy Python version in order to be replaced by
# haproxy. This will help with upgrading and shall be removed in next
# cycle.
cls._migrate_python_ns_metadata_proxy_if_needed(pm)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[router_id] = pm
@staticmethod
def _migrate_python_ns_metadata_proxy_if_needed(pm):
"""Kill running Python version of ns-metadata-proxy.
This function will detect if the current metadata proxy process is
running the old Python version and kill it so that the new haproxy
version is spawned instead.
"""
# Read cmdline to a local var to avoid reading twice from /proc file
cmdline = pm.cmdline
if cmdline and 'haproxy' not in cmdline:
LOG.debug("Migrating old instance of python ns-metadata proxy to "
"new one based on haproxy (%s)", cmdline)
pm.disable()
@classmethod
def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name):
monitor.unregister(uuid, METADATA_SERVICE_NAME)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name)
pm.disable()
# Delete metadata proxy config file
HaproxyConfigurator.cleanup_config_file(uuid, cfg.CONF.state_path)
cls.monitors.pop(uuid, None)
@classmethod
def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None,
callback=None):
return external_process.ProcessManager(
conf=conf,
uuid=router_id,
namespace=ns_name,
default_cmd_callback=callback)
def after_router_added(resource, event, l3_agent, **kwargs):
router = kwargs['router']
proxy = l3_agent.metadata_driver
for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
proxy.metadata_access_mark):
router.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
router.iptables_manager.ipv4['nat'].add_rule(c, r)
for c, r in proxy.metadata_checksum_rules(proxy.metadata_port):
router.iptables_manager.ipv4['mangle'].add_rule(c, r)
router.iptables_manager.apply()
if not isinstance(router, ha_router.HaRouter):
proxy.spawn_monitored_metadata_proxy(
l3_agent.process_monitor,
router.ns_name,
proxy.metadata_port,
l3_agent.conf,
router_id=router.router_id)
def after_router_updated(resource, event, l3_agent, **kwargs):
router = kwargs['router']
proxy = l3_agent.metadata_driver
if (not proxy.monitors.get(router.router_id) and
not isinstance(router, ha_router.HaRouter)):
proxy.spawn_monitored_metadata_proxy(
l3_agent.process_monitor,
router.ns_name,
proxy.metadata_port,
l3_agent.conf,
router_id=router.router_id)
def before_router_removed(resource, event, l3_agent, **kwargs):
router = kwargs['router']
proxy = l3_agent.metadata_driver
proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor,
router.router['id'],
l3_agent.conf,
router.ns_name)
| 37.377644 | 79 | 0.605884 |
26cfff07d0f1c4447af27da367c2b9466ddb5ed4 | 739 | py | Python |
manimlib/utils/sounds.py | adornetejr/manim | e0715ceeff4778d11ef4ac31f8f8f2b56a2187ad | ["MIT"] | 48 | 2021-06-28T01:48:01.000Z | 2022-03-31T18:22:32.000Z |
manimlib/utils/sounds.py | im-AMS/manim | 19e3c97589181ffd43ef14d9169af4e40e054664 | ["MIT"] | 5 | 2021-03-19T11:41:36.000Z | 2022-03-12T00:20:16.000Z |
manimlib/utils/sounds.py | im-AMS/manim | 19e3c97589181ffd43ef14d9169af4e40e054664 | ["MIT"] | 19 | 2018-10-16T06:52:45.000Z | 2020-11-10T04:52:53.000Z |
import os
from manimlib.utils.file_ops import seek_full_path_from_defaults
def play_chord(*nums):
commands = [
"play",
"-n",
"-c1",
"--no-show-progress",
"synth",
] + [
"sin %-" + str(num)
for num in nums
] + [
"fade h 0.5 1 0.5",
">",
os.devnull
]
try:
os.system(" ".join(commands))
except:
pass
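# For example, play_chord(11, 8, 6, 1) (used by play_error_sound below) runs
# the following shell command, assuming a POSIX os.devnull of '/dev/null' and
# the sox 'play' binary being on PATH:
#   play -n -c1 --no-show-progress synth sin %-11 sin %-8 sin %-6 sin %-1 fade h 0.5 1 0.5 > /dev/null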
def play_error_sound():
play_chord(11, 8, 6, 1)
def play_finish_sound():
play_chord(12, 9, 5, 2)
def get_full_sound_file_path(sound_file_name):
return seek_full_path_from_defaults(
sound_file_name,
default_dir=os.path.join("assets", "sounds"),
extensions=[".wav", ".mp3"]
)
| 18.475 | 64 | 0.541272 |
796e9344a41a754f05f5422fc5e895905ec93625 | 36,818 | py | Python |
src/ploomber/sources/notebooksource.py | jramirez857/ploomber | 94751fb46d5f0e2c4658463601e08a49f55ba08c | ["Apache-2.0"] | null | null | null |
src/ploomber/sources/notebooksource.py | jramirez857/ploomber | 94751fb46d5f0e2c4658463601e08a49f55ba08c | ["Apache-2.0"] | null | null | null |
src/ploomber/sources/notebooksource.py | jramirez857/ploomber | 94751fb46d5f0e2c4658463601e08a49f55ba08c | ["Apache-2.0"] | null | null | null |
"""
On languages and kernels
------------------------
NotebookSource represents source code in a Jupyter notebook format (language
agnostic). Apart from .ipynb, we also support any other extension supported
by jupytext.
Given a notebook, we have to know which language it is written in to extract
upstream/product variables (though this only happens when the option of
extracting dependencies automatically is on), we also have to determine the
Jupyter kernel to use (this is always needed).
The unequivocal place to store this information is in the notebook metadata
section, but given that we advocate for the use of scripts (converted to
notebooks via jupytext), they most likely won't contain metadata (metadata
saving is turned off by default in jupytext), so we have to infer this
ourselves.
To make things more complex, jupytext adds its own metadata section but we are
ignoring that for now.
Given that there are many places where this information might be stored, we
have a few rules to automatically determine language and kernel given a
script/notebook.
"""
from functools import wraps
import ast
from pathlib import Path
import warnings
from contextlib import redirect_stdout
from io import StringIO
from copy import deepcopy
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
from papermill.parameterize import parameterize_notebook
import click
import nbformat
import jupytext
from jupytext import cli as jupytext_cli
from jupytext.formats import long_form_one_format, short_form_one_format
from jupytext.config import JupytextConfiguration
import parso
from ploomber.exceptions import (SourceInitializationError,
MissingParametersCellError)
from ploomber.placeholders.placeholder import Placeholder
from ploomber.util import requires
from ploomber.sources.abc import Source
from ploomber.sources.nb_utils import find_cell_with_tag, find_cell_with_tags
from ploomber.static_analysis.extractors import extractor_class_for_language
from ploomber.static_analysis.pyflakes import check_notebook
from ploomber.sources import docstring
from ploomber.io import pretty_print
def _jupytext_fmt(primitive, extension):
"""
Determine the jupytext fmt string to use based on the content and extension
"""
if extension != 'ipynb':
fmt, _ = jupytext.guess_format(primitive, f'.{extension}')
fmt_final = f'{extension}:{fmt}'
else:
fmt_final = '.ipynb'
return fmt_final
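# Illustrative results (the exact format name is whatever jupytext guesses for
# the given content, so these values are examples rather than guarantees):
#
#     _jupytext_fmt('# %%\nx = 1\n', 'py')       # e.g. 'py:percent'
#     _jupytext_fmt(ipynb_json_string, 'ipynb')  # always '.ipynb'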
# TODO: we should unit test that this function is called, as opposed to vanilla
# .read_text
def _read_primitive(path):
"""
    We read using UTF-8 instead of the default encoding since notebooks are
always stored in UTF-8.
We can see this in nbformat, which always reads as UTF-8:
https://github.com/jupyter/nbformat/blob/df63593b64a15ee1c37b522973c39e8674f93c5b/nbformat/__init__.py#L125
Scripts are a different story since they may have other encodings, however,
modern editors have UTF-8 as default (example: VSCode
https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/vscode/understanding-file-encoding?view=powershell-7.2#configuring-vs-code)
so it's safer to use UTF-8 than the default encoding.
jupytext already does this:
https://github.com/mwouts/jupytext/issues/896
"""
return Path(path).read_text(encoding='utf-8')
def _get_last_cell(nb):
"""
Get last cell, ignores cells with empty source (unless the notebook only
has one cell and it's empty)
"""
# iterate in reverse order
for idx in range(-1, -len(nb.cells) - 1, -1):
cell = nb.cells[idx]
# only return it if it has some code
if cell['source'].strip():
return cell
# otherwise return the first cell
return nb.cells[0]
def _get_cell_suggestion(nb):
format_name = nb.metadata.get('jupytext', {}).get('text_representation',
{}).get('format_name')
preamble = 'Add a new cell with your code'
if format_name == 'light':
message = f'{preamble}:\n' + """
# + tags=["parameters"]
# your parameters here...
# -
# +
# your code here...
# -
"""
elif format_name == 'percent':
message = f'{preamble}:\n' + """
# %% tags=["parameters"]
# your parameters here...
# %%
# your code here...
"""
else:
message = preamble + '.'
return message
def requires_path(func):
"""
Checks if NotebookSource instance was initialized from a file, raises
an error if not
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._path is None:
raise ValueError(f'Cannot use {func.__name__!r} if notebook was '
'not initialized from a file')
return func(self, *args, **kwargs)
return wrapper
class NotebookSource(Source):
"""
A source object representing a jupyter notebook (or any format supported
by jupytext)
Parameters
----------
hot_reload : bool, optional
Makes the notebook always read the file before rendering
kernelspec_name : str, optional
Which kernel to use for executing the notebook, it overrides any
existing kernelspec metadata in the notebook. If the notebook does
not have kernelspec info, this parameter is required. Defaults to None.
To see which kernelspecs are available run "jupyter kernelspec list"
check_if_kernel_installed : bool, optional
        Check if the kernel is installed during initialization
Notes
-----
The render method prepares the notebook for execution: it adds the
parameters and it makes sure kernelspec is defined
"""
@requires([
'parso', 'pyflakes', 'jupytext', 'nbformat', 'papermill',
'jupyter_client'
])
def __init__(self,
primitive,
hot_reload=False,
ext_in=None,
kernelspec_name=None,
static_analysis='regular',
check_if_kernel_installed=True):
# any non-py file must first be converted using jupytext, we need
# that representation for validation, if input is already a .py file
# do not convert. If passed a string, try to guess format using
# jupytext. We also need ipynb representation for .develop(),
# but do lazy loading in case we don't need both
self._primitive = primitive
self._check_if_kernel_installed = check_if_kernel_installed
# this happens if using SourceLoader
if isinstance(primitive, Placeholder):
self._path = primitive.path
self._primitive = str(primitive)
elif isinstance(primitive, str):
self._path = None
self._primitive = primitive
elif isinstance(primitive, Path):
self._path = primitive
if primitive.is_dir():
raise SourceInitializationError(
f'Failed to initialize {str(primitive)!r}. '
'Expected a file, got a directory.' +
_suggest_ploomber_scaffold_is_dir())
if not primitive.exists():
raise SourceInitializationError(
f'Failed to initialize {str(primitive)!r}. '
'File does not exist.' +
_suggest_ploomber_scaffold_missing_file())
self._primitive = _read_primitive(primitive)
else:
raise TypeError('Notebooks must be initialized from strings, '
'Placeholder or pathlib.Path, got {}'.format(
type(primitive)))
static_analysis_vals = {'disable', 'regular', 'strict'}
if static_analysis not in static_analysis_vals:
raise ValueError(f'{static_analysis!r} is not a '
"valid 'static_analysis' value, choose one from: "
f'{pretty_print.iterable(static_analysis_vals)}')
self.static_analysis = static_analysis
self._kernelspec_name = kernelspec_name
self._hot_reload = hot_reload
# TODO: validate ext_in values and extensions
if self._path is None and hot_reload:
            raise ValueError('hot_reload only works if the notebook was '
                             'loaded from a file')
if self._path is not None and ext_in is None:
self._ext_in = self._path.suffix[1:]
elif self._path is None and ext_in is None:
if Path(self._primitive).exists():
path = str(self._primitive)
raise ValueError(
f'The file {path!r} you passed looks like '
'a path to a file. Perhaps you meant passing a '
'pathlib.Path object? Example:\n\n'
'from pathlib import Path\n'
f'NotebookRunner(Path({path!r}))')
else:
raise ValueError(
'"ext_in" cannot be None if the notebook is '
'initialized from a string. Either pass '
'a pathlib.Path object with the notebook file '
'location or pass the source code as string '
'and include the "ext_in" parameter')
elif self._path is not None and ext_in is not None:
raise ValueError('"ext_in" must be None if notebook is '
'initialized from a pathlib.Path object')
elif self._path is None and ext_in is not None:
self._ext_in = ext_in
        # try to determine language based on extension, though this test
        # might be inconclusive if dealing with an ipynb file; we only
        # use this to determine the appropriate jupyter kernel when
        # initializing from a string; when initializing from files, the
        # extension is used to determine the kernel
self._language = determine_language(self._ext_in)
self._loc = None
self._params = None
self._nb_str_unrendered = None
self._nb_obj_unrendered = None
self._nb_str_rendered = None
self._nb_obj_rendered = None
# this will raise an error if kernelspec_name is invalid
self._read_nb_str_unrendered()
self._post_init_validation(str(self._primitive))
@property
def primitive(self):
if self._hot_reload:
self._primitive = _read_primitive(self._path)
return self._primitive
def render(self, params):
"""Render notebook (fill parameters using papermill)
"""
self._params = json_serializable_params(params)
self._render()
def _render(self):
# _read_nb_str_unrendered uses hot_reload, this ensures we always get
# the latest version
_, nb = self._read_nb_str_unrendered()
if 'parameters' in _get_last_cell(nb).metadata.get('tags', []):
cell_suggestion = _get_cell_suggestion(nb)
kind = 'notebook' if self._ext_in == 'ipynb' else 'script'
raise SourceInitializationError(
f'Error processing {str(self._path)!r}: the last cell '
f'in the {kind} is the parameters cell. {cell_suggestion}')
# this is needed for parameterize_notebook to work
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = []
nb.metadata['papermill'] = dict()
# NOTE: we use parameterize_notebook instead of execute_notebook
# with the prepare_only option because the latter adds a "papermill"
# section on each cell's metadata, which makes it too verbose when
# using NotebookRunner.develop() when the source is script (each cell
# will have an empty "papermill" metadata dictionary)
nb = parameterize_notebook(nb, self._params)
# delete empty tags to prevent cluttering the notebooks
for cell in nb.cells:
if not len(cell.metadata['tags']):
cell.metadata.pop('tags')
self._nb_str_rendered = nbformat.writes(nb)
self._post_render_validation()
def _read_nb_str_unrendered(self):
"""
Returns the notebook representation (JSON string), this is the raw
source code passed, does not contain injected parameters.
Adds kernelspec info if not present based on the kernelspec_name,
this metadata is required for papermill to know which kernel to use.
An exception is raised if we cannot determine kernel information.
"""
        # hot_reload causes the notebook representation to always be re-evaluated
if self._nb_str_unrendered is None or self._hot_reload:
# this is the notebook node representation
nb = _to_nb_obj(
self.primitive,
ext=self._ext_in,
# passing the underscored version
# because that's the only one available
# when this is initialized
language=self._language,
kernelspec_name=self._kernelspec_name,
check_if_kernel_installed=self._check_if_kernel_installed,
path=self._path)
# if the user injected cells manually (with ploomber nb --inject)
# the source will contain the injected cell, remove it because
# it should not be considered part of the source code
self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False)
# get the str representation. always write from nb_obj, even if
# this was initialized with a ipynb file, nb_obj contains
# kernelspec info
self._nb_str_unrendered = nbformat.writes(
self._nb_obj_unrendered, version=nbformat.NO_CONVERT)
return self._nb_str_unrendered, self._nb_obj_unrendered
def _post_init_validation(self, value):
"""
Validate notebook after initialization (run pyflakes to detect
syntax errors)
"""
# NOTE: what happens if I pass source code with errors to parso?
# maybe we don't need to use pyflakes after all
# we can also use compile. can pyflakes detect things that
# compile cannot?
params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered,
'parameters')
if params_cell is None:
loc = ' "{}"'.format(self.loc) if self.loc else ''
msg = ('Notebook{} does not have a cell tagged '
'"parameters"'.format(loc))
if self.loc and Path(self.loc).suffix == '.py':
msg += """.
Add a cell at the top like this:
# %% tags=["parameters"]
upstream = None
product = None
Go to: https://ploomber.io/s/params for more information
"""
if self.loc and Path(self.loc).suffix == '.ipynb':
msg += ('. Add a cell at the top and tag it as "parameters". '
'Go to the next URL for '
'details: https://ploomber.io/s/params')
raise MissingParametersCellError(msg)
def _post_render_validation(self):
"""
Validate params passed against parameters in the notebook
"""
# NOTE: maybe static_analysis = off should not turn off everything
# but only warn
# strict mode: raise and check signature
# regular mode: _check_notebook called in NotebookRunner.run
if self.static_analysis == 'strict':
self._check_notebook(raise_=True, check_signature=True)
else:
# otherwise, only warn on unused parameters
_warn_on_unused_params(self._nb_obj_unrendered, self._params)
def _check_notebook(self, raise_, check_signature):
if self.static_analysis and self.language == 'python':
# warn if errors (e.g., undeclared variables, syntax errors)
check_notebook(self._nb_str_to_obj(self._nb_str_rendered),
self._params,
filename=self._path or 'notebook',
raise_=raise_,
check_signature=check_signature)
@property
def doc(self):
"""
Returns notebook docstring parsed either from a triple quoted string
        in the top cell or a top markdown cell
"""
return docstring.extract_from_nb(self._nb_obj_unrendered)
@property
def loc(self):
return self._path
@property
def name(self):
# filename without extension(e.g., plot.py -> plot)
if self._path:
return self._path.stem
@property
def nb_str_rendered(self):
"""
        Returns the notebook (as a string) with parameters injected, hot
        reloading if necessary
"""
if self._nb_str_rendered is None:
            raise RuntimeError('Attempted to get the rendered source of an '
                               'unrendered notebook, render it first')
if self._hot_reload:
self._render()
return self._nb_str_rendered
@property
def nb_obj_rendered(self):
"""
        Returns the notebook (as an object) with parameters injected, hot
        reloading if necessary
"""
if self._nb_obj_rendered is None:
# using self.nb_str_rendered triggers hot reload if needed
self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered)
return self._nb_obj_rendered
def __str__(self):
# reload if empty or hot_reload=True
self._read_nb_str_unrendered()
# FIXME: this should ignore changes to the markdown cells
return '\n'.join([c.source for c in self._nb_obj_unrendered.cells])
def __repr__(self):
if self.loc is not None:
return "{}('{}')".format(type(self).__name__, self.loc)
else:
return "{}(loaded from string)".format(type(self).__name__)
@property
def variables(self):
raise NotImplementedError
@property
def extension(self):
# this can be Python, R, Julia, etc. We are handling them the same,
# for now, no normalization can be done.
# One approach is to use the ext if loaded from file, otherwise None
return None
# FIXME: add this to the abstract class, probably get rid of "extension"
# since it's not informative (ipynb files can be Python, R, etc)
@property
def language(self):
"""
Notebook Language (Python, R, etc), this is a best-effort property,
can be None if we could not determine the language
"""
if self._language is None:
self._read_nb_str_unrendered()
try:
# make sure you return "r" instead of "R"
return (self._nb_obj_unrendered.metadata.kernelspec.language.
lower())
except AttributeError:
return None
else:
return self._language
def _nb_str_to_obj(self, nb_str):
return nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)
def _get_parameters_cell(self):
self._read_nb_str_unrendered()
cell, _ = find_cell_with_tag(self._nb_obj_unrendered, tag='parameters')
return cell.source
def extract_upstream(self):
extractor_class = extractor_class_for_language(self.language)
return extractor_class(self._get_parameters_cell()).extract_upstream()
def extract_product(self):
extractor_class = extractor_class_for_language(self.language)
return extractor_class(self._get_parameters_cell()).extract_product()
@requires_path
def save_injected_cell(self):
"""
Inject cell, overwrite the source file (and any paired files)
"""
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# add metadata to flag that the cell was injected manually
recursive_update(
self.nb_obj_rendered,
dict(metadata=dict(ploomber=dict(injected_manually=True))))
# Are we updating a text file that has a metadata filter? If so,
# add ploomber as a section that must be stored
if (self.nb_obj_rendered.metadata.get(
'jupytext', {}).get('notebook_metadata_filter') == '-all'):
recursive_update(
self.nb_obj_rendered,
dict(metadata=dict(jupytext=dict(
notebook_metadata_filter='ploomber,-all'))))
# overwrite
jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(self.nb_obj_rendered, fmt_,
self._path.stem):
jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_)
@requires_path
def remove_injected_cell(self):
"""
Delete injected cell, overwrite the source file (and any paired files)
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
# remove metadata
recursive_update(
nb_clean,
dict(metadata=dict(ploomber=dict(injected_manually=None))))
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# overwrite
jupytext.write(nb_clean, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(self._nb_obj_unrendered, fmt_,
self._path.stem):
jupytext.write(nb_clean, fp=path, fmt=fmt_)
@requires_path
def format(self, fmt, entry_point):
"""Change source format
Returns
-------
str
The path if the extension changed, None otherwise
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
ext_file = self._path.suffix
ext_format = long_form_one_format(fmt)['extension']
extension_changed = ext_file != ext_format
if extension_changed:
if Path(entry_point).is_file():
path = self._path.with_suffix(ext_format)
Path(self._path).unlink()
modified_entry = Path(entry_point).read_text()
main_file = f'{self.name}{ext_file}'
if main_file in modified_entry:
modified_entry = modified_entry.replace(
main_file, f'{self.name}{ext_format}')
Path(entry_point).write_text(modified_entry)
else:
click.secho(
f'{main_file} does not appear in entry-point'
f'please edit manually\n',
fg='yellow')
path = self._path
else:
click.secho(
"The entry-point is not a valid file, please"
" update the pipeline file extensions manually\n",
fg='yellow')
path = self._path
else:
path = self._path
jupytext.write(nb_clean, path, fmt=fmt)
return path if extension_changed else None
@requires_path
def pair(self, base_path):
"""Pairs with an ipynb file
"""
# TODO: add unit test
if self._ext_in == 'ipynb':
raise ValueError(
'pairing only works with .py files, got .ipynb. '
                'You may convert the .ipynb to .py and try again.')
fmt, _ = jupytext.guess_format(self._primitive, f'.{self._ext_in}')
fmt_ = f'{self._ext_in}:{fmt}'
# mute jupytext's output
with redirect_stdout(StringIO()):
jupytext_cli.jupytext(args=[
'--set-formats', f'{base_path}//ipynb,{fmt_}',
str(self._path)
])
@requires_path
def sync(self):
"""Pairs with and ipynb file
"""
# mute jupytext's output
with redirect_stdout(StringIO()):
jupytext_cli.jupytext(args=['--sync', str(self._path)])
def json_serializable_params(params):
# papermill only allows JSON serializable parameters
# convert Params object to dict
params = params.to_dict()
params['product'] = params['product'].to_json_serializable()
if params.get('upstream'):
params['upstream'] = params['upstream'].to_json_serializable()
return params
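# Sketch of the transformation (hypothetical values): a Params object whose
# .to_dict() is {'product': <Product>, 'upstream': <Upstream>, 'n': 10} becomes
# a plain dict such as {'product': 'output/report.html', 'upstream': {...}, 'n': 10},
# i.e. only JSON-serializable values, which is what papermill accepts.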
def _to_nb_obj(source,
language,
ext=None,
kernelspec_name=None,
check_if_kernel_installed=True,
path=None):
"""
Convert to jupyter notebook via jupytext, if the notebook does not contain
kernel information and the user did not pass a kernelspec_name explicitly,
we will try to infer the language and select a kernel appropriately.
If a valid kernel is found, it is added to the notebook. If none of this
works, an exception is raised.
    It also converts the code string to its notebook node representation,
adding kernel data accordingly.
Parameters
----------
source : str
Jupyter notebook (or jupytext compatible formatted) document
language : str
Programming language
path : str, default=None
Script/notebook path. If not None, it's used to throw an informative
error if the notebook fails to load
Returns
-------
nb
Notebook object
Raises
------
RenderError
If the notebook has no kernelspec metadata and kernelspec_name is
None. A notebook without kernelspec metadata will not display in
jupyter notebook correctly. We have to make sure all notebooks
have this.
"""
import jupytext
# let jupytext figure out the format
try:
nb = jupytext.reads(source, fmt=ext)
except Exception as e:
what = 'notebook' if ext == 'ipynb' else 'script'
err = f'Failed to read {what}'
if path is not None:
err += f' from {str(path)!r}'
raise SourceInitializationError(err) from e
# NOTE: I can add the cell with parameters here, but what happens if
# extract_upstream is false? would that be a problem?
check_nb_kernelspec_info(nb,
kernelspec_name,
ext,
language,
check_if_installed=check_if_kernel_installed)
return nb
def check_nb_kernelspec_info(nb,
kernelspec_name,
ext,
language,
check_if_installed=True):
"""Make sure the passed notebook has kernel info
Parameters
----------
check_if_installed : bool
        Also check if the kernelspec is installed; nb.metadata.kernelspec
        will be replaced by whatever information jupyter returns when
        requesting the kernelspec
"""
import jupyter_client
kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language)
# cannot keep going if we don't have the kernel name
if kernel_name is None:
raise SourceInitializationError(
'Notebook does not contain kernelspec metadata and '
'kernelspec_name was not specified, either add '
'kernelspec info to your source file or specify '
'a kernelspec by name. To see list of installed kernels run '
'"jupyter kernelspec list" in the terminal (first column '
'indicates the name). Python is usually named "python3", '
'R usually "ir"')
if check_if_installed:
kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name)
nb.metadata.kernelspec = {
"display_name": kernelspec.display_name,
"language": kernelspec.language,
"name": kernel_name
}
else:
if 'metadata' not in nb:
nb['metadata'] = dict()
if 'kernelspec' not in nb['metadata']:
nb['metadata']['kernelspec'] = dict()
# we cannot ask jupyter, so we fill this in ourselves
nb.metadata.kernelspec = {
"display_name": 'R' if kernel_name == 'ir' else 'Python 3',
"language": 'R' if kernel_name == 'ir' else 'python',
"name": kernel_name
}
def determine_kernel_name(nb, kernelspec_name, ext, language):
"""
Determines the kernel name by using the following data (returns whatever
gives kernel info first): 1) explicit kernel from the user 2) notebook's
metadata 3) file extension 4) language 5) best guess
"""
# explicit kernelspec name
if kernelspec_name is not None:
return kernelspec_name
# use metadata info
try:
return nb.metadata.kernelspec.name
except AttributeError:
pass
# use language from extension if passed, otherwise use language variable
if ext:
language = determine_language(ext)
lang2kernel = {'python': 'python3', 'r': 'ir'}
if language in lang2kernel:
return lang2kernel[language]
# nothing worked, try to guess if it's python...
is_python_ = is_python(nb)
if is_python_:
return 'python3'
else:
return None
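# Illustrative precedence (hypothetical inputs): for a notebook without
# kernelspec metadata, determine_kernel_name(nb, None, 'py', None) falls back
# to the extension, maps 'python' -> 'python3' and returns 'python3'; an
# explicit kernelspec_name such as 'ir' would short-circuit all other checks.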
def inject_cell(model, params):
"""Inject params (by adding a new cell) to a model
Notes
-----
A model is different than a notebook:
https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html
"""
nb = nbformat.from_dict(model['content'])
# we must ensure nb has kernelspec info, otherwise papermill will fail to
# parametrize
ext = model['name'].split('.')[-1]
check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext, language=None)
# papermill adds a bunch of things before calling parameterize_notebook
# if we don't add those things, parameterize_notebook breaks
# https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': None,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = []
params = json_serializable_params(params)
comment = ('This cell was injected automatically based on your stated '
'upstream dependencies (cell above) and pipeline.yaml '
'preferences. It is temporary and will be removed when you '
'save this notebook')
model['content'] = parameterize_notebook(nb,
params,
report_mode=False,
comment=comment)
def _cleanup_rendered_nb(nb, print_=True):
"""
Cleans up a rendered notebook object. Removes cells with tags:
injected-parameters, debugging-settings, and metadata injected by
papermill
"""
out = find_cell_with_tags(nb,
['injected-parameters', 'debugging-settings'])
if print_:
for key in out.keys():
print(f'Removing {key} cell...')
idxs = set(cell['index'] for cell in out.values())
nb['cells'] = [
cell for idx, cell in enumerate(nb['cells']) if idx not in idxs
]
# papermill adds "tags" to all cells that don't have them, remove them
# if they are empty to avoid cluttering the script
for cell in nb['cells']:
if 'tags' in cell.get('metadata', {}):
if not len(cell['metadata']['tags']):
del cell['metadata']['tags']
return nb
def is_python(nb):
"""
Determine if the notebook is Python code for a given notebook object, look
for metadata.kernelspec.language first, if not defined, try to guess if
it's Python, it's conservative and it returns False if the code is valid
Python but contains (<-), in which case it's much more likely to be R
"""
is_python_ = None
# check metadata first
try:
language = nb.metadata.kernelspec.language
except AttributeError:
pass
else:
is_python_ = language == 'python'
# no language defined in metadata, check if it's valid python
if is_python_ is None:
code_str = '\n'.join([c.source for c in nb.cells])
try:
ast.parse(code_str)
except SyntaxError:
is_python_ = False
else:
            # there is a lot of R code which is also valid Python code! So
            # run a quick test: "<-" is very unlikely in Python (a less-than
            # followed by a unary minus) but extremely common in R (assignment)
if '<-' not in code_str:
is_python_ = True
# inconclusive test...
if is_python_ is None:
is_python_ = False
return is_python_
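# Example of the heuristic (assumed inputs, no kernelspec metadata): a notebook
# whose only cell is "x <- 5" parses as valid Python (a comparison against -5),
# but the "<-" check classifies it as R, so is_python returns False; a cell
# containing "x = 5" returns True.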
def determine_language(extension):
"""
A function to determine programming language given file extension,
    returns the programming language name (all lowercase) if it could be determined,
None if the test is inconclusive
"""
if extension.startswith('.'):
extension = extension[1:]
mapping = {'py': 'python', 'r': 'r', 'R': 'r', 'Rmd': 'r', 'rmd': 'r'}
# ipynb can be many languages, it must return None
return mapping.get(extension)
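# Examples derived from the mapping above:
#
#     determine_language('py')     # 'python'
#     determine_language('.Rmd')   # 'r'
#     determine_language('ipynb')  # None (an ipynb may hold any language)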
def recursive_update(target, update):
"""Recursively update a dictionary. Taken from jupytext.header
"""
for key in update:
value = update[key]
if value is None:
# remove if it exists
target.pop(key, None)
elif isinstance(value, dict):
target[key] = recursive_update(target.get(key, {}), value)
else:
target[key] = value
return target
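# Example with throwaway dictionaries: None values delete keys and nested
# dicts are merged recursively:
#
#     recursive_update({'a': {'b': 1}, 'c': 2}, {'a': {'d': 3}, 'c': None})
#     # -> {'a': {'b': 1, 'd': 3}}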
def parse_jupytext_format(fmt, name):
"""
Parse a jupytext format string (such as notebooks//ipynb) and return the
path to the file and the extension
"""
fmt_parsed = long_form_one_format(fmt)
path = Path(fmt_parsed['prefix'], f'{name}{fmt_parsed["extension"]}')
del fmt_parsed['prefix']
return path, short_form_one_format(fmt_parsed)
def iter_paired_notebooks(nb, fmt_, name):
formats = nb.metadata.get('jupytext', {}).get('formats', '')
if not formats:
return
formats = formats.split(',')
formats.remove(fmt_)
# overwrite all paired files
for path, fmt_current in (parse_jupytext_format(fmt, name)
for fmt in formats):
yield path, fmt_current
def _nb2codestr(nb):
return '\n'.join([c.source for c in nb.cells if c.cell_type == 'code'])
def _warn_on_unused_params(nb, params):
nb = deepcopy(nb)
_, idx = find_cell_with_tag(nb, 'parameters')
del nb.cells[idx]
code = _nb2codestr(nb)
    # NOTE: if there is a syntax error we cannot accurately check this
m = parso.parse(code)
names = set(m.get_used_names())
# remove product since it may not be required
# FIXME: maybe only remove it if it's a dictionary with >2 keys
unused = set(params) - names - {'product'}
if unused:
warnings.warn('These parameters are not used in the '
f'task\'s source code: {pretty_print.iterable(unused)}')
def add_parameters_cell(path, extract_upstream, extract_product):
"""
Add parameters cell to a script/notebook in the given path, overwrites the
original file
"""
source = ''
if extract_upstream:
source += """\
# declare a list of tasks whose products you want to use as inputs
upstream = None
"""
if extract_product:
source += """\
# declare a dictionary with the outputs of this task
product = None
"""
c = JupytextConfiguration()
    c.notebook_metadata_filter  # NOTE: attribute access only, this statement has no effect
c.cell_metadata_filter = 'all'
nb = jupytext.read(path)
new_cell = nbformat.v4.new_code_cell(source,
metadata={'tags': ['parameters']})
nb.cells.insert(0, new_cell)
jupytext.write(nb, path, config=c)
def _suggest_ploomber_scaffold_missing_file():
if Path('pipeline.yaml').is_file():
return '\nTo create it, run: ploomber scaffold'
else:
return ''
def _suggest_ploomber_scaffold_is_dir():
if Path('pipeline.yaml').is_file():
return ('\nTo create it, delete the directory, '
'then run: ploomber scaffold')
else:
return ''
| 34.538462
| 148
| 0.621299
|
bcba1b93f832006a0bb27f87943c57447ea11e48
| 37,263
|
py
|
Python
|
google/cloud/video/transcoder_v1beta1/services/transcoder_service/client.py
|
renovate-bot/python-video-transcoder
|
845f1e371cc45040c882b725e12375430260e45a
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/video/transcoder_v1beta1/services/transcoder_service/client.py
|
renovate-bot/python-video-transcoder
|
845f1e371cc45040c882b725e12375430260e45a
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/video/transcoder_v1beta1/services/transcoder_service/client.py
|
renovate-bot/python-video-transcoder
|
845f1e371cc45040c882b725e12375430260e45a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Callable, Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import pagers
from google.cloud.video.transcoder_v1beta1.types import resources
from google.cloud.video.transcoder_v1beta1.types import services
from .transports.base import TranscoderServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TranscoderServiceGrpcTransport
from .transports.grpc_asyncio import TranscoderServiceGrpcAsyncIOTransport
class TranscoderServiceClientMeta(type):
"""Metaclass for the TranscoderService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TranscoderServiceTransport]]
_transport_registry["grpc"] = TranscoderServiceGrpcTransport
_transport_registry["grpc_asyncio"] = TranscoderServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[TranscoderServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TranscoderServiceClient(metaclass=TranscoderServiceClientMeta):
"""Using the Transcoder API, you can queue asynchronous jobs for
transcoding media into various output formats. Output formats
may include different streaming standards such as HTTP Live
Streaming (HLS) and Dynamic Adaptive Streaming over HTTP (DASH).
You can also customize jobs using advanced features such as
Digital Rights Management (DRM), audio equalization, content
concatenation, and digital ad-stitch ready content generation.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
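    # Illustrative conversions (example hostnames, following the rewrite
    # documented above):
    #
    #     "transcoder.googleapis.com"          -> "transcoder.mtls.googleapis.com"
    #     "transcoder.sandbox.googleapis.com"  -> "transcoder.mtls.sandbox.googleapis.com"
    #     "localhost:7469"                     -> unchanged (not a googleapis.com host)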
DEFAULT_ENDPOINT = "transcoder.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
            TranscoderServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@staticmethod
def job_path(project: str, location: str, job: str,) -> str:
"""Return a fully-qualified job string."""
return "projects/{project}/locations/{location}/jobs/{job}".format(
project=project, location=location, job=job,
)
@staticmethod
def parse_job_path(path: str) -> Dict[str, str]:
"""Parse a job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/jobs/(?P<job>.+?)$",
path,
)
return m.groupdict() if m else {}
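    # Example with hypothetical identifiers:
    #
    #     TranscoderServiceClient.job_path("my-project", "us-central1", "my-job")
    #     # -> "projects/my-project/locations/us-central1/jobs/my-job"
    #     TranscoderServiceClient.parse_job_path(
    #         "projects/my-project/locations/us-central1/jobs/my-job")
    #     # -> {"project": "my-project", "location": "us-central1", "job": "my-job"}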
@staticmethod
def job_template_path(project: str, location: str, job_template: str,) -> str:
"""Return a fully-qualified job_template string."""
return "projects/{project}/locations/{location}/jobTemplates/{job_template}".format(
project=project, location=location, job_template=job_template,
)
@staticmethod
def parse_job_template_path(path: str) -> Dict[str, str]:
"""Parse a job_template path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/jobTemplates/(?P<job_template>.+?)$",
path,
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, TranscoderServiceTransport] = None,
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transcoder service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.TranscoderServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint, this is the default value for
the environment variable) and "auto" (auto switch to the default
                mTLS endpoint if client SSL credentials are present). However,
the ``api_endpoint`` property takes precedence if provided.
(2) The ``client_cert_source`` property is used to provide client
SSL credentials for mutual TLS transport. If not provided, the
default SSL credentials will be used if present.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = ClientOptions.from_dict(client_options)
if client_options is None:
client_options = ClientOptions.ClientOptions()
if client_options.api_endpoint is None:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never")
if use_mtls_env == "never":
client_options.api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
has_client_cert_source = (
client_options.client_cert_source is not None
or mtls.has_default_client_cert_source()
)
client_options.api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if has_client_cert_source
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TranscoderServiceTransport):
# transport is a TranscoderServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, "
"provide its scopes directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=client_options.api_endpoint,
scopes=client_options.scopes,
api_mtls_endpoint=client_options.api_endpoint,
client_cert_source=client_options.client_cert_source,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def create_job(
self,
request: services.CreateJobRequest = None,
*,
parent: str = None,
job: resources.Job = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Creates a job in the specified region.
Args:
request (:class:`~.services.CreateJobRequest`):
The request object. Request message for
`TranscoderService.CreateJob`.
parent (:class:`str`):
Required. The parent location to create and process this
job. Format: ``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (:class:`~.resources.Job`):
Required. Parameters for creating
transcoding job.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.resources.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.CreateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.CreateJobRequest):
request = services.CreateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
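    # Illustrative call pattern (project, location and URIs below are
    # hypothetical, and the Job fields shown are only a minimal subset):
    #
    #     client = TranscoderServiceClient()
    #     job = resources.Job(
    #         input_uri="gs://my-bucket/input.mp4",   # hypothetical
    #         output_uri="gs://my-bucket/output/",    # hypothetical
    #         template_id="preset/web-hd",
    #     )
    #     created = client.create_job(
    #         parent="projects/my-project/locations/us-central1", job=job)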
def list_jobs(
self,
request: services.ListJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
r"""Lists jobs in the specified region.
Args:
request (:class:`~.services.ListJobsRequest`):
The request object. Request message for
`TranscoderService.ListJobs`. The parent location from
which to retrieve the collection of jobs.
parent (:class:`str`):
Required. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListJobsPager:
Response message for ``TranscoderService.ListJobs``.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.ListJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.ListJobsRequest):
request = services.ListJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_job(
self,
request: services.GetJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Returns the job data.
Args:
request (:class:`~.services.GetJobRequest`):
The request object. Request message for
`TranscoderService.GetJob`.
name (:class:`str`):
Required. The name of the job to retrieve. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.resources.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.GetJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.GetJobRequest):
request = services.GetJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_job(
self,
request: services.DeleteJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job.
Args:
request (:class:`~.services.DeleteJobRequest`):
The request object. Request message for
`TranscoderService.DeleteJob`.
name (:class:`str`):
Required. The name of the job to delete. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.DeleteJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.DeleteJobRequest):
request = services.DeleteJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_job_template(
self,
request: services.CreateJobTemplateRequest = None,
*,
parent: str = None,
job_template: resources.JobTemplate = None,
job_template_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Creates a job template in the specified region.
Args:
request (:class:`~.services.CreateJobTemplateRequest`):
The request object. Request message for
`TranscoderService.CreateJobTemplate`.
parent (:class:`str`):
Required. The parent location to create this job
template. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template (:class:`~.resources.JobTemplate`):
Required. Parameters for creating job
template.
This corresponds to the ``job_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template_id (:class:`str`):
Required. The ID to use for the job template, which will
become the final component of the job template's
resource name.
This value should be 4-63 characters, and valid
characters are ``/[a-zA-Z0-9_-_]/``.
This corresponds to the ``job_template_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.resources.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job_template, job_template_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.CreateJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.CreateJobTemplateRequest):
request = services.CreateJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job_template is not None:
request.job_template = job_template
if job_template_id is not None:
request.job_template_id = job_template_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_job_templates(
self,
request: services.ListJobTemplatesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobTemplatesPager:
r"""Lists job templates in the specified region.
Args:
request (:class:`~.services.ListJobTemplatesRequest`):
The request object. Request message for
`TranscoderService.ListJobTemplates`.
parent (:class:`str`):
Required. The parent location from which to retrieve the
collection of job templates. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListJobTemplatesPager:
Response message for
``TranscoderService.ListJobTemplates``.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.ListJobTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.ListJobTemplatesRequest):
request = services.ListJobTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_job_templates]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_job_template(
self,
request: services.GetJobTemplateRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Returns the job template data.
Args:
request (:class:`~.services.GetJobTemplateRequest`):
The request object. Request message for
`TranscoderService.GetJobTemplate`.
name (:class:`str`):
Required. The name of the job template to retrieve.
Format:
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.resources.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.GetJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.GetJobTemplateRequest):
request = services.GetJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_job_template(
self,
request: services.DeleteJobTemplateRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job template.
Args:
request (:class:`~.services.DeleteJobTemplateRequest`):
The request object. Request message for
`TranscoderService.DeleteJobTemplate`.
name (:class:`str`):
Required. The name of the job template to delete.
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.DeleteJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.DeleteJobTemplateRequest):
request = services.DeleteJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-video-transcoder",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TranscoderServiceClient",)
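# A minimal usage sketch for the client methods shown above (not part of the
# generated file): it assumes default application credentials, and the project,
# location and template IDs are placeholders.
if __name__ == "__main__":
    client = TranscoderServiceClient()
    parent = "projects/my-project/locations/us-central1"
    # List the job templates under the placeholder parent.
    for template in client.list_job_templates(parent=parent):
        print(template.name)
    # Fetch a single template by its fully qualified name, then delete it.
    template = client.get_job_template(name=f"{parent}/jobTemplates/my-template")
    client.delete_job_template(name=template.name)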
| avg_line_length: 41.58817 | max_line_length: 106 | alphanum_fraction: 0.614792 |

| hexsha: 09c9a46ae0155009031f3c4351b6bb5044d39701 | size: 3,452 | ext: py | lang: Python |
| path: ConSSL/datamodules/fashion_mnist_datamodule.py | repo: SNUHDR2018/ConSSL @ c7d406d0224e38895986c8fb7281a189e493c982 | licenses: ["MIT"] |
| stars: 78 (2021-02-02T10:15:44.000Z .. 2022-03-27T08:51:46.000Z) | issues: 1 (2021-01-15T10:21:42.000Z .. 2021-01-26T22:41:58.000Z) | forks: 52 (2021-06-10T00:23:43.000Z .. 2021-09-19T10:13:11.000Z) |
from typing import Any, Callable, Optional, Union
from ConSSL.datamodules.vision_datamodule import VisionDataModule
from ConSSL.utils import _TORCHVISION_AVAILABLE
from ConSSL.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transform_lib
from torchvision.datasets import FashionMNIST
else: # pragma: no cover
warn_missing_pkg('torchvision')
FashionMNIST = None
class FashionMNISTDataModule(VisionDataModule):
"""
.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/
wp-content/uploads/2019/02/Plot-of-a-Subset-of-Images-from-the-Fashion-MNIST-Dataset.png
:width: 400
:alt: Fashion MNIST
Specs:
- 10 classes (1 per type)
- Each image is (1 x 28 x 28)
Standard FashionMNIST, train, val, test splits and transforms
Transforms::
mnist_transforms = transform_lib.Compose([
transform_lib.ToTensor()
])
Example::
from ConSSL.datamodules import FashionMNISTDataModule
dm = FashionMNISTDataModule('.')
model = LitModel()
Trainer().fit(model, datamodule=dm)
"""
name = "fashion_mnist"
dataset_cls = FashionMNIST
dims = (1, 28, 28)
def __init__(
self,
data_dir: Optional[str] = None,
val_split: Union[int, float] = 0.2,
num_workers: int = 16,
normalize: bool = False,
batch_size: int = 32,
seed: int = 42,
shuffle: bool = False,
pin_memory: bool = False,
drop_last: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
"""
Args:
data_dir: Where to save/load the data
val_split: Percent (float) or number (int) of samples to use for the validation split
num_workers: How many workers to use for loading data
normalize: If true applies image normalize
batch_size: How many samples per batch to load
seed: Random seed to be used for train/val/test splits
shuffle: If true shuffles the train data every epoch
pin_memory: If true, the data loader will copy Tensors into CUDA pinned memory before
returning them
drop_last: If true drops the last incomplete batch
"""
if not _TORCHVISION_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError(
                'You want to use the FashionMNIST dataset loaded from `torchvision`, which is not installed yet.'
)
super().__init__( # type: ignore[misc]
data_dir=data_dir,
val_split=val_split,
num_workers=num_workers,
normalize=normalize,
batch_size=batch_size,
seed=seed,
shuffle=shuffle,
pin_memory=pin_memory,
drop_last=drop_last,
*args,
**kwargs,
)
@property
def num_classes(self) -> int:
"""
Return:
10
"""
return 10
def default_transforms(self) -> Callable:
if self.normalize:
mnist_transforms = transform_lib.Compose([
transform_lib.ToTensor(), transform_lib.Normalize(mean=(0.5, ), std=(0.5, ))
])
else:
mnist_transforms = transform_lib.Compose([transform_lib.ToTensor()])
return mnist_transforms
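# Hedged usage sketch (not part of the original module): construct the datamodule
# and inspect the pieces defined above. Assumes torchvision is installed and that
# the parent VisionDataModule follows the usual LightningDataModule interface.
if __name__ == "__main__":
    dm = FashionMNISTDataModule(data_dir=".", normalize=True, batch_size=64)
    print(dm.num_classes)           # 10
    print(dm.dims)                  # (1, 28, 28)
    print(dm.default_transforms())  # Compose([ToTensor(), Normalize(mean=(0.5,), std=(0.5,))])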
| avg_line_length: 31.381818 | max_line_length: 108 | alphanum_fraction: 0.608343 |

| hexsha: 162be5a54099ca0ba400683916426caa96229ca3 | size: 9,693 | ext: py | lang: Python |
| path: docs/source/conf.py | repo: ManuelMBaumann/pymor @ 9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31 | licenses: ["Unlicense"] |
| stars: null | issues: null | forks: null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import sys, os, re
os.environ['PYMOR_WITH_SPHINX'] = '1'
# Fix documentation generation for readthedocs.org
if os.environ.get('READTHEDOCS', None) == 'True':
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __setitem__(self, k, v):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name in ('__binding__', '__binding_version__'):
return ''
elif name == '__qt_version__':
return '5'
elif name in cls.__dict__:
return cls.__dict__.get(name)
elif name == 'QtWidgets':
return Mock()
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
QWidget = object
MOCK_MODULES = ['scipy', 'scipy.sparse', 'scipy.linalg', 'scipy.sparse.linalg', 'scipy.io', 'scipy.version',
'docopt',
'dogpile', 'dogpile.cache', 'dogpile.cache.backends', 'dogpile.cache.backends.file',
'dogpile.cache.compat',
'Qt', 'Qt.QtGui', 'Qt.QtCore', 'Qt.QtOpenGL', 'Qt.QtWidgets',
'OpenGL', 'OpenGL.GL', 'psutil',
'matplotlib', 'matplotlib.backends', 'matplotlib.backends.backend_qt4agg',
'matplotlib.backends.backend_qt5agg', 'matplotlib.figure', 'matplotlib.pyplot',
'pyvtk',
'IPython',
'IPython.parallel',
'sympy',
'pytest']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
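    # With the stub modules registered above, statements like
    # ``import scipy.sparse.linalg`` succeed on readthedocs and every attribute
    # lookup on them yields another Mock instance, so autodoc can import pyMOR
    # modules without the real dependencies being installed. Capitalised
    # attribute names resolve to freshly created classes instead, so subclassing
    # mocked types (e.g. Qt widgets) keeps working.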
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
if os.environ.get('READTHEDOCS', None) != 'True':
sys.path.insert(0, os.path.abspath('../../src'))
sys.path.insert(0, os.path.abspath('.'))
#generate autodoc
import gen_apidoc
import pymor
#import pymortests
import pymordemos
gen_apidoc.walk(pymor)
# gen_apidoc.walk(pymortests)
gen_apidoc.walk(pymordemos)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'pymordocstring'
]
try:
# was added in sphinx 1.4, some of our target platforms have only 1.2.x
import sphinx.ext.imgmath
extensions.append('sphinx.ext.imgmath')
except ImportError:
extensions.append('sphinx.ext.pngmath')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'pyMOR'
copyright = '2013-2019 pyMOR developers and contributors'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
version = pymor.__version__
# The full version, including alpha/beta/rc tags.
release = version.split('-')[0]
print(version, release)
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "literal"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'pymor.css'
html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Manual" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
#'index': 'indexsidebar.html'
#}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {
#'index': 'indexcontent.html',
#}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymor'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#_stdauthor = 'Written by the NumPy community'
#latex_documents = [
#('reference/index', 'numpy-ref.tex', 'NumPy Reference',
#_stdauthor, 'manual'),
#('user/index', 'numpy-user.tex', 'NumPy User Guide',
#_stdauthor, 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("generated/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
intersphinx_mapping = {'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None)}
import substitutions
rst_epilog = substitutions.substitutions
modindex_common_prefix = ['pymor.']
| avg_line_length: 32.41806 | max_line_length: 112 | alphanum_fraction: 0.603631 |

| hexsha: d111663d5244f499e04005e1ac789ccbda5848a3 | size: 1,105 | ext: py | lang: Python |
| path: day-07/part-1/youyoun.py | repo: evqna/adventofcode-2020 @ 526bb9c87057d02bda4de9647932a0e25bdb3a5b | licenses: ["MIT"] |
| stars: 12 (2020-11-30T19:22:18.000Z .. 2021-06-21T05:55:58.000Z) | issues: 13 (2020-11-30T17:27:22.000Z .. 2020-12-22T17:43:13.000Z) | forks: 3 (2020-12-01T08:49:40.000Z .. 2022-03-26T21:47:38.000Z) |
from tool.runners.python import SubmissionPy
class YouyounSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
lines = s.splitlines()
contains_sg = set()
bags = {}
for l in lines:
container, contains = [x.strip() for x in
l.replace('.', '').replace('bags', '').replace('bag', '').split(" contain ")]
contains = [x.strip()[2:] for x in contains.split(',')]
bags[container] = contains
if 'shiny gold' in contains:
contains_sg.add(container)
counter = 0
stack = contains_sg.copy()
processed = set()
while len(stack) > 0:
curr_b = stack.pop()
if curr_b in processed:
continue
processed.add(curr_b)
counter += 1
# Search for parent bag of current bag
for k, v in bags.items():
if curr_b in v:
stack.add(k)
return counter
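# Illustrative check (not part of the original submission), assuming the
# SubmissionPy base class can be instantiated without arguments. In this tiny
# rule set only "bright white" and "light red" bags can eventually contain a
# shiny gold bag, so the expected answer is 2.
if __name__ == "__main__":
    sample = (
        "light red bags contain 1 bright white bag.\n"
        "bright white bags contain 1 shiny gold bag.\n"
        "dark olive bags contain 3 faded blue bags.\n"
        "faded blue bags contain no other bags.\n"
    )
    print(YouyounSubmission().run(sample))  # expected output: 2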
| avg_line_length: 31.571429 | max_line_length: 112 | alphanum_fraction: 0.489593 |

| hexsha: 70acbfbf1eb1da7eb33dedc8f637d79cf8525a38 | size: 18,330 | ext: py | lang: Python |
| path: src/lamplib/src/genny/tasks/auto_tasks.py | repo: MikhailShchatko/genny @ 00938dc557ef1ad9b6b2d950447bc0e372e951ef | licenses: ["Apache-2.0"] |
| stars: 30 (2019-01-30T17:21:44.000Z .. 2022-01-21T00:05:33.000Z) | issues: 358 (2019-01-15T21:51:57.000Z .. 2022-03-30T16:10:42.000Z) | forks: 50 (2019-01-15T20:01:15.000Z .. 2022-03-24T16:19:52.000Z) |
"""
Generates evergreen tasks based on the current state of the repo.
"""
import enum
import glob
import os
import re
from typing import NamedTuple, List, Optional, Set
import yaml
import structlog
from shrub.command import CommandDefinition
from shrub.config import Configuration
from shrub.variant import TaskSpec
from genny.cmd_runner import run_command
SLOG = structlog.get_logger(__name__)
#
# The classes are listed here in dependency order to avoid having to quote typenames.
#
# For comprehension, start at main(), then class Workload, then class Repo. Rest
# are basically just helpers.
#
class YamlReader:
# You could argue that YamlReader, WorkloadLister, and maybe even Repo
# should be the same class - perhaps renamed to System or something?
# Maybe make these methods static to avoid having to pass an instance around.
def load(self, workspace_root: str, path: str) -> dict:
"""
:param workspace_root: effective cwd
:param path: path relative to workspace_root
:return: deserialized yaml file
"""
joined = os.path.join(workspace_root, path)
if not os.path.exists(joined):
raise Exception(f"File {joined} not found.")
with open(joined) as handle:
return yaml.safe_load(handle)
# Really just here for easy mocking.
def exists(self, path: str) -> bool:
return os.path.exists(path)
def load_set(self, workspace_root: str, files: List[str]) -> dict:
"""
:param workspace_root:
effective cwd
:param files:
files to load relative to cwd
:return:
Key the basename (no extension) of the file and value the loaded contents.
E.g. load_set("expansions") => {"expansions": {"contents":["of","expansions.yml"]}}
"""
out = dict()
for to_load in [f for f in files if self.exists(f)]:
basename = str(os.path.basename(to_load).split(".yml")[0])
out[basename] = self.load(workspace_root=workspace_root, path=to_load)
return out
class WorkloadLister:
"""
Lists files in the repo dir etc.
Separate from the Repo class for easier testing.
"""
def __init__(self, workspace_root: str, genny_repo_root: str, reader: YamlReader):
self.workspace_root = workspace_root
self.genny_repo_root = genny_repo_root
self._expansions = None
self.reader = reader
def all_workload_files(self) -> Set[str]:
pattern = os.path.join(self.workspace_root, "src", "*", "src", "workloads", "**", "*.yml")
return {*glob.glob(pattern)}
def modified_workload_files(self) -> Set[str]:
"""Relies on git to find files in src/workloads modified versus origin/master"""
src_path = os.path.join(self.workspace_root, "src")
all_repo_directories = {
path for path in os.listdir(src_path) if os.path.isdir(os.path.join(src_path, path))
}
command = (
"git diff --name-only --diff-filter=AMR "
"$(git merge-base HEAD origin/master) -- src/workloads/"
)
modified_workloads = set()
for repo_directory in all_repo_directories:
repo_path = os.path.join(src_path, repo_directory)
lines = run_command(cmd=[command], cwd=repo_path, shell=True, check=True).stdout
modified_workloads.update(
{os.path.join(repo_path, line) for line in lines if line.endswith(".yml")}
)
return modified_workloads
class OpName(enum.Enum):
"""
What kind of tasks we're generating in this invocation.
"""
ALL_TASKS = object()
VARIANT_TASKS = object()
PATCH_TASKS = object()
class CLIOperation(NamedTuple):
"""
Represents the "input" to what we're doing"
"""
mode: OpName
variant: Optional[str]
genny_repo_root: str
workspace_root: str
@staticmethod
def create(
mode_name: str, reader: YamlReader, genny_repo_root: str, workspace_root: str
) -> "CLIOperation":
mode = OpName.ALL_TASKS
variant = None
if mode_name == "all_tasks":
mode = OpName.ALL_TASKS
if mode_name == "patch_tasks":
mode = OpName.PATCH_TASKS
variant = reader.load(workspace_root, "expansions.yml")["build_variant"]
if mode_name == "variant_tasks":
mode = OpName.VARIANT_TASKS
variant = reader.load(workspace_root, "expansions.yml")["build_variant"]
return CLIOperation(
mode, variant, genny_repo_root=genny_repo_root, workspace_root=workspace_root
)
class CurrentBuildInfo:
def __init__(self, reader: YamlReader, workspace_root: str):
self.reader = reader
self.workspace_root = workspace_root
self.conts = self.expansions()
def expansions(self):
return self.reader.load(self.workspace_root, "expansions.yml")
def has(self, key: str, acceptable_values: List[str]) -> bool:
"""
:param key: a key from environment (expansions.yml, bootstrap.yml, etc)
:param acceptable_values: possible values we accept
:return: if the actual value from env[key] is in the list of acceptable values
"""
if key not in self.conts:
return False
actual = self.conts[key]
return any(actual == acceptable_value for acceptable_value in acceptable_values)
class GeneratedTask(NamedTuple):
name: str
bootstrap_key: Optional[str]
bootstrap_value: Optional[str]
workload: "Workload"
class AutoRunBlock(NamedTuple):
when: dict
then_run: dict
class Workload:
"""
Represents a workload yaml file.
Is a "child" object of Repo.
"""
file_path: str
"""Path relative to repo root."""
is_modified: bool
auto_run_info: Optional[List[AutoRunBlock]] = None
"""The list of `When/ThenRun` blocks, if present"""
def __init__(self, workspace_root: str, file_path: str, is_modified: bool, reader: YamlReader):
self.workspace_root = workspace_root
self.file_path = file_path
self.is_modified = is_modified
conts = reader.load(workspace_root, self.file_path)
SLOG.info(f"Running auto-tasks for workload: {self.file_path}")
if "AutoRun" not in conts:
return
auto_run = conts["AutoRun"]
if not isinstance(auto_run, list):
raise ValueError(f"AutoRun must be a list, instead got type {type(auto_run)}")
self._validate_auto_run(auto_run)
auto_run_info = []
for block in auto_run:
if "ThenRun" in block:
then_run = block["ThenRun"]
else:
then_run = []
auto_run_info.append(AutoRunBlock(block["When"], then_run))
self.auto_run_info = auto_run_info
@property
def file_base_name(self) -> str:
return str(os.path.basename(self.file_path).split(".yml")[0])
@property
def snake_case_base_name(self) -> str:
return self._to_snake_case(self.file_base_name)
@property
def relative_path(self) -> str:
return self.file_path.replace(self.workspace_root, ".")
def generate_requested_tasks(self, then_run) -> List[GeneratedTask]:
"""
:return: tasks requested.
"""
tasks = []
if len(then_run) == 0:
tasks += [GeneratedTask(self.snake_case_base_name, None, None, self)]
for then_run_block in then_run:
# Just a sanity check; we check this in _validate_auto_run
assert len(then_run_block) == 1
[(bootstrap_key, bootstrap_value)] = then_run_block.items()
task_name = f"{self.snake_case_base_name}_{self._to_snake_case(bootstrap_value)}"
tasks.append(GeneratedTask(task_name, bootstrap_key, bootstrap_value, self))
return tasks
def all_tasks(self) -> List[GeneratedTask]:
"""
:return: all possible tasks irrespective of the current build-variant etc.
"""
if not self.auto_run_info:
return [GeneratedTask(self.snake_case_base_name, None, None, self)]
tasks = []
for block in self.auto_run_info:
tasks += self.generate_requested_tasks(block.then_run)
return self._dedup_task(tasks)
def variant_tasks(self, build: CurrentBuildInfo) -> List[GeneratedTask]:
"""
:param build: info about current build
:return: tasks that we should do given the current build e.g. if we have When/ThenRun info etc.
"""
if not self.auto_run_info:
return []
tasks = []
for block in self.auto_run_info:
when = block.when
then_run = block.then_run
# All When conditions must be true. We set okay: False if any single one is not true.
okay = True
for key, condition in when.items():
if len(condition) != 1:
raise ValueError(
f"Need exactly one condition per key in When block."
f" Got key ${key} with condition ${condition}."
)
if "$eq" in condition:
acceptable_values = condition["$eq"]
if not isinstance(acceptable_values, list):
acceptable_values = [acceptable_values]
if not build.has(key, acceptable_values):
okay = False
elif "$neq" in condition:
unacceptable_values = condition["$neq"]
if not isinstance(unacceptable_values, list):
unacceptable_values = [unacceptable_values]
if build.has(key, unacceptable_values):
okay = False
else:
raise ValueError(
f"The only supported operators are $eq and $neq. Got ${condition.keys()}"
)
if okay:
tasks += self.generate_requested_tasks(then_run)
return self._dedup_task(tasks)
@staticmethod
def _dedup_task(tasks: List[GeneratedTask]) -> List[GeneratedTask]:
"""
Evergreen barfs if a task is declared more than once, and the AutoTask impl may add the same task twice to the
        list. For example, if we have two When blocks that are both true (and no ThenRun task), we will add the base
task twice. So we need to dedup the final task list.
:return: unique tasks.
"""
# Sort the result to make checking dict equality in unittests easier.
        return sorted(set(tasks))
@staticmethod
def _validate_auto_run(auto_run):
"""Perform syntax validation on the auto_run section."""
if not isinstance(auto_run, list):
raise ValueError(f"AutoRun must be a list, instead got {type(auto_run)}")
for block in auto_run:
            # Check for the key before indexing so a missing 'When' raises the
            # intended ValueError rather than a KeyError.
            if "When" not in block or not isinstance(block["When"], dict):
raise ValueError(
f"Each AutoRun block must consist of a 'When' and optional 'ThenRun' section,"
f" instead got {block}"
)
if "ThenRun" in block:
if not isinstance(block["ThenRun"], list):
raise ValueError(
f"ThenRun must be of type list. Instead was type {type(block['ThenRun'])}."
)
for then_run_block in block["ThenRun"]:
if not isinstance(then_run_block, dict):
raise ValueError(
f"Each block in ThenRun must be of type dict."
f" Instead was type {type(then_run_block)}."
)
elif len(then_run_block) != 1:
raise ValueError(
f"Each block in ThenRun must contain one key/value pair. Instead was length"
f" {len(then_run_block)}."
)
# noinspection RegExpAnonymousGroup
@staticmethod
def _to_snake_case(camel_case):
"""
Converts CamelCase to snake_case, useful for generating test IDs
https://stackoverflow.com/questions/1175208/
:return: snake_case version of camel_case.
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_case)
s2 = re.sub("-", "_", s1)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s2).lower()
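# Illustrative example (not part of the original module; the workload name and
# the `mongodb_setup` key below are hypothetical). An AutoRun section in a
# workload file such as ParallelInsert.yml might look like:
#
#   AutoRun:
#     - When:
#         mongodb_setup:
#           $eq: [replica, replica-noflowcontrol]
#       ThenRun:
#         - mongodb_setup: replica
#         - mongodb_setup: replica-noflowcontrol
#
# When the current build's `mongodb_setup` matches one of the $eq values,
# generate_requested_tasks() above emits one task per ThenRun entry, named
# <snake_case file name>_<snake_case value>: here parallel_insert_replica and
# parallel_insert_replica_noflowcontrol.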
class Repo:
"""
Represents the git checkout.
"""
def __init__(self, lister: WorkloadLister, reader: YamlReader, workspace_root: str):
self._modified_repo_files = None
self.workspace_root = workspace_root
self.lister = lister
self.reader = reader
def all_workloads(self) -> List[Workload]:
all_files = self.lister.all_workload_files()
modified = self.lister.modified_workload_files()
return [
Workload(
workspace_root=self.workspace_root,
file_path=fpath,
is_modified=fpath in modified,
reader=self.reader,
)
for fpath in all_files
]
def modified_workloads(self) -> List[Workload]:
return [workload for workload in self.all_workloads() if workload.is_modified]
def all_tasks(self) -> List[GeneratedTask]:
"""
        :return: All possible tasks from all possible workloads
"""
# Double list-comprehensions always read backward to me :(
return [task for workload in self.all_workloads() for task in workload.all_tasks()]
def variant_tasks(self, build: CurrentBuildInfo) -> List[GeneratedTask]:
"""
:return: Tasks to schedule given the current variant (runtime)
"""
return [task for workload in self.all_workloads() for task in workload.variant_tasks(build)]
def patch_tasks(self, build: CurrentBuildInfo) -> List[GeneratedTask]:
"""
:return: Tasks for modified workloads current variant (runtime)
"""
return [
task for workload in self.modified_workloads() for task in workload.variant_tasks(build)
]
def tasks(self, op: CLIOperation, build: CurrentBuildInfo) -> List[GeneratedTask]:
"""
:param op: current cli invocation
:param build: current build info
:return: tasks that should be scheduled given the above
"""
if op.mode == OpName.ALL_TASKS:
tasks = self.all_tasks()
elif op.mode == OpName.PATCH_TASKS:
tasks = self.patch_tasks(build)
elif op.mode == OpName.VARIANT_TASKS:
tasks = self.variant_tasks(build)
else:
raise Exception("Invalid operation mode")
return tasks
class ConfigWriter:
"""
Takes tasks and converts them to shrub Configuration objects.
"""
def __init__(self, op: CLIOperation):
self.op = op
def write(self, tasks: List[GeneratedTask], write: bool = True) -> Configuration:
"""
:param tasks: tasks to write
:param write: boolean to actually write the file - exposed for testing
:return: the configuration object to write (exposed for testing)
"""
if self.op.mode != OpName.ALL_TASKS:
config: Configuration = self.variant_tasks(tasks, self.op.variant)
else:
config = self.all_tasks_modern(tasks)
output_file = os.path.join(self.op.workspace_root, "build", "TaskJSON", "Tasks.json")
success = False
raised = None
if write:
try:
out_text = config.to_json()
os.makedirs(os.path.dirname(output_file), exist_ok=True)
if os.path.exists(output_file):
os.unlink(output_file)
with open(output_file, "w") as output:
output.write(out_text)
SLOG.debug("Wrote task json", output_file=output_file, contents=out_text)
success = True
except Exception as e:
raised = e
raise e
finally:
SLOG.info(
f"{'Succeeded' if success else 'Failed'} to write to {output_file} from cwd={os.getcwd()}."
f"{raised if raised else ''}"
)
return config
@staticmethod
def variant_tasks(tasks: List[GeneratedTask], variant: str) -> Configuration:
c = Configuration()
c.variant(variant).tasks([TaskSpec(task.name) for task in tasks])
return c
@staticmethod
def all_tasks_modern(tasks: List[GeneratedTask]) -> Configuration:
c = Configuration()
c.exec_timeout(64800) # 18 hours
for task in tasks:
bootstrap = {
"test_control": task.name,
"auto_workload_path": task.workload.relative_path,
}
if task.bootstrap_key:
bootstrap[task.bootstrap_key] = task.bootstrap_value
t = c.task(task.name)
t.priority(5)
t.commands(
[
CommandDefinition()
.command("timeout.update")
.params({"exec_timeout_secs": 86400, "timeout_secs": 7200}), # 24 hours
CommandDefinition().function("f_run_dsi_workload").vars(bootstrap),
]
)
return c
def main(mode_name: str, genny_repo_root: str, workspace_root: str) -> None:
reader = YamlReader()
build = CurrentBuildInfo(reader=reader, workspace_root=workspace_root)
op = CLIOperation.create(
mode_name=mode_name,
reader=reader,
genny_repo_root=genny_repo_root,
workspace_root=workspace_root,
)
lister = WorkloadLister(
workspace_root=workspace_root, genny_repo_root=genny_repo_root, reader=reader
)
repo = Repo(lister=lister, reader=reader, workspace_root=workspace_root)
tasks = repo.tasks(op=op, build=build)
writer = ConfigWriter(op)
writer.write(tasks)
| avg_line_length: 36.082677 | max_line_length: 119 | alphanum_fraction: 0.600709 |

| hexsha: b7121a82b75d8389ab7d6d49f9fa28d39854fb66 | size: 1,167 | ext: py | lang: Python |
| path: ecc/tasks/pohlig-hellman/gen_task.py | repo: kabachook/ecc @ 40cfe20bf27bf9f1b7c8e05bfbdbef73e00e2ba6 | licenses: ["MIT"] |
| stars: 8 (2020-01-10T02:44:41.000Z .. 2021-12-19T17:29:10.000Z) | issues: null | forks: null |
#!/usr/bin/env python3
from argparse import ArgumentParser
import sys
from sage.all import *
parser = ArgumentParser("Generate task on Pohlig-Hellman algo")
parser.add_argument("--p-start", type=int, default=2**128, help="min(p)")
parser.add_argument("--p-stop", type=int, default=2**256, help="max(p)")
if __name__ == "__main__":
args = parser.parse_args()
p = random_prime(args.p_stop, lbound=args.p_start)
F = GF(p)
E = None
print(f"Finding curve with smooth order over field {F}", file=sys.stderr)
while True:
a, b = randint(1, p), randint(1, p)
E_t = EllipticCurve(F, [a, b])
print(E_t)
order = E_t.order()
print(f"Order = {order}")
print(f"{order} = ", end="", file=sys.stderr)
print(factor(order), file=sys.stderr)
print("Countinue?[y]", file=sys.stderr)
answer = input().lower()
if "y" in answer:
E = E_t
E.set_order(order)
break
G = E.gens()[0]
d = randint(E.order() // 10, E.order() - 1)
P = d * G
print(f"{P} = {d}*{G}", file=sys.stderr)
print(f"{P} = d*{G}")
print(f"d = ?")
| avg_line_length: 27.785714 | max_line_length: 77 | alphanum_fraction: 0.563839 |

| hexsha: 8dec13af8042fbd7a297b5ae1656030421a41f8a | size: 10,164 | ext: py | lang: Python |
| path: hw/ip/otbn/dv/otbnsim/sim/wsr.py | repo: GregAC/opentitan @ 40b607b776d7b10cfc2899cc0d724d00dc0c91a2 | licenses: ["Apache-2.0"] |
| stars: 2 (2019-11-21T14:05:14.000Z .. 2020-07-10T12:40:54.000Z) | issues: null | forks: null |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional, Sequence
from .trace import Trace
from .ext_regs import TraceExtRegChange, ExtRegChange
class TraceWSR(Trace):
def __init__(self, wsr_name: str, new_value: int):
self.wsr_name = wsr_name
self.new_value = new_value
def trace(self) -> str:
return '{} = {:#x}'.format(self.wsr_name, self.new_value)
def rtl_trace(self) -> str:
return '> {}: {}'.format(self.wsr_name,
Trace.hex_value(self.new_value, 256))
class WSR:
'''Models a Wide Status Register'''
def __init__(self, name: str):
self.name = name
def read_unsigned(self) -> int:
'''Get the stored value as a 256-bit unsigned value'''
raise NotImplementedError()
def write_unsigned(self, value: int) -> None:
'''Set the stored value as a 256-bit unsigned value'''
raise NotImplementedError()
def read_signed(self) -> int:
'''Get the stored value as a 256-bit signed value'''
uval = self.read_unsigned()
return uval - (1 << 256 if uval >> 255 else 0)
def write_signed(self, value: int) -> None:
'''Set the stored value as a 256-bit signed value'''
assert -(1 << 255) <= value < (1 << 255)
uval = (1 << 256) + value if value < 0 else value
self.write_unsigned(uval)
def commit(self) -> None:
'''Commit pending changes'''
return
def abort(self) -> None:
'''Abort pending changes'''
return
def changes(self) -> Sequence[Trace]:
'''Return list of pending architectural changes'''
return []
class DumbWSR(WSR):
'''Models a WSR without special behaviour'''
def __init__(self, name: str):
super().__init__(name)
self._value = 0
self._next_value = None # type: Optional[int]
def read_unsigned(self) -> int:
return self._value
def write_unsigned(self, value: int) -> None:
assert 0 <= value < (1 << 256)
self._next_value = value
def commit(self) -> None:
if self._next_value is not None:
self._value = self._next_value
self._next_value = None
def abort(self) -> None:
self._next_value = None
def changes(self) -> List[TraceWSR]:
return ([TraceWSR(self.name, self._next_value)]
if self._next_value is not None
else [])
class RandWSR(WSR):
'''The magic RND WSR
RND is special as OTBN can stall on reads to it. A read from RND either
immediately returns data from a cache of a previous EDN request (triggered
by writing to the RND_PREFETCH CSR) or waits for data from the EDN. To
model this, anything reading from RND must first call `request_value` which
returns True if the value is available.
'''
def __init__(self, name: str):
super().__init__(name)
self._random_value = None # type: Optional[int]
self._random_value_read = False
self.pending_request = False
self.req_high = False
def read_unsigned(self) -> int:
assert self._random_value is not None
self._random_value_read = True
return self._random_value
def read_u32(self) -> int:
'''Read a 32-bit unsigned result'''
return self.read_unsigned() & ((1 << 32) - 1)
def write_unsigned(self, value: int) -> None:
'''Writes to RND are ignored
Note this is different to `set_unsigned`. This is used by executing
instruction, see `set_unsigned` docstring for more details
'''
return
def commit(self) -> None:
if self._random_value_read:
self._random_value = None
self._random_value_read = False
def request_value(self) -> bool:
'''Signals intent to read RND, returns True if a value is available'''
if self._random_value is not None:
return True
self.pending_request = True
return False
def changes(self) -> List[Trace]:
# We are not tracing the value of RND. Instead we are tracing modelled
# EDN request for RND.
ret = [] # type: List[Trace]
if self.req_high and self._random_value is not None:
ret = [TraceExtRegChange('RND_REQ', ExtRegChange('=', 0, True, 0))]
self.req_high = False
elif self.pending_request:
self.req_high = True
ret = [TraceExtRegChange('RND_REQ', ExtRegChange('=', 1, True, 1))]
return ret
def set_unsigned(self, value: int) -> None:
'''Sets a random value that can be read by a future `read_unsigned`
This is different to `write_unsigned`, that is used by an executing
instruction to write to RND. This is used by the simulation environment
to provide a value that is later read by `read_unsigned` and doesn't
relate to instruction execution (e.g. in an RTL simulation it monitors
the EDN bus and supplies the simulator with an RND value when a fresh
one is seen on the EDN bus).
'''
assert 0 <= value < (1 << 256)
self._random_value = value
if self.pending_request:
self.pending_request = False
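# Illustrative sketch (not part of the original model): the RND handshake as the
# simulation environment would drive it. The variable names are placeholders.
#
#   rnd = RandWSR('RND')
#   if not rnd.request_value():    # no cached entropy yet; RND_REQ goes high
#       rnd.set_unsigned(edn_word) # environment supplies 256 bits from the EDN
#   value = rnd.read_unsigned()    # the stalled read can now complete
#   rnd.commit()                   # a value that was read is consumed on commit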
class URNDWSR(WSR):
'''Models URND PRNG Structure'''
def __init__(self, name: str):
super().__init__(name)
self._seed = [0x84ddfadaf7e1134d, 0x70aa1c59de6197ff,
0x25a4fe335d095f1e, 0x2cba89acbe4a07e9]
self.state = [self._seed,
4 * [0], 4 * [0],
4 * [0], 4 * [0]]
self.out = 4 * [0]
self._next_value = None # type: Optional[int]
self._value = None # type: Optional[int]
self.running = False
# Function to left rotate a 64b number n by d bits
def leftRotate64(self, n: int, d: int) -> int:
return ((n << d) & ((1 << 64) - 1)) | (n >> (64 - d))
def read_u32(self) -> int:
'''Read a 32-bit unsigned result'''
return self.read_unsigned() & ((1 << 32) - 1)
def write_unsigned(self, value: int) -> None:
'''Writes to URND are ignored'''
return
def read_unsigned(self) -> int:
assert self._value is not None
return self._value
def state_update(self, data_in: List[int]) -> List[int]:
a_in = data_in[3]
b_in = data_in[2]
c_in = data_in[1]
d_in = data_in[0]
a_out = a_in ^ b_in ^ d_in
b_out = a_in ^ b_in ^ c_in
c_out = a_in ^ ((b_in << 17) & ((1 << 64) - 1)) ^ c_in
d_out = self.leftRotate64(d_in, 45) ^ self.leftRotate64(b_in, 45)
assert a_out < (1 << 64)
assert b_out < (1 << 64)
assert c_out < (1 << 64)
assert d_out < (1 << 64)
return [d_out, c_out, b_out, a_out]
def set_seed(self, value: List[int]) -> None:
self.running = True
self.state[0] = value
def step(self) -> None:
if self.running:
mid = 4 * [0]
self._next_value = 0
for i in range(4):
self.state[i + 1] = self.state_update(self.state[i])
mid[i] = (self.state[i][3] + self.state[i][0]) & ((1 << 64) - 1)
self.out[i] = (self.leftRotate64(mid[i], 23) + self.state[i][3]) & ((1 << 64) - 1)
self._next_value = (self._next_value | (self.out[i] << (64 * i))) & ((1 << 256) - 1)
self.state[0] = self.state[4]
def commit(self) -> None:
if self._next_value is not None:
self._value = self._next_value
def abort(self) -> None:
self._next_value = 0
def changes(self) -> List[TraceWSR]:
        return []
class WSRFile:
'''A model of the WSR file'''
def __init__(self) -> None:
self.MOD = DumbWSR('MOD')
self.RND = RandWSR('RND')
self.URND = URNDWSR('URND')
self.ACC = DumbWSR('ACC')
self.KeyS0L = DumbWSR('KeyS0L')
self.KeyS0H = DumbWSR('KeyS0H')
self.KeyS1L = DumbWSR('KeyS1L')
self.KeyS1H = DumbWSR('KeyS1H')
self._by_idx = {
0: self.MOD,
1: self.RND,
2: self.URND,
3: self.ACC,
4: self.KeyS0L,
5: self.KeyS0H,
6: self.KeyS1L,
7: self.KeyS1H,
}
# Use fixed sideload keys for now. This matches the fixed keys used in
# the testbenches. Eventually the model will snoop the incoming key as
# it snoops the incoming EDN data for RND/URND now.
self.KeyS0L._value = 0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF
self.KeyS0H._value = 0xDEADBEEFDEADBEEFDEADBEEFDEADBEEF
self.KeyS1L._value = 0xBAADF00DBAADF00DBAADF00DBAADF00DBAADF00DBAADF00DBAADF00DBAADF00D
self.KeyS1H._value = 0xBAADF00DBAADF00DBAADF00DBAADF00D
def check_idx(self, idx: int) -> bool:
'''Return True if idx is a valid WSR index'''
return idx in self._by_idx
def read_at_idx(self, idx: int) -> int:
'''Read the WSR at idx as an unsigned 256-bit value
Assumes that idx is a valid index (call check_idx to ensure this).
'''
return self._by_idx[idx].read_unsigned()
def write_at_idx(self, idx: int, value: int) -> None:
'''Write the WSR at idx as an unsigned 256-bit value
Assumes that idx is a valid index (call check_idx to ensure this).
'''
return self._by_idx[idx].write_unsigned(value)
def commit(self) -> None:
self.MOD.commit()
self.RND.commit()
self.URND.commit()
self.ACC.commit()
def abort(self) -> None:
self.MOD.abort()
self.RND.abort()
self.URND.abort()
self.ACC.abort()
def changes(self) -> List[Trace]:
ret = [] # type: List[Trace]
ret += self.MOD.changes()
ret += self.RND.changes()
ret += self.URND.changes()
ret += self.ACC.changes()
return ret
| avg_line_length: 32.681672 | max_line_length: 100 | alphanum_fraction: 0.585596 |

| hexsha: 7912a66cb25fd4c55ec625c8b315217f793ecdd8 | size: 5,618 | ext: py | lang: Python |
| path: saleor/graphql/order/schema.py | repo: 0xfab-ri/saleor @ 99f262bcce67fb0b66c2717bcb1a17d56bc69252 | licenses: ["CC-BY-4.0"] |
| stars: null | issues: null | forks: null |
import graphene
from graphql_jwt.decorators import login_required
from ..core.enums import ReportingPeriod
from ..core.fields import FilterInputConnectionField, PrefetchingConnectionField
from ..core.types import FilterInputObjectType, TaxedMoney
from ..decorators import permission_required
from ..descriptions import DESCRIPTIONS
from .bulk_mutations.draft_orders import DraftOrderBulkDelete, DraftOrderLinesBulkDelete
from .bulk_mutations.orders import OrderBulkCancel
from .enums import OrderStatusFilter
from .filters import DraftOrderFilter, OrderFilter
from .mutations.draft_orders import (
DraftOrderComplete,
DraftOrderCreate,
DraftOrderDelete,
DraftOrderLineDelete,
DraftOrderLinesCreate,
DraftOrderLineUpdate,
DraftOrderUpdate,
)
from .mutations.fulfillments import (
FulfillmentCancel,
FulfillmentCreate,
FulfillmentUpdateTracking,
)
from .mutations.orders import (
OrderAddNote,
OrderCancel,
OrderCapture,
OrderClearMeta,
OrderClearPrivateMeta,
OrderMarkAsPaid,
OrderRefund,
OrderUpdate,
OrderUpdateMeta,
OrderUpdatePrivateMeta,
OrderUpdateShipping,
OrderVoid,
)
from .resolvers import (
resolve_draft_orders,
resolve_homepage_events,
resolve_order,
resolve_order_by_token,
resolve_orders,
resolve_orders_total,
)
from .types import Order, OrderEvent
class OrderFilterInput(FilterInputObjectType):
class Meta:
filterset_class = OrderFilter
class OrderDraftFilterInput(FilterInputObjectType):
class Meta:
filterset_class = DraftOrderFilter
class OrderQueries(graphene.ObjectType):
homepage_events = PrefetchingConnectionField(
OrderEvent,
description="""List of activity events to display on
homepage (at the moment it only contains order-events).""",
)
order = graphene.Field(
Order,
description="Lookup an order by ID.",
id=graphene.Argument(graphene.ID, description="ID of an order.", required=True),
)
orders = FilterInputConnectionField(
Order,
filter=OrderFilterInput(description="Filtering options for orders."),
query=graphene.String(description=DESCRIPTIONS["order"]),
created=graphene.Argument(
ReportingPeriod, description="Filter orders from a selected timespan."
),
status=graphene.Argument(
OrderStatusFilter, description="Filter order by status"
),
description="List of orders.",
)
draft_orders = FilterInputConnectionField(
Order,
filter=OrderDraftFilterInput(description="Filtering options for draft orders."),
query=graphene.String(description=DESCRIPTIONS["order"]),
created=graphene.Argument(
ReportingPeriod, description="Filter draft orders from a selected timespan."
),
description="List of draft orders.",
)
orders_total = graphene.Field(
TaxedMoney,
description="Return the total sales amount from a specific period.",
period=graphene.Argument(ReportingPeriod, description="A period of time."),
)
order_by_token = graphene.Field(
Order,
description="Lookup an order by token.",
token=graphene.Argument(
graphene.UUID, description="The order's token.", required=True
),
)
@permission_required("order.manage_orders")
def resolve_homepage_events(self, *_args, **_kwargs):
return resolve_homepage_events()
@login_required
def resolve_order(self, info, **data):
return resolve_order(info, data.get("id"))
@permission_required("order.manage_orders")
def resolve_orders(self, info, created=None, status=None, query=None, **_kwargs):
return resolve_orders(info, created, status, query)
@permission_required("order.manage_orders")
def resolve_draft_orders(self, info, created=None, query=None, **_kwargs):
return resolve_draft_orders(info, created, query)
@permission_required("order.manage_orders")
def resolve_orders_total(self, info, period, **_kwargs):
return resolve_orders_total(info, period)
def resolve_order_by_token(self, _info, token):
return resolve_order_by_token(token)
class OrderMutations(graphene.ObjectType):
draft_order_complete = DraftOrderComplete.Field()
draft_order_create = DraftOrderCreate.Field()
draft_order_delete = DraftOrderDelete.Field()
draft_order_bulk_delete = DraftOrderBulkDelete.Field()
draft_order_lines_bulk_delete = DraftOrderLinesBulkDelete.Field()
draft_order_lines_create = DraftOrderLinesCreate.Field()
draft_order_line_delete = DraftOrderLineDelete.Field()
draft_order_line_update = DraftOrderLineUpdate.Field()
draft_order_update = DraftOrderUpdate.Field()
order_add_note = OrderAddNote.Field()
order_cancel = OrderCancel.Field()
order_capture = OrderCapture.Field()
order_clear_private_meta = OrderClearPrivateMeta.Field()
order_clear_meta = OrderClearMeta.Field()
order_fulfillment_cancel = FulfillmentCancel.Field()
order_fulfillment_create = FulfillmentCreate.Field()
order_fulfillment_update_tracking = FulfillmentUpdateTracking.Field()
order_mark_as_paid = OrderMarkAsPaid.Field()
order_refund = OrderRefund.Field()
order_update = OrderUpdate.Field()
order_update_meta = OrderUpdateMeta.Field()
order_update_private_meta = OrderUpdatePrivateMeta.Field()
order_update_shipping = OrderUpdateShipping.Field()
order_void = OrderVoid.Field()
order_bulk_cancel = OrderBulkCancel.Field()
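# Hedged sketch (not part of the original module): the two classes above can be
# wired into a standalone schema for experimentation; Saleor itself merges them
# with the query and mutation classes of its other apps instead.
if __name__ == "__main__":
    schema = graphene.Schema(query=OrderQueries, mutation=OrderMutations)
    print(schema)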
| avg_line_length: 35.333333 | max_line_length: 88 | alphanum_fraction: 0.736027 |

| hexsha: ea7b1e9b9c8a84dce442167fcab0afcfbce82d4c | size: 248 | ext: py | lang: Python |
| path: 7/confirmed_users.py | repo: liuhanyu200/pygame @ 38a68e779e6b0a63edb1758fca98ebbf40bb0444 | licenses: ["BSD-3-Clause"] |
| stars: null | issues: null | forks: null |
# coding:utf-8
unconfirmed_users = ['liuhanyu', 'luoliuzhou', 'wangyue', 'xiaolizi']
confirmed_users = []
while unconfirmed_users:
user = unconfirmed_users.pop()
confirmed_users.append(user)
print(confirmed_users)
print(unconfirmed_users)
| avg_line_length: 24.8 | max_line_length: 69 | alphanum_fraction: 0.758065 |

| hexsha: 130b68cda8930df59bd73fd45249d1973a85e2f7 | size: 1,557 | ext: py | lang: Python |
| path: examples/AdAccountCampaignsPostMAIA.py | repo: pasha-r/facebook-python-ads-sdk @ 76feadd77baed839516b53297628e7a254c8c3c0 | licenses: ["CNRI-Python"] |
| stars: null | issues: null | forks: 1 (2018-09-24T14:04:48.000Z .. 2018-09-24T14:04:48.000Z) |
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.campaign import Campaign
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<AD_ACCOUNT_ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'name': 'Mobile App Installs Campaign',
'objective': 'APP_INSTALLS',
'status': 'PAUSED',
}
print(AdAccount(id).create_campaign(
fields=fields,
params=params,
))
| avg_line_length: 37.97561 | max_line_length: 76 | alphanum_fraction: 0.776493 |

| hexsha: 9d6cbbe73d9518a1f4480a99b1d5a8324cf74ed5 | size: 8,956 | ext: py | lang: Python |
| path: chia/server/start_service.py | licenses: ["Apache-2.0"] |
| stars: 3 (2022-02-10T09:42:06.000Z .. 2022-03-09T16:39:22.000Z) via 13thProgression/gold-blockchain @ 2a1b19ed9d088315fd717b9b521e21f229522c4b |
| issues: null via lahyunLee/gold-blockchain @ b9f90babeb58c3c0fa384153ca01ae973e19cffd |
| forks: 2 (2022-03-16T14:22:56.000Z .. 2022-03-19T11:11:31.000Z) via lahyunLee/gold-blockchain @ b9f90babeb58c3c0fa384153ca01ae973e19cffd |
import asyncio
import logging
import logging.config
import os
import signal
from sys import platform
from typing import Any, Callable, List, Optional, Tuple
from chia.daemon.server import service_launch_lock_path, singleton
from chia.server.ssl_context import chia_ssl_ca_paths, private_ssl_ca_paths
try:
import uvloop
except ImportError:
uvloop = None
from chia.rpc.rpc_server import start_rpc_server
from chia.server.outbound_message import NodeType
from chia.server.server import ChiaServer
from chia.server.upnp import UPnP
from chia.types.peer_info import PeerInfo
from chia.util.chia_logging import initialize_logging
from chia.util.config import load_config, load_config_cli
from chia.util.ints import uint16
from chia.util.setproctitle import setproctitle
from .reconnect_task import start_reconnect_task
# this is used to detect whether we are running in the main process or not, in
# signal handlers. We need to ignore signals in the sub processes.
main_pid: Optional[int] = None
class Service:
def __init__(
self,
root_path,
node: Any,
peer_api: Any,
node_type: NodeType,
advertised_port: int,
service_name: str,
network_id: str,
upnp_ports: List[int] = [],
server_listen_ports: List[int] = [],
connect_peers: List[PeerInfo] = [],
auth_connect_peers: bool = True,
on_connect_callback: Optional[Callable] = None,
rpc_info: Optional[Tuple[type, int]] = None,
parse_cli_args=True,
connect_to_daemon=True,
) -> None:
self.root_path = root_path
self.config = load_config(root_path, "config.yaml")
ping_interval = self.config.get("ping_interval")
self.self_hostname = self.config.get("self_hostname")
self.daemon_port = self.config.get("daemon_port")
assert ping_interval is not None
self._connect_to_daemon = connect_to_daemon
self._node_type = node_type
self._service_name = service_name
self._rpc_task: Optional[asyncio.Task] = None
self._rpc_close_task: Optional[asyncio.Task] = None
self._network_id: str = network_id
proctitle_name = f"gold_{service_name}"
setproctitle(proctitle_name)
self._log = logging.getLogger(service_name)
if parse_cli_args:
service_config = load_config_cli(root_path, "config.yaml", service_name)
else:
service_config = load_config(root_path, "config.yaml", service_name)
initialize_logging(service_name, service_config["logging"], root_path)
self._rpc_info = rpc_info
private_ca_crt, private_ca_key = private_ssl_ca_paths(root_path, self.config)
chia_ca_crt, chia_ca_key = chia_ssl_ca_paths(root_path, self.config)
inbound_rlp = self.config.get("inbound_rate_limit_percent")
outbound_rlp = self.config.get("outbound_rate_limit_percent")
assert inbound_rlp and outbound_rlp
self._server = ChiaServer(
advertised_port,
node,
peer_api,
node_type,
ping_interval,
network_id,
inbound_rlp,
outbound_rlp,
root_path,
service_config,
(private_ca_crt, private_ca_key),
(chia_ca_crt, chia_ca_key),
name=f"{service_name}_server",
)
f = getattr(node, "set_server", None)
if f:
f(self._server)
else:
self._log.warning(f"No set_server method for {service_name}")
self._connect_peers = connect_peers
self._auth_connect_peers = auth_connect_peers
self._upnp_ports = upnp_ports
self._server_listen_ports = server_listen_ports
self._api = peer_api
self._node = node
self._did_start = False
self._is_stopping = asyncio.Event()
self._stopped_by_rpc = False
self._on_connect_callback = on_connect_callback
self._advertised_port = advertised_port
self._reconnect_tasks: List[asyncio.Task] = []
self.upnp: Optional[UPnP] = None
async def start(self, **kwargs) -> None:
# we include `kwargs` as a hack for the wallet, which for some
# reason allows parameters to `_start`. This is serious BRAIN DAMAGE,
# and should be fixed at some point.
# TODO: move those parameters to `__init__`
if self._did_start:
return None
assert self.self_hostname is not None
assert self.daemon_port is not None
self._did_start = True
self._enable_signals()
await self._node._start(**kwargs)
for port in self._upnp_ports:
if self.upnp is None:
self.upnp = UPnP()
self.upnp.remap(port)
await self._server.start_server(self._on_connect_callback)
self._reconnect_tasks = [
start_reconnect_task(self._server, _, self._log, self._auth_connect_peers) for _ in self._connect_peers
]
self._log.info(f"Started {self._service_name} service on network_id: {self._network_id}")
self._rpc_close_task = None
if self._rpc_info:
rpc_api, rpc_port = self._rpc_info
self._rpc_task = asyncio.create_task(
start_rpc_server(
rpc_api(self._node),
self.self_hostname,
self.daemon_port,
uint16(rpc_port),
self.stop,
self.root_path,
self.config,
self._connect_to_daemon,
)
)
async def run(self) -> None:
lockfile = singleton(service_launch_lock_path(self.root_path, self._service_name))
if lockfile is None:
self._log.error(f"{self._service_name}: already running")
raise ValueError(f"{self._service_name}: already running")
await self.start()
await self.wait_closed()
def _enable_signals(self) -> None:
global main_pid
main_pid = os.getpid()
signal.signal(signal.SIGINT, self._accept_signal)
signal.signal(signal.SIGTERM, self._accept_signal)
if platform == "win32" or platform == "cygwin":
# pylint: disable=E1101
signal.signal(signal.SIGBREAK, self._accept_signal) # type: ignore
def _accept_signal(self, signal_number: int, stack_frame):
self._log.info(f"got signal {signal_number}")
# we only handle signals in the main process. In the ProcessPoolExecutor
# processes, we have to ignore them. We'll shut them down gracefully
# from the main process
global main_pid
if os.getpid() != main_pid:
return
self.stop()
def stop(self) -> None:
if not self._is_stopping.is_set():
self._is_stopping.set()
# start with UPnP, since this can take a while, we want it to happen
# in the background while shutting down everything else
for port in self._upnp_ports:
if self.upnp is not None:
self.upnp.release(port)
self._log.info("Cancelling reconnect task")
for _ in self._reconnect_tasks:
_.cancel()
self._log.info("Closing connections")
self._server.close_all()
self._node._close()
self._node._shut_down = True
self._log.info("Calling service stop callback")
if self._rpc_task is not None:
self._log.info("Closing RPC server")
async def close_rpc_server() -> None:
if self._rpc_task:
await (await self._rpc_task)()
self._rpc_close_task = asyncio.create_task(close_rpc_server())
async def wait_closed(self) -> None:
await self._is_stopping.wait()
self._log.info("Waiting for socket to be closed (if opened)")
self._log.info("Waiting for ChiaServer to be closed")
await self._server.await_closed()
if self._rpc_close_task:
self._log.info("Waiting for RPC server")
await self._rpc_close_task
self._log.info("Closed RPC server")
self._log.info("Waiting for service _await_closed callback")
await self._node._await_closed()
if self.upnp is not None:
# this is a blocking call, waiting for the UPnP thread to exit
self.upnp.shutdown()
self._log.info(f"Service {self._service_name} at port {self._advertised_port} fully closed")
async def async_run_service(*args, **kwargs) -> None:
service = Service(*args, **kwargs)
return await service.run()
def run_service(*args, **kwargs) -> None:
if uvloop is not None:
uvloop.install()
return asyncio.run(async_run_service(*args, **kwargs))
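# --- Editor's illustrative sketch (not part of the original service module) ---
# _accept_signal() above only acts in the main process: ProcessPoolExecutor
# workers inherit the handler and must ignore the signal so the parent can shut
# them down in an orderly way. The same pattern in isolation, stdlib only:
#
#     import os
#     import signal
#
#     MAIN_PID = os.getpid()
#
#     def accept_signal(signum, frame):
#         if os.getpid() != MAIN_PID:
#             return  # a pool worker: let the main process drive the shutdown
#         print(f"main process received signal {signum}; stopping service")
#
#     signal.signal(signal.SIGINT, accept_signal)
#     signal.signal(signal.SIGTERM, accept_signal)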
| 35.121569
| 115
| 0.634435
|
e41380b40db25696cf8665919c8c420a7c91dd32
| 857
|
py
|
Python
|
pokebattle/urls.py
|
pamella/pokebattle
|
a90b27d87b12dd079356ad4b5a15a196004e1c5e
|
[
"MIT"
] | 2
|
2019-02-27T16:11:11.000Z
|
2019-12-10T14:01:18.000Z
|
pokebattle/urls.py
|
pamella/pokebattle
|
a90b27d87b12dd079356ad4b5a15a196004e1c5e
|
[
"MIT"
] | 14
|
2019-02-21T14:36:22.000Z
|
2022-02-26T09:49:44.000Z
|
pokebattle/urls.py
|
pamella/pokebattle
|
a90b27d87b12dd079356ad4b5a15a196004e1c5e
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, url # noqa
from django.contrib import admin
from django.views.generic import TemplateView
import django_js_reverse.views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),
url(r'^$', TemplateView.as_view(template_name='base.html'), name='home'),
url(r'', include('battles.urls', namespace='battles')),
url(r'', include('users.urls')),
url(r'social/', include('social_django.urls', namespace='social')),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'api/', include('battles.endpoints_urls', namespace='api_battles')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 30.607143
| 77
| 0.693116
|
0d01cdf06a2d22089ca250d607cb7b49c0f7501c
| 1,465
|
py
|
Python
|
common/ClusterFactorLinearThetaEffect.py
|
keccaksasca/keccaksasca
|
ca83ae2add1e47b14576ffcbe1462bc2f099acf8
|
[
"CC0-1.0"
] | 4
|
2020-12-15T17:59:04.000Z
|
2022-02-25T02:19:19.000Z
|
common/ClusterFactorLinearThetaEffect.py
|
keccaksasca/keccaksasca
|
ca83ae2add1e47b14576ffcbe1462bc2f099acf8
|
[
"CC0-1.0"
] | null | null | null |
common/ClusterFactorLinearThetaEffect.py
|
keccaksasca/keccaksasca
|
ca83ae2add1e47b14576ffcbe1462bc2f099acf8
|
[
"CC0-1.0"
] | null | null | null |
from Node import Node
from ClusterFactorLinear import ClusterFactorLinear
import numpy as np
import settings
# linear factor --> everything that can be written using just XORs (such as theta, parity, etc)
# this variant --> all messages are aligned! --> can be used for computing parity and for theta, but not for theta effect
class ClusterFactorLinearThetaEffect(ClusterFactorLinear):
def __init__(self, name):
super().__init__(name)
# packed in own function to keep core the same
def gatherIncoming(self):
#edge 0 --> aligned input
#edge 1 --> non-aligned, 7 bits, upper part
#edge 2 --> non-aligned, 1 bit, lower part
#edge 3 --> aligned output
msgin = np.zeros(shape=(4, self.numvalues), dtype=settings.NUMPY_DATATYPE)
msgin[0, :] = self.edges[0].m2f
msgin[3, :] = self.edges[3].m2f
msgin[1, ::2] = self.edges[1].m2f #only where LSB=0
msgin[2, :2] = self.edges[2].m2f #set where all upper bits are 0, only lower bit either 0 or 1 (because xor with 0 has no effect)
return msgin
# packed in own function to keep core the same
def spreadOutgoing(self, msgout):
self.edges[0].m2n = msgout[0, :]
self.edges[3].m2n = msgout[3, :]
self.edges[1].m2n = msgout[1, ::2] #extract where LSB=0 (MSB=1 is not a valid solution here)
self.edges[2].m2n = msgout[2, :2] #extract only lowest 2 cases, as upper bits must not be set
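# --- Editor's illustrative note (not part of the original factor code) ---
# A minimal sketch of the alignment used in gatherIncoming()/spreadOutgoing()
# above, assuming an 8-bit alphabet (self.numvalues == 256) purely for the
# example:
#
#     import numpy as np
#     numvalues = 256
#     m_upper = np.full(128, 1.0 / 128)  # edge 1: message over the 7 upper bits
#     m_lower = np.full(2, 0.5)          # edge 2: message over the single lower bit
#     msgin = np.zeros((4, numvalues))
#     msgin[1, ::2] = m_upper            # upper-bit values land where LSB == 0
#     msgin[2, :2] = m_lower             # lower-bit values land on 0 and 1
#
# Embedding the partial messages at those index patterns lets the linear (XOR)
# factor treat all four edges as if they ranged over the full alphabet.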
| 39.594595
| 137
| 0.655973
|
a7a492f930070524e05488f5f53289199c86268b
| 32,571
|
py
|
Python
|
arelle/plugin/validateUSBestPractices.py
|
GuoHuiChen/Arelle
|
76b3c720e55348fd91b7be091040d2207f85400c
|
[
"Apache-2.0"
] | 1
|
2018-07-18T04:56:38.000Z
|
2018-07-18T04:56:38.000Z
|
arelle/plugin/validateUSBestPractices.py
|
GuoHuiChen/Arelle
|
76b3c720e55348fd91b7be091040d2207f85400c
|
[
"Apache-2.0"
] | null | null | null |
arelle/plugin/validateUSBestPractices.py
|
GuoHuiChen/Arelle
|
76b3c720e55348fd91b7be091040d2207f85400c
|
[
"Apache-2.0"
] | null | null | null |
from arelle import PluginManager
from arelle.ModelValue import qname
from arelle import Locale, ModelXbrl, XbrlConst
from arelle.FileSource import openFileSource, openFileStream, saveFile
import os, io, re, json, time
from collections import defaultdict
# ((year, ugtNamespace, ugtDocLB, ugtEntryPoint) ...)
ugtDocs = ({"year": 2012,
"namespace": "http://fasb.org/us-gaap/2012-01-31",
"docLB": "http://xbrl.fasb.org/us-gaap/2012/us-gaap-2012-01-31.zip/us-gaap-2012-01-31/elts/us-gaap-doc-2012-01-31.xml",
"entryXsd": "http://xbrl.fasb.org/us-gaap/2012/us-gaap-2012-01-31.zip/us-gaap-2012-01-31/entire/us-gaap-entryPoint-std-2012-01-31.xsd",
},
{"year": 2013,
"namespace": "http://fasb.org/us-gaap/2013-01-31",
"docLB": "http://xbrl.fasb.org/us-gaap/2013/us-gaap-2013-01-31.zip/us-gaap-2013-01-31/elts/us-gaap-doc-2013-01-31.xml",
"entryXsd": "http://xbrl.fasb.org/us-gaap/2013/us-gaap-2013-01-31.zip/us-gaap-2013-01-31/entire/us-gaap-entryPoint-std-2013-01-31.xsd",
},
)
def setup(val):
val.linroleDefinitionIsDisclosure = re.compile(r"-\s+Disclosure\s+-\s",
re.IGNORECASE)
val.linkroleDefinitionStatementSheet = re.compile(r"[^-]+-\s+Statement\s+-\s+.*", # no restriction to type of statement
re.IGNORECASE)
val.ugtNamespace = None
cntlr = val.modelXbrl.modelManager.cntlr
# load deprecated concepts for filed year of us-gaap
for ugt in ugtDocs:
ugtNamespace = ugt["namespace"]
if ugtNamespace in val.modelXbrl.namespaceDocs and len(val.modelXbrl.namespaceDocs[ugtNamespace]) > 0:
val.ugtNamespace = ugtNamespace
usgaapDoc = val.modelXbrl.namespaceDocs[ugtNamespace][0]
deprecationsJsonFile = usgaapDoc.filepathdir + os.sep + "deprecated-concepts.json"
file = None
try:
file = openFileStream(cntlr, deprecationsJsonFile, 'rt', encoding='utf-8')
val.usgaapDeprecations = json.load(file)
file.close()
except Exception:
if file:
file.close()
val.modelXbrl.modelManager.addToLog(_("loading us-gaap {0} deprecated concepts into cache").format(ugt["year"]))
startedAt = time.time()
ugtDocLB = ugt["docLB"]
val.usgaapDeprecations = {}
# load without SEC/EFM validation (doc file would not be acceptable)
priorValidateDisclosureSystem = val.modelXbrl.modelManager.validateDisclosureSystem
val.modelXbrl.modelManager.validateDisclosureSystem = False
deprecationsInstance = ModelXbrl.load(val.modelXbrl.modelManager,
# "http://xbrl.fasb.org/us-gaap/2012/elts/us-gaap-doc-2012-01-31.xml",
# load from zip (especially after caching) is incredibly faster
openFileSource(ugtDocLB, cntlr),
_("built deprecations table in cache"))
val.modelXbrl.modelManager.validateDisclosureSystem = priorValidateDisclosureSystem
if deprecationsInstance is None:
val.modelXbrl.error("arelle:notLoaded",
_("US-GAAP documentation not loaded: %(file)s"),
modelXbrl=val, file=os.path.basename(ugtDocLB))
else:
# load deprecations
for labelRel in deprecationsInstance.relationshipSet(XbrlConst.conceptLabel).modelRelationships:
modelDocumentation = labelRel.toModelObject
conceptName = labelRel.fromModelObject.name
if modelDocumentation.role == 'http://www.xbrl.org/2009/role/deprecatedLabel':
val.usgaapDeprecations[conceptName] = (val.usgaapDeprecations.get(conceptName, ('',''))[0], modelDocumentation.text)
elif modelDocumentation.role == 'http://www.xbrl.org/2009/role/deprecatedDateLabel':
val.usgaapDeprecations[conceptName] = (modelDocumentation.text, val.usgaapDeprecations.get(conceptName, ('',''))[1])
jsonStr = _STR_UNICODE(json.dumps(val.usgaapDeprecations, ensure_ascii=False, indent=0)) # might not be unicode in 2.7
saveFile(cntlr, deprecationsJsonFile, jsonStr) # 2.7 gets unicode this way
deprecationsInstance.close()
del deprecationsInstance # dereference closed modelXbrl
val.modelXbrl.profileStat(_("build us-gaap deprecated concepts cache"), time.time() - startedAt)
ugtCalcsJsonFile = usgaapDoc.filepathdir + os.sep + "ugt-calculations.json"
ugtDefaultDimensionsJsonFile = usgaapDoc.filepathdir + os.sep + "ugt-default-dimensions.json"
file = None
try:
file = openFileStream(cntlr, ugtCalcsJsonFile, 'rt', encoding='utf-8')
val.usgaapCalculations = json.load(file)
file.close()
file = openFileStream(cntlr, ugtDefaultDimensionsJsonFile, 'rt', encoding='utf-8')
val.usgaapDefaultDimensions = json.load(file)
file.close()
except Exception:
if file:
file.close()
val.modelXbrl.modelManager.addToLog(_("loading us-gaap {0} calculations and default dimensions into cache").format(ugt["year"]))
startedAt = time.time()
ugtEntryXsd = ugt["entryXsd"]
val.usgaapCalculations = {}
val.usgaapDefaultDimensions = {}
# load without SEC/EFM validation (doc file would not be acceptable)
priorValidateDisclosureSystem = val.modelXbrl.modelManager.validateDisclosureSystem
val.modelXbrl.modelManager.validateDisclosureSystem = False
calculationsInstance = ModelXbrl.load(val.modelXbrl.modelManager,
# "http://xbrl.fasb.org/us-gaap/2012/entire/us-gaap-entryPoint-std-2012-01-31.xsd",
# load from zip (especially after caching) is incredibly faster
openFileSource(ugtEntryXsd, cntlr),
_("built us-gaap calculations cache"))
val.modelXbrl.modelManager.validateDisclosureSystem = priorValidateDisclosureSystem
if calculationsInstance is None:
val.modelXbrl.error("arelle:notLoaded",
_("US-GAAP calculations not loaded: %(file)s"),
modelXbrl=val, file=os.path.basename(ugtEntryXsd))
else:
# load calculations
for ELR in calculationsInstance.relationshipSet(XbrlConst.summationItem).linkRoleUris:
elrRelSet = calculationsInstance.relationshipSet(XbrlConst.summationItem, ELR)
definition = ""
for roleType in calculationsInstance.roleTypes.get(ELR,()):
definition = roleType.definition
break
isStatementSheet = bool(val.linkroleDefinitionStatementSheet.match(definition))
elrUgtCalcs = {"#roots": [c.name for c in elrRelSet.rootConcepts],
"#definition": definition,
"#isStatementSheet": isStatementSheet}
for relFrom, rels in elrRelSet.fromModelObjects().items():
elrUgtCalcs[relFrom.name] = [rel.toModelObject.name for rel in rels]
val.usgaapCalculations[ELR] = elrUgtCalcs
jsonStr = _STR_UNICODE(json.dumps(val.usgaapCalculations, ensure_ascii=False, indent=0)) # might not be unicode in 2.7
saveFile(cntlr, ugtCalcsJsonFile, jsonStr) # 2.7 gets unicode this way
# load default dimensions
for defaultDimRel in calculationsInstance.relationshipSet(XbrlConst.dimensionDefault).modelRelationships:
if defaultDimRel.fromModelObject is not None and defaultDimRel.toModelObject is not None:
val.usgaapDefaultDimensions[defaultDimRel.fromModelObject.name] = defaultDimRel.toModelObject.name
jsonStr = _STR_UNICODE(json.dumps(val.usgaapDefaultDimensions, ensure_ascii=False, indent=0)) # might not be unicode in 2.7
saveFile(cntlr, ugtDefaultDimensionsJsonFile, jsonStr) # 2.7 gets unicode this way
calculationsInstance.close()
del calculationsInstance # dereference closed modelXbrl
val.modelXbrl.profileStat(_("build us-gaap calculations and default dimensions cache"), time.time() - startedAt)
break
val.deprecatedFactConcepts = defaultdict(list)
val.deprecatedDimensions = defaultdict(list)
val.deprecatedMembers = defaultdict(list)
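# --- Editor's note on the caches built by setup() above (illustrative) ---
# Three JSON files are written next to the us-gaap documentation linkbase and
# re-read on later runs, with roughly these shapes:
#
#   deprecated-concepts.json     {"ConceptName": ["<deprecation date>", "<documentation>"], ...}
#   ugt-calculations.json        {"<ELR uri>": {"#roots": ["..."], "#definition": "...",
#                                               "#isStatementSheet": true,
#                                               "TotalConceptName": ["Item1", "Item2", ...]}, ...}
#   ugt-default-dimensions.json  {"DimensionName": "DefaultMemberName", ...}
#
# factCheck() and final() below only read these dicts, so once the caches exist
# the UGT documentation and entry-point instances never need to be reloaded.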
def factCheck(val, fact):
concept = fact.concept
context = fact.context
if concept is None or context is None:
return # not checkable
try:
if fact.isNumeric:
# 2.3.3 additional unit tests beyond UTR spec
unit = fact.unit
if unit is not None and concept.type is not None and val.validateUTR:
typeName = concept.type.name
if typeName == "perUnitItemType" and any(m.namespaceURI == XbrlConst.iso4217 or
m in (XbrlConst.qnXbrliPure, XbrlConst.qnXbrliShares)
for m in unit.measures[1]):
val.modelXbrl.log('WARNING-SEMANTIC', "US-BPG.2.3.3.perUnitItemType",
_("PureItemType fact %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s has disallowed unit denominator %(denominator)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, denominator=", ".join((str(m) for m in unit.measures[1])))
if not fact.isNil and fact.xValue is not None:
# 2.4.1 decimal disagreement
if fact.decimals and fact.decimals != "INF":
vf = float(fact.value)
if _ISFINITE(vf):
dec = _INT(fact.decimals)
vround = round(vf, dec)
if vf != vround:
val.modelXbrl.log('WARNING-SEMANTIC', "US-BPG.2.4.1",
_("Decimal disagreement %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s has insignificant value %(insignificantValue)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, insignificantValue=Locale.format(val.modelXbrl.locale, "%.*f",
(dec + 2 if dec > 0 else 0, vf - vround),
True))
# 2.5.1 fractions disallowed on a disclosure
if fact.isFraction:
if any(val.linroleDefinitionIsDisclosure.match(roleType.definition)
for rel in val.modelXbrl.relationshipSet(XbrlConst.parentChild).toModelObject(concept)
for roleType in val.modelXbrl.roleTypes.get(rel.linkrole,())):
val.modelXbrl.log('WARNING-SEMANTIC', "US-BPG.2.5.1",
_("Disclosure %(fact)s in context %(contextID)s value %(value)s is a fraction"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, value=fact.value)
# deprecated concept
if concept.qname.namespaceURI == val.ugtNamespace:
if concept.name in val.usgaapDeprecations:
val.deprecatedFactConcepts[concept].append(fact)
elif concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.deprecatedFactConcepts[concept].append(fact)
if fact.isItem and fact.context is not None:
for dimConcept, modelDim in fact.context.segDimValues.items():
if dimConcept.qname.namespaceURI == val.ugtNamespace:
if dimConcept.name in val.usgaapDeprecations:
val.deprecatedDimensions[dimConcept].append(fact)
elif dimConcept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.deprecatedDimensions[dimConcept].append(fact)
if modelDim.isExplicit:
member = modelDim.member
if member is not None:
if member.qname.namespaceURI == val.ugtNamespace:
if member.name in val.usgaapDeprecations:
val.deprecatedMembers[member].append(fact)
elif member.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.deprecatedMembers[member].append(fact)
except Exception as err:
val.modelXbrl.log('WARNING-SEMANTIC', "US-BPG.testingException",
_("%(fact)s in context %(contextID)s unit %(unitID)s value %(value)s cannot be tested due to: %(err)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, err=err)
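# --- Editor's worked example for the US-BPG 2.4.1 check above (illustrative) ---
# A fact reported as 1234.5678 with decimals="2" gives vround = 1234.57, so
# vf != vround and the fact is flagged; the logged "insignificant value" is
# vf - vround (about -0.0022). Facts with decimals="INF", or values that already
# round cleanly at the declared precision, pass the check.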
def final(val, conceptsUsed):
ugtNamespace = val.ugtNamespace
startedAt = time.time()
for depType, depItems in (("Concept", val.deprecatedFactConcepts),
("Dimension", val.deprecatedDimensions),
("Member", val.deprecatedMembers)):
for concept, facts in depItems.items():
if concept.qname.namespaceURI == ugtNamespace:
if concept.name in val.usgaapDeprecations:
deprecation = val.usgaapDeprecations[concept.name]
val.modelXbrl.log('WARNING-SEMANTIC', "FASB:deprecated{0}".format(depType),
_("%(deprecation)s of fact(s) %(fact)s (e.g., in context %(contextID)s value %(value)s) was deprecated on %(date)s: %(documentation)s"),
modelObject=facts, fact=facts[0].qname, contextID=facts[0].contextID, value=facts[0].value,
deprecation=depType,
date=deprecation[0], documentation=deprecation[1])
elif concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.modelXbrl.log('WARNING-SEMANTIC', "FASB:deprecated{0}".format(depType),
_("%(deprecation)s of facts %(fact)s in context %(contextID)s value %(value)s was deprecated on %(date)s"),
modelObject=facts, fact=facts[0].qname, contextID=facts[0].contextID, value=facts[0].value,
deprecation=depType,
date=concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"))
# check for unused extension concepts
dimensionDefaults = set(defaultMemConcept for defaultMemConcept in val.modelXbrl.dimensionDefaultConcepts.values())
extensionConceptsUnused = [concept
for qn, concept in val.modelXbrl.qnameConcepts.items()
if concept.isItem and
qn.namespaceURI not in val.disclosureSystem.standardTaxonomiesDict
if concept not in conceptsUsed and
# don't report dimension that has a default member
concept not in val.modelXbrl.dimensionDefaultConcepts and
# don't report default members
concept not in dimensionDefaults and
(concept.isDimensionItem or
(concept.type is not None and concept.type.isDomainItemType) or
# this or branch only pertains to fact concepts
not concept.isAbstract)
]
if extensionConceptsUnused:
for concept in sorted(extensionConceptsUnused, key=lambda c: str(c.qname)):
val.modelXbrl.log('INFO-SEMANTIC', "US-BPG.1.7.1.unusedExtensionConcept",
_("Company extension concept is unused: %(concept)s"),
modelObject=concept, concept=concept.qname)
# check for unused concept relationships of standard taxonomy elements
standardRelationships = val.modelXbrl.relationshipSet((XbrlConst.parentChild, XbrlConst.summationItem, XbrlConst.dimensionDomain, XbrlConst.domainMember, XbrlConst.dimensionDefault))
standardConceptsUnused = defaultdict(set) # dict by concept of relationship where unused
standardConceptsDeprecated = defaultdict(set)
for rel in standardRelationships.modelRelationships:
for concept in (rel.fromModelObject, rel.toModelObject):
if (concept is not None and
concept.qname.namespaceURI in val.disclosureSystem.standardTaxonomiesDict and
concept not in conceptsUsed):
if (not concept.isAbstract or
concept.isDimensionItem or
(concept.type is not None and concept.type.isDomainItemType)):
standardConceptsUnused[concept].add(rel.locatorOf(concept))
elif ((concept.qname.namespaceURI == ugtNamespace and
concept.name in val.usgaapDeprecations) or
concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate")):
# catches abstract deprecated concepts in linkbases
standardConceptsDeprecated[concept].add(rel.locatorOf(concept))
for concept, locs in standardConceptsUnused.items():
if concept.qname.namespaceURI == ugtNamespace and concept.name in val.usgaapDeprecations:
deprecation = val.usgaapDeprecations[concept.name]
val.modelXbrl.log('INFO-SEMANTIC', "FASB:deprecatedConcept",
_("Unused concept %(concept)s has extension relationships and was deprecated on %(date)s: %(documentation)s"),
modelObject=locs, concept=concept.qname,
date=deprecation[0], documentation=deprecation[1])
elif concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.modelXbrl.log('INFO-SEMANTIC', "FASB:deprecatedConcept",
_("Unused concept %(concept)s has extension relationships and was deprecated on %(date)s"),
modelObject=locs, concept=concept.qname,
date=concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"))
elif (concept not in val.modelXbrl.dimensionDefaultConcepts and # don't report dimension that has a default member
concept not in dimensionDefaults and # don't report default members
(concept.isDimensionItem or
(concept.type is not None and concept.type.isDomainItemType) or
# this or branch only pertains to fact concepts
not concept.isAbstract)):
val.modelXbrl.log('INFO-SEMANTIC', "US-BPG.1.7.1.unusedStandardConceptInExtensionRelationship",
_("Company extension relationships of unused standard concept: %(concept)s"),
modelObject=locs, concept=concept.qname)
for concept, locs in standardConceptsDeprecated.items():
if concept.qname.namespaceURI == ugtNamespace and concept.name in val.usgaapDeprecations:
deprecation = val.usgaapDeprecations[concept.name]
val.modelXbrl.log('INFO-SEMANTIC', "FASB:deprecatedConcept",
_("Concept %(concept)s has extension relationships and was deprecated on %(date)s: %(documentation)s"),
modelObject=locs, concept=concept.qname,
date=deprecation[0], documentation=deprecation[1])
elif concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"):
val.modelXbrl.log('INFO-SEMANTIC', "FASB:deprecatedConcept",
_("Concept %(concept)s has extension relationships and was deprecated on %(date)s"),
modelObject=locs, concept=concept.qname,
date=concept.get("{http://fasb.org/us-gaap/attributes}deprecatedDate"))
val.modelXbrl.profileStat(_("validate US-BGP unused concepts"), time.time() - startedAt)
del standardRelationships, extensionConceptsUnused, standardConceptsUnused, standardConceptsDeprecated, dimensionDefaults
del val.deprecatedFactConcepts
del val.deprecatedDimensions
del val.deprecatedMembers
if hasattr(val, 'usgaapCalculations'):
"""
The UGT calculations are loaded and cached from the US-GAAP.
UGT calculation link roles are presumed to (and do) reflect the statement sheets they
correspond to, and therefore each set of UGT summation-item arc-sets are cached and
identified as to whether a statement sheet or other.
A concept that has facts in the instance and is a total concept with summation-item
arc-sets in UGT is examined if it appears on any submission face statement
parent-child link role. (No examination is made if the concept is only on
non-face statements of the submission, even if on some UGT face statement.)
Each UGT link role that has facts reported with a total concept has its
summation-item arc-sets examined to see if any compatible pair of UGT total
and item facts in the instance document do not have any submission calculation
sibling or descendant relationship. (Compatible here only means context and unit
equivalence.) Addition of descendancy in the submission was needed to avoid
excessive false positives. Each such issue is reported by filing parent-child
link role, UGT calculation link role, contributing item, and total item. The
report of these items is sorted by contributing item.
"""
startedAt = time.time()
# check for usgaap calculations missing from extension
ugtTotalConceptNames = set(totalConceptName
for ugtRels in val.usgaapCalculations.values()
for totalConceptName in ugtRels.keys())
issues = []
for totalConcept in conceptsUsed:
# is it ugt concept on a filing face sheet statement
if (totalConcept.qname.namespaceURI == ugtNamespace and
totalConcept.qname.localName in ugtTotalConceptNames and
any(val.linkroleDefinitionStatementSheet.match(roleType.definition)
for rel in val.modelXbrl.relationshipSet(XbrlConst.parentChild).toModelObject(totalConcept)
for roleType in val.modelXbrl.roleTypes.get(rel.linkrole,()))):
# is it a total in usgaap-calculations on a statement
for ugtELR, ugtRels in val.usgaapCalculations.items():
if ugtRels["#isStatementSheet"] and totalConcept.name in ugtRels:
# find compatible filed concepts on ugt summation items
for itemName in ugtRels[totalConcept.name]:
itemQname = qname(ugtNamespace,itemName)
itemConcept = val.modelXbrl.qnameConcepts.get(itemQname)
if itemConcept is not None and itemConcept in conceptsUsed:
# and item concept appears on a same face statement with total concept
filingELR = None
for rel in val.modelXbrl.relationshipSet(XbrlConst.parentChild).toModelObject(itemConcept):
for roleType in val.modelXbrl.roleTypes.get(rel.linkrole,()):
if (val.linkroleDefinitionStatementSheet.match(roleType.definition) and
val.modelXbrl.relationshipSet(XbrlConst.parentChild,rel.linkrole)
.isRelated(totalConcept,'sibling-or-descendant',itemConcept)):
filingELR = rel.linkrole
break
if filingELR:
break
if filingELR:
# are there any compatible facts for this sum?
for totalFact in val.modelXbrl.factsByQname[totalConcept.qname]:
for itemFact in val.modelXbrl.factsByQname[itemQname]:
if (totalFact.context is not None and totalFact.context.isEqualTo(itemFact.context) and
totalFact.unit is not None and totalFact.unit.isEqualTo(itemFact.unit)):
foundFiledItemCalc = False
# is there a summation in the filing
for rel in val.modelXbrl.relationshipSet(XbrlConst.summationItem).fromModelObject(totalConcept):
if rel.toModelObject is itemConcept:
foundFiledItemCalc = True
if not foundFiledItemCalc:
issues.append((filingELR,
ugtELR,
itemName,
totalFact,
itemFact))
if issues:
filingELRs = set()
ugtELRs = set()
itemIssuesELRs = defaultdict(set)
contextIDs = set()
for issue in issues:
filingELR, ugtELR, itemName, totalFact, itemFact = issue
filingELRs.add(filingELR)
ugtELRs.add(ugtELR)
contextIDs.add(totalFact.contextID)
contextIDs.add(itemFact.contextID)
itemIssuesELRs[itemName].add((filingELR, ugtELR))
msg = [_("Financial statement calculation missing relationships from total concept to item concepts that are in us-gaap taxonomy. "),
_("\n\nTotal concept: \n%(conceptSum)s. ")]
args = {"conceptSum": totalConcept.qname}
if len(filingELRs) == 1:
msg.append(_("\n\nfiling schedule link role: \n%(filingLinkrole)s. "))
args["filingLinkrole"] = filingELR
if len(ugtELRs) == 1:
msg.append(_("\n\nus-gaap calc link role: \n%(usgaapLinkrole)s. "))
args["usgaapLinkrole"] = ugtELR
if len(filingELRs) == 1 and len(ugtELRs) == 1:
msg.append(_("\n\nSummation items missing: \n"))
for i, itemName in enumerate(sorted(itemIssuesELRs.keys())):
for j, itemIssueELRs in enumerate(sorted(itemIssuesELRs[itemName])):
filingELR, ugtELR = itemIssueELRs
if j == 0:
argName = "missingConcept_{0}".format(i)
if len(filingELRs) == 1 and len(ugtELRs) == 1:
msg.append(_("\n%({0})s. ").format(argName))
else:
msg.append(_("\n\nSummation item: %({0})s. ").format(argName))
args[argName] = itemFact.qname
if len(filingELRs) > 1:
argName = "filingLinkrole_{0}_{1}".format(i,j)
msg.append(_("\n filing schedule: %({0})s. ").format(argName))
args[argName] = filingELR
if len(ugtELRs) > 1:
argName = "usgaapLinkrole_{0}_{1}".format(i,j)
msg.append(_("\n us-gaap linkrole: %({0})s. ").format(argName))
args[argName] = ugtELR
msg.append(_("\n\nCorresponding facts in contexts: \n%(contextIDs)s\n"))
args["contextIDs"] = ", ".join(sorted(contextIDs))
val.modelXbrl.log('WARNING-SEMANTIC', "US-BPG:missingCalculation",
''.join(msg),
**args)
issues = []
val.modelXbrl.profileStat(_("validate US-BGP missing calcs"), time.time() - startedAt)
if hasattr(val, 'usgaapDefaultDimensions'):
"""
The UGT default dimensions are loaded and cached from US-GAAP.
Question E.16 (Updated 02/05/2013):
Filers SHOULD also avoid creating new domains or changing default member elements for pre-defined dimensions.
"""
for defaultDimRel in val.modelXbrl.relationshipSet(XbrlConst.dimensionDefault).modelRelationships:
if (defaultDimRel.fromModelObject is not None and defaultDimRel.toModelObject is not None and
defaultDimRel.fromModelObject.qname.namespaceURI == ugtNamespace and
defaultDimRel.fromModelObject.name in val.usgaapDefaultDimensions and
(defaultDimRel.toModelObject.qname.namespaceURI != ugtNamespace or
defaultDimRel.toModelObject.name != val.usgaapDefaultDimensions[defaultDimRel.fromModelObject.name])):
if defaultDimRel.toModelObject.qname.namespaceURI != ugtNamespace:
msgObjects = (defaultDimRel, defaultDimRel.toModelObject)
else:
msgObjects = defaultDimRel
val.modelXbrl.log('WARNING-SEMANTIC', "secStaffObservation.E.16.defaultDimension",
_("UGT-defined dimension %(dimension)s has extension defined default %(extensionDefault)s, predefined default is %(predefinedDefault)s"),
modelObject=msgObjects,
dimension=defaultDimRel.fromModelObject.qname,
extensionDefault=defaultDimRel.toModelObject.qname,
predefinedDefault=defaultDimRel.fromModelObject.qname.prefix + ":" + val.usgaapDefaultDimensions[defaultDimRel.fromModelObject.name])
val.modelXbrl.profileStat(_("validate SEC staff observation E.16 dimensions"), time.time() - startedAt)
del val.linroleDefinitionIsDisclosure
del val.linkroleDefinitionStatementSheet
del val.ugtNamespace
if hasattr(val, 'usgaapDeprecations'):
del val.usgaapDeprecations
if hasattr(val, 'usgaapDefaultDimensions'):
del val.usgaapDefaultDimensions
if hasattr(val, 'usgaapCalculations'):
del val.usgaapCalculations
__pluginInfo__ = {
# Do not use _( ) in pluginInfo itself (it is applied later, after loading
'name': 'Validate XBRL-US Best Practice Guidance',
'version': '0.9',
'description': '''XBRL-US Best Practice Guidance Validation.''',
'license': 'Apache-2',
'author': 'Ewe S. Gap',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'Validate.EFM.Start': setup,
'Validate.EFM.Fact': factCheck,
'Validate.EFM.Finally': final
}
| 67.715177
| 186
| 0.579043
|
73dfc8490d8d226f4c34c153850134435cabb10f
| 9,391
|
py
|
Python
|
atari/visualize_reward.py
|
yilin-wang/tril
|
fb3f1090d2056c063602c65d8b7d952ea5037872
|
[
"MIT"
] | 1
|
2021-10-17T07:00:05.000Z
|
2021-10-17T07:00:05.000Z
|
atari/visualize_reward.py
|
yilin-wang/tril
|
fb3f1090d2056c063602c65d8b7d952ea5037872
|
[
"MIT"
] | null | null | null |
atari/visualize_reward.py
|
yilin-wang/tril
|
fb3f1090d2056c063602c65d8b7d952ea5037872
|
[
"MIT"
] | null | null | null |
import pickle
import gym
import time
import numpy as np
import random
import torch
from run_test import *
import matplotlib.pylab as plt
import argparse
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--env_name', default='', help='Select the environment name to run, i.e. pong')
parser.add_argument('--reward_net_path', default='', help="name and location for learned model params")
parser.add_argument('--seed', default=0, help="random seed for experiments")
parser.add_argument('--models_dir', default = ".", help="top directory where checkpoint models for demos are stored")
parser.add_argument('--save_fig_dir', help ="where to save visualizations")
parser.add_argument('--demo_dir', help ="where to find demos")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)
args = parser.parse_args()
env_name = args.env_name
save_fig_dir = args.save_fig_dir
# if env_name == "spaceinvaders":
# env_id = "SpaceInvadersNoFrameskip-v4"
# elif env_name == "mspacman":
# env_id = "MsPacmanNoFrameskip-v4"
# elif env_name == "videopinball":
# env_id = "VideoPinballNoFrameskip-v4"
# elif env_name == "beamrider":
# env_id = "BeamRiderNoFrameskip-v4"
# else:
# env_id = env_name[0].upper() + env_name[1:] + "NoFrameskip-v4"
env_type = "atari"
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
# print(env_id)
stochastic = True
reward_net_path = args.reward_net_path
env = make_vec_env("SpaceInvadersNoFrameskip-v4", 'atari', 1, 0,
wrapper_kwargs={
'clip_rewards':False,
'episode_life':False,
})
env = VecFrameStack(env, 4)
agent = PPO2Agent(env, env_type, stochastic)
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(4, 16, 7, stride=3)
self.conv2 = nn.Conv2d(16, 16, 5, stride=2)
self.conv3 = nn.Conv2d(16, 16, 3, stride=1)
self.conv4 = nn.Conv2d(16, 16, 3, stride=1)
self.fc1 = nn.Linear(784, 64)
#self.fc1 = nn.Linear(1936,64)
self.fc2 = nn.Linear(64, 1)
def cum_return(self, traj):
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
x = traj.permute(0,3,1,2) #get into NCHW format
x = F.leaky_relu(self.conv1(x))
x = F.leaky_relu(self.conv2(x))
x = F.leaky_relu(self.conv3(x))
x = F.leaky_relu(self.conv4(x))
# x = x.view(-1, 784)
x = x.reshape(-1,784)
x = F.leaky_relu(self.fc1(x))
r = self.fc2(x)
sum_rewards += torch.sum(r)
sum_abs_rewards += torch.sum(torch.abs(r))
return sum_rewards, sum_abs_rewards
def forward(self, traj_i, traj_j):
'''compute cumulative return for each trajectory and return logits'''
cum_r_i, abs_r_i = self.cum_return(traj_i)
cum_r_j, abs_r_j = self.cum_return(traj_j)
return torch.cat([cum_r_i, cum_r_j]), abs_r_i + abs_r_j
def predict_reward_sequence(net, traj):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
rewards_from_obs = []
with torch.no_grad():
for s in traj:
r = net.cum_return(torch.from_numpy(np.array([s])).float().to(device))[0].item()
rewards_from_obs.append(r)
return rewards_from_obs
def predict_traj_return(net, traj):
return sum(predict_reward_sequence(net, traj))
reward = Net()
reward.load_state_dict(torch.load(reward_net_path)) # perhaps change for gpu
reward.to(device)
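# --- Editor's note (illustrative, not part of the original script) ---
# cum_return() flattens the conv stack to 784 = 16 * 7 * 7 features, which is
# exactly what the standard 84x84x4 stacked-frame Atari observation produces:
#   84 --conv1(7,s3)--> 26 --conv2(5,s2)--> 11 --conv3(3)--> 9 --conv4(3)--> 7
# A hedged usage sketch (shapes only; real trajectories come from the demos):
#
#     dummy_traj = [np.zeros((84, 84, 4), dtype=np.float32) for _ in range(5)]
#     per_frame = predict_reward_sequence(reward, dummy_traj)  # 5 scalar rewards
#     total = predict_traj_return(reward, dummy_traj)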
from baselines.common.trex_utils import preprocess
# model_dir = args.models_dir
demo_dir = args.demo_dir
demonstrations = []
for i in range(12):
# with open('row1_demos/%d' % (i+1),'rb') as fp:
with open(demo_dir + '/%d' % (i+1),'rb') as fp:
dem = pickle.load(fp)
demonstrations.append(dem)
indices = []
min_reward = 100000
max_reward = -100000
cnt = 0
with torch.no_grad():
for j,d in enumerate(demonstrations):
print(cnt)
cnt += 1
for i,s in enumerate(d[2:-1]):
r = reward.cum_return(torch.from_numpy(np.array([s])).float().to(device))[0].item()
indices.append([j,i,r])
if r < min_reward:
min_reward = r
min_frame = s
min_frame_i = i+2
elif r > max_reward:
max_reward = r
max_frame = s
max_frame_i = i+2
rewards = np.array([ind[2] for ind in indices])
pct_95 = np.percentile(rewards, 95, interpolation='nearest')  # 95th-percentile reward
nind, = np.where(np.isclose(rewards, pct_95))
nind = int(nind[0])
pct_90 = np.percentile(rewards, 90, interpolation='nearest')  # 90th-percentile reward
eind, = np.where(np.isclose(rewards, pct_90))
eind = int(eind[0])
j = indices[nind][0]
i = indices[nind][1]
frame_95 = demonstrations[j][i+2]
j = indices[eind][0]
i = indices[eind][1]
frame_90 = demonstrations[j][i+2]
def mask_coord(i,j,frames, mask_size, channel):
#takes in i,j pixel and stacked frames to mask
masked = frames.copy()
# masked[:,i:i+mask_size,j:j+mask_size,channel] = 0
masked[i:i+mask_size,j:j+mask_size,channel] = 0
return masked
def gen_attention_maps(frames, mask_size):
orig_frame = frames
#okay so I want to visualize what makes these better or worse.
# _,height,width,channels = orig_frame.shape
height,width,channels = orig_frame.shape
#find reward without any masking once
r_before = reward.cum_return(torch.from_numpy(np.array([orig_frame])).float().to(device))[0].item()
heat_maps = []
for c in range(4): #four stacked frame channels
delta_heat = np.zeros((height, width))
for i in range(height-mask_size):
for j in range(width - mask_size):
#get masked frames
masked_ij = mask_coord(i,j,orig_frame, mask_size, c)
r_after = reward.cum_return(torch.from_numpy(np.array([masked_ij])).float().to(device))[0].item()
r_delta = abs(r_after - r_before)
#save to heatmap
delta_heat[i:i+mask_size, j:j+mask_size] += r_delta
heat_maps.append(delta_heat)
return heat_maps
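# --- Editor's note (illustrative, not part of the original script) ---
# gen_attention_maps() is an occlusion-sensitivity map: for each of the four
# stacked-frame channels it zeroes out every mask_size x mask_size patch in
# turn, re-evaluates the learned reward, and accumulates |r_after - r_before|
# over the patch footprint. Bright regions in the heatmaps are pixels whose
# removal changes the predicted reward the most. With mask_size = 3 on an
# 84x84 frame this costs roughly 4 * 81 * 81 forward passes per frame, so it
# is slow but embarrassingly parallel.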
#plot heatmap
mask_size = 3
delta_heat_max = gen_attention_maps(max_frame, mask_size)
delta_heat_min = gen_attention_maps(min_frame, mask_size)
delta_heat_95 = gen_attention_maps(frame_95, mask_size)
delta_heat_90 = gen_attention_maps(frame_90, mask_size)
plt.figure(1)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_95[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(delta_heat_95[1],cmap='seismic', interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "95_attention.png", bbox_inches='tight')
plt.figure(2)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_95[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(frame_95[:,:,1])
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "95_frame.png", bbox_inches='tight')
plt.figure(3)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_95[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(delta_heat_90[1],cmap='seismic', interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "90_attention.png", bbox_inches='tight')
plt.figure(4)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_95[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(frame_90[:,:,1])
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "90_frame.png", bbox_inches='tight')
# In[45]:
plt.figure(5)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_max[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(delta_heat_max[1],cmap='seismic', interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "max_attention.png", bbox_inches='tight')
plt.figure(6)
# print(max_frame_i)
# print(max_reward)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(max_frame[:,:,cnt])
# plt.axis('off')
# plt.imshow(max_frame)
# plt.axis('off')
plt.imshow(max_frame[:,:,1])
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "max_frame.png", bbox_inches='tight')
plt.figure(7)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(delta_heat_min[cnt],cmap='seismic', interpolation='nearest')
# plt.axis('off')
plt.imshow(delta_heat_min[1],cmap='seismic', interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "min_attention.png", bbox_inches='tight')
plt.figure(8)
# for cnt in range(4):
# plt.subplot(1,4,cnt+1)
# plt.imshow(min_frame[:,:,cnt])
# plt.axis('off')
# plt.imshow(min_frame)
# plt.axis('off')
plt.imshow(min_frame[:,:,1])
plt.axis('off')
plt.tight_layout()
plt.savefig(save_fig_dir + "/" + env_name + "min_frame.png", bbox_inches='tight')
| 31.096026
| 117
| 0.659781
|
fd5d9e5a72bb7dbe7f373de6bde73eae88bd6049
| 13,016
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_alerts
short_description: Configuration of alerts in ManageIQ
extends_documentation_fragment:
- community.general.manageiq
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
description:
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
options:
state:
description:
- absent - alert should not exist,
- present - alert should exist,
required: False
choices: ['absent', 'present']
default: 'present'
description:
description:
- The unique alert description in ManageIQ.
- Required when state is "absent" or "present".
resource_type:
description:
- The entity type for the alert in ManageIQ. Required when state is "present".
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
'ExtManagementSystem', 'MiddlewareServer']
expression_type:
description:
- Expression type.
default: hash
choices: ["hash", "miq"]
expression:
description:
- The alert expression for ManageIQ.
- Can either be in the "Miq Expression" format or the "Hash Expression" format.
- Required if state is "present".
enabled:
description:
- Enable or disable the alert. Required if state is "present".
type: bool
options:
description:
- Additional alert options, such as notification type and frequency
'''
EXAMPLES = '''
- name: Add an alert with a "hash expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 01
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: ContainerNode
expression:
eval_method: hostd_log_threshold
mode: internal
options: {}
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Add an alert with a "miq expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 02
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: Vm
expression_type: miq
expression:
and:
- CONTAINS:
tag: Vm.managed-environment
value: prod
- not:
CONTAINS:
tag: Vm.host.managed-environment
value: prod
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete an alert from ManageIQ
manageiq_alerts:
state: absent
description: Test Alert 01
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQAlert(object):
""" Represent a ManageIQ alert. Can be initialized with both the format
we receive from the server and the format we get from the user.
"""
def __init__(self, alert):
self.description = alert['description']
self.db = alert['db']
self.enabled = alert['enabled']
self.options = alert['options']
self.hash_expression = None
self.miq_expression = None
if 'hash_expression' in alert:
self.hash_expression = alert['hash_expression']
if 'miq_expression' in alert:
self.miq_expression = alert['miq_expression']
if 'exp' in self.miq_expression:
# miq_expression is a field that needs a special case, because
# it's returned surrounded by a dict named exp even though we don't
# send it with that dict.
self.miq_expression = self.miq_expression['exp']
def __eq__(self, other):
""" Compare two ManageIQAlert objects
"""
return self.__dict__ == other.__dict__
class ManageIQAlerts(object):
""" Object to execute alert management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
def get_alerts(self):
""" Get all alerts from ManageIQ
"""
try:
response = self.client.get(self.alerts_url + '?expand=resources')
except Exception as e:
self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
return response.get('resources', [])
def validate_hash_expression(self, expression):
""" Validate a 'hash expression' alert definition
"""
# hash expressions must have the following fields
for key in ['options', 'eval_method', 'mode']:
if key not in expression:
msg = "Hash expression is missing required field {key}".format(key=key)
self.module.fail_json(msg)
def create_alert_dict(self, params):
""" Create a dict representing an alert
"""
if params['expression_type'] == 'hash':
# hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
self.validate_hash_expression(params['expression'])
expression_type = 'hash_expression'
else:
# actually miq_expression, but we call it "expression" for backwards-compatibility
expression_type = 'expression'
# build the alert
alert = dict(description=params['description'],
db=params['resource_type'],
options=params['options'],
enabled=params['enabled'])
# add the actual expression.
alert.update({expression_type: params['expression']})
return alert
def add_alert(self, alert):
""" Add a new alert to ManageIQ
"""
try:
result = self.client.post(self.alerts_url, action='create', resource=alert)
msg = "Alert {description} created successfully: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Creating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to create a hash expression
msg = msg.format(description=alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def delete_alert(self, alert):
""" Delete an alert
"""
try:
result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
id=alert['id']),
action="delete")
msg = "Alert {description} deleted: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Deleting alert {description} failed: {error}"
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def update_alert(self, existing_alert, new_alert):
""" Update an existing alert with the values from `new_alert`
"""
new_alert_obj = ManageIQAlert(new_alert)
if new_alert_obj == ManageIQAlert(existing_alert):
# no change needed - alerts are identical
return dict(changed=False, msg="No update needed")
else:
try:
url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
result = self.client.post(url, action="edit", resource=new_alert)
# make sure that the update was indeed successful by comparing
# the result to the expected result.
if new_alert_obj == ManageIQAlert(result):
# success!
msg = "Alert {description} updated successfully: {details}"
msg = msg.format(description=existing_alert['description'], details=result)
return dict(changed=True, msg=msg)
else:
# unexpected result
msg = "Updating alert {description} failed, unexpected result {details}"
msg = msg.format(description=existing_alert['description'], details=result)
self.module.fail_json(msg=msg)
except Exception as e:
msg = "Updating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to update a hash expression
msg = msg.format(description=existing_alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=existing_alert['description'], error=e)
self.module.fail_json(msg=msg)
def main():
argument_spec = dict(
description=dict(type='str'),
resource_type=dict(type='str', choices=['Vm',
'ContainerNode',
'MiqServer',
'Host',
'Storage',
'EmsCluster',
'ExtManagementSystem',
'MiddlewareServer']),
expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
expression=dict(type='dict'),
options=dict(type='dict'),
enabled=dict(type='bool'),
state=dict(required=False, default='present',
choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(argument_spec=argument_spec,
required_if=[('state', 'present', ['description',
'resource_type',
'expression',
'enabled',
'options']),
('state', 'absent', ['description'])])
state = module.params['state']
description = module.params['description']
manageiq = ManageIQ(module)
manageiq_alerts = ManageIQAlerts(manageiq)
existing_alert = manageiq.find_collection_resource_by("alert_definitions",
description=description)
# we need to add or update the alert
if state == "present":
alert = manageiq_alerts.create_alert_dict(module.params)
if not existing_alert:
# an alert with this description doesn't exist yet, let's create it
res_args = manageiq_alerts.add_alert(alert)
else:
# an alert with this description exists, we might need to update it
res_args = manageiq_alerts.update_alert(existing_alert, alert)
# this alert should not exist
elif state == "absent":
# if we have an alert with this description, delete it
if existing_alert:
res_args = manageiq_alerts.delete_alert(existing_alert)
else:
# it doesn't exist, and that's okay
msg = "Alert '{description}' does not exist in ManageIQ"
msg = msg.format(description=description)
res_args = dict(changed=False, msg=msg)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
| 37.402299
| 112
| 0.58213
|
b17675be01dd58a00c19b4527ce1dce7c316954a
| 3,191
|
py
|
Python
|
django_pages/comments/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 3
|
2015-11-24T02:30:48.000Z
|
2018-11-01T10:10:24.000Z
|
django_pages/comments/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 1
|
2015-04-18T16:37:36.000Z
|
2015-04-18T16:37:36.000Z
|
django_pages/comments/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 2
|
2015-11-24T02:01:00.000Z
|
2019-04-09T15:33:56.000Z
|
# -*- encoding: utf-8 -*-
import random
from django.utils.html import strip_tags
from .forms import CommentForm
from .models import Comment
def get_client_ip(request):
"""
Returns requester's IP address from HTTP request
@param request: Http Request
@return string
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def set_humanity_check(request):
"""
sets session['humanity'] to dictionary {0: True/False, 1: True/False, .. 3: True/False}
@param request: Http Request
@return None
"""
result = {}
# this ensures we don't have empty checkboxes
while result == {} or result == {0: False, 1: False, 2: False, 3: False}:
for i in xrange(4):
if random.randint(0, 1):
result[i] = True
else:
result[i] = False
request.session['humanity'] = result
# also fill the random number into session
# this number will be filled into form using JS
request.session['random_number'] = str(random.random())
print request.session['random_number']
def translate_humanity(request):
"""
translates request.session['humanity'] dictionary {0: True, 1: False, ..}
into 'One, Two, Three' according to numbers that are True
@param request: Http Request
@return string
"""
numbers = []
translation = {0: 'one', 1: 'two', 2: 'three', 3: 'four'}
for i in request.session['humanity']:
if request.session['humanity'][i]:
numbers.append(translation[i])
check_string = ', '.join(numbers)
return check_string
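# Editor's illustrative example (not part of the original module): with
# request.session['humanity'] == {0: True, 1: False, 2: True, 3: False} the
# function above returns "one, three"; is_human() below then simply compares
# the checkbox values submitted with the form against that stored dictionary.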
def is_human(request, data):
if request.session['humanity'] == data:
return True
return False
def handle_comment(request, post):
"""
handles comment and either saves it or displays error
@param request: Http Request
@param post: Post object
@return None or error
"""
form = CommentForm(request.POST)
ip = get_client_ip(request)
if form.is_valid():
user = strip_tags(form.cleaned_data['user'])
comment = strip_tags(form.cleaned_data['comment'])
one = form.cleaned_data['one']
two = form.cleaned_data['two']
three = form.cleaned_data['three']
four = form.cleaned_data['four']
random_number = form.cleaned_data['fillmeup']
if random_number != request.session['random_number']:
form.spam = 'Please check that you have JavaScript enabled.'
return form
if is_human(request, {0: one, 1: two, 2: three, 3: four}):
# check for duplicate comments
if Comment.objects.filter(post=post, user=user, comment=comment):
return CommentForm()
comment = Comment(post=post, user=user, comment=comment, ip=ip)
comment.save()
return CommentForm()
else:
form.spam = 'Wrong antispam check'
return form
else:
return form
| 25.943089
| 91
| 0.619242
|
3d13d9ddea6586cb96e196b825316507eb8539e2
| 2,397
|
py
|
Python
|
django/codentino/apps/user/models.py
|
kutver/codentino
|
526ce722ad198764b4e7fc5032f1690d59197d03
|
[
"MIT"
] | null | null | null |
django/codentino/apps/user/models.py
|
kutver/codentino
|
526ce722ad198764b4e7fc5032f1690d59197d03
|
[
"MIT"
] | null | null | null |
django/codentino/apps/user/models.py
|
kutver/codentino
|
526ce722ad198764b4e7fc5032f1690d59197d03
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
"""Define a model manager for User model."""
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""Create and save a User with the given email and password."""
if not username:
raise ValueError('The given username must be set')
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email, password=None, **extra_fields):
"""Create and save a regular User with the given email and password."""
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
"""Create and save a SuperUser with the given email and password."""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
class User(AbstractUser):
"""User model."""
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
unique=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
primary_key=True
)
email = models.EmailField(_('email address'), unique=True, blank=False)
REQUIRED_FIELDS = ['email']
objects = UserManager()
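# --- Editor's usage sketch (illustrative; assumes a configured Django project
# with AUTH_USER_MODEL pointing at this class) ---
#
#     from django.contrib.auth import get_user_model
#     User = get_user_model()
#     User.objects.create_user("ash", "ash@example.com", password="s3cret")
#     User.objects.create_superuser("admin", "admin@example.com", "s3cret")
#
# Both helpers normalise the e-mail, hash the password via set_password(), and
# raise ValueError when username or email is missing; create_superuser also
# insists on is_staff=True and is_superuser=True.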
| 37.453125
| 94
| 0.67501
|
2e1d21ab9e4497ebdcff0e81e90c9c61ec175875
| 732
|
py
|
Python
|
src/pyglue/DocStrings/ProcessorMetadata.py
|
omenos/OpenColorIO
|
7316c3be20752278924dd3f213bff297ffb63a14
|
[
"BSD-3-Clause"
] | 7
|
2015-07-01T03:19:43.000Z
|
2021-03-27T11:02:16.000Z
|
src/pyglue/DocStrings/ProcessorMetadata.py
|
dictoon/OpenColorIO
|
64adcad300adfd166280d2e7b1fb5c3ce7dca482
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyglue/DocStrings/ProcessorMetadata.py
|
dictoon/OpenColorIO
|
64adcad300adfd166280d2e7b1fb5c3ce7dca482
|
[
"BSD-3-Clause"
] | 2
|
2019-03-05T20:43:59.000Z
|
2019-11-11T20:35:55.000Z
|
class ProcessorMetadata:
"""
ProcessorMetadata
This contains meta information about the process that generated
this processor. The results of these functions do not
impact the pixel processing.
"""
def __init__(self):
pass
def getFiles(self):
"""
getFiles()
Returns a list of file references used internally by this processor
:return: list of filenames
:rtype: list
"""
pass
def getLooks(self):
"""
getLooks()
Returns a list of looks used internally by this processor
:return: list of look names
:rtype: list
"""
pass
| 20.914286
| 75
| 0.546448
|
1bcfcaf0e4c8dd55074537796a466709bbd69f2f
| 1,032
|
py
|
Python
|
models/base_lstm.py
|
Remper/emojinet
|
c219392d6bce63fe2a0844d0c5bda3b0d48a16c6
|
[
"Apache-2.0"
] | null | null | null |
models/base_lstm.py
|
Remper/emojinet
|
c219392d6bce63fe2a0844d0c5bda3b0d48a16c6
|
[
"Apache-2.0"
] | null | null | null |
models/base_lstm.py
|
Remper/emojinet
|
c219392d6bce63fe2a0844d0c5bda3b0d48a16c6
|
[
"Apache-2.0"
] | 1
|
2020-06-21T08:02:28.000Z
|
2020-06-21T08:02:28.000Z
|
from keras import Model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Bidirectional, LSTM, regularizers
import numpy as np
from keras.optimizers import Adam
def base_lstm(vocabulary_size: int, embedding_size: int, max_seq_length: int, embedding_matrix: np.array, y_dictionary: dict) -> Model:
model = Sequential()
model.add(Embedding(input_dim=vocabulary_size,
output_dim=embedding_size,
weights=[embedding_matrix],
input_length=max_seq_length,
trainable=True,
embeddings_regularizer=regularizers.l2(0.000001)))
model.add(Dropout(0.4))
model.add(Bidirectional(LSTM(256)))
model.add(Dense(len(y_dictionary), activation='softmax'))
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.001)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
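# --- Editor's hedged usage sketch (not in the original module) ---
# Builds the model with a random embedding matrix and a dummy label dictionary
# so the architecture can be inspected in isolation; real callers pass the
# pretrained embeddings and the emoji label mapping.
if __name__ == "__main__":
    vocab_size, emb_size, seq_len = 1000, 50, 20
    dummy_matrix = np.random.uniform(-0.05, 0.05, size=(vocab_size, emb_size))
    dummy_labels = {i: i for i in range(10)}  # only len() is used by base_lstm
    model = base_lstm(vocab_size, emb_size, seq_len, dummy_matrix, dummy_labels)
    model.summary()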
| 41.28
| 135
| 0.657946
|
1e3a4f1c4e7a0d03bb2b1987e4f576b8bf5e1152
| 6,013
|
py
|
Python
|
userbot/utils/tools.py
|
ashwinstr/RemixGeng
|
cb5d2f1ffc6417134fe4eb51c33ca2f46e501d2d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/utils/tools.py
|
ashwinstr/RemixGeng
|
cb5d2f1ffc6417134fe4eb51c33ca2f46e501d2d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/utils/tools.py
|
ashwinstr/RemixGeng
|
cb5d2f1ffc6417134fe4eb51c33ca2f46e501d2d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2020 Adek Maulana
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
import datetime  # needed by the datetime branch in yaml_format() below
import hashlib
import os.path
import re
import shlex
from os.path import basename
from typing import Optional
from typing import Tuple
from telethon.helpers import add_surrogate  # used by parse_pre(); missing from the original imports
from telethon.tl.functions.channels import GetParticipantRequest
from telethon.tl.tlobject import TLObject
from telethon.tl.types import ChannelParticipantAdmin
from telethon.tl.types import ChannelParticipantCreator
from telethon.tl.types import MessageEntityPre
from userbot import bot
from userbot import LOGS
async def md5(fname: str) -> str:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def humanbytes(size: int) -> str:
if size is None or isinstance(size, str):
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(seconds: int) -> str:
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (((str(days) + " day(s), ") if days else "") +
((str(hours) + " hour(s), ") if hours else "") +
((str(minutes) + " minute(s), ") if minutes else "") +
((str(seconds) + " second(s), ") if seconds else ""))
return tmp[:-2]
def human_to_bytes(size: str) -> int:
units = {
"M": 2**20,
"MB": 2**20,
"G": 2**30,
"GB": 2**30,
"T": 2**40,
"TB": 2**40,
}
size = size.upper()
    # insert a space between the number and the unit if one is not already present
    if not re.search(r"\s", size):
        size = re.sub(r"([KMGT])", r" \1", size)
number, unit = [string.strip() for string in size.split()]
return int(float(number) * units[unit])
async def is_admin(chat_id, user_id):
req_jo = await bot(GetParticipantRequest(channel=chat_id, user_id=user_id))
chat_participant = req_jo.participant
return isinstance(chat_participant,
ChannelParticipantCreator) or isinstance(
chat_participant, ChannelParticipantAdmin)
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
""" run command in terminal """
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(video_file: str, duration: int,
path: str = "") -> Optional[str]:
""" take a screenshot """
LOGS.info(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
ttl = duration // 2
thumb_image_path = path or os.path.join("./temp/",
f"{basename(video_file)}.jpg")
command = f"ffmpeg -ss {ttl} -i '{video_file}' -vframes 1 '{thumb_image_path}'"
err = (await runcmd(command))[1]
if err:
LOGS.error(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
def parse_pre(text):
text = text.strip()
return (
text,
[
MessageEntityPre(offset=0,
length=len(add_surrogate(text)),
language="")
],
)
def yaml_format(obj, indent=0, max_str_len=256, max_byte_len=64):
"""
Pretty formats the given object as a YAML string which is returned.
(based on TLObject.pretty_format)
"""
result = []
if isinstance(obj, TLObject):
obj = obj.to_dict()
if isinstance(obj, dict):
if not obj:
return "dict:"
items = obj.items()
has_items = len(items) > 1
has_multiple_items = len(items) > 2
result.append(obj.get("_", "dict") + (":" if has_items else ""))
if has_multiple_items:
result.append("\n")
indent += 2
for k, v in items:
if k == "_" or v is None:
continue
formatted = yaml_format(v, indent)
if not formatted.strip():
continue
result.append(" " * (indent if has_multiple_items else 1))
result.append(f"{k}:")
if not formatted[0].isspace():
result.append(" ")
result.append(f"{formatted}")
result.append("\n")
if has_items:
result.pop()
if has_multiple_items:
indent -= 2
elif isinstance(obj, str):
        # truncate long strings and display an ellipsis
result = repr(obj[:max_str_len])
if len(obj) > max_str_len:
result += "…"
return result
elif isinstance(obj, bytes):
# repr() bytes if it's printable, hex like "FF EE BB" otherwise
if all(0x20 <= c < 0x7F for c in obj):
return repr(obj)
else:
return ("<…>" if len(obj) > max_byte_len else " ".join(
f"{b:02X}" for b in obj))
elif isinstance(obj, datetime.datetime):
# ISO-8601 without timezone offset (telethon dates are always UTC)
return obj.strftime("%Y-%m-%d %H:%M:%S")
elif hasattr(obj, "__iter__"):
# display iterables one after another at the base indentation level
result.append("\n")
indent += 2
for x in obj:
result.append(f"{' ' * indent}- {yaml_format(x, indent + 2)}")
result.append("\n")
result.pop()
indent -= 2
else:
return repr(obj)
return "".join(result)
| 31.647368
| 83
| 0.573923
|
0a746f4df5a943b5a173f2ddd546c567e6da0415
| 715
|
py
|
Python
|
final_project/server.py
|
PhideasAmbrosianus/xzceb-flask_eng_fr
|
00020eabd7961efb0ed788d83b7f55613db66149
|
[
"Apache-2.0"
] | null | null | null |
final_project/server.py
|
PhideasAmbrosianus/xzceb-flask_eng_fr
|
00020eabd7961efb0ed788d83b7f55613db66149
|
[
"Apache-2.0"
] | null | null | null |
final_project/server.py
|
PhideasAmbrosianus/xzceb-flask_eng_fr
|
00020eabd7961efb0ed788d83b7f55613db66149
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, request
import machinetranslation as mt
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
translated_text = mt.english_to_french(textToTranslate)
return translated_text
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
translated_text = mt.french_to_english(textToTranslate)
return translated_text
@app.route("/")
def renderIndexPage():
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
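# --- Illustrative usage sketch (not part of the original file) ---
# With the server running locally on port 8080, the endpoints can be exercised with
# the requests library; "Hello" is just an arbitrary example string.
#     import requests
#     r = requests.get("http://localhost:8080/englishToFrench",
#                      params={"textToTranslate": "Hello"})
#     print(r.text)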
| 27.5
| 59
| 0.762238
|
d0a32e749dcae3e12bbf56f49cb8896aaa10e69e
| 143
|
py
|
Python
|
src/sage/dynamics/cellular_automata/all.py
|
saraedum/sage-renamed
|
d2da67b14da2ad766a5906425d60d43a3b3e1270
|
[
"BSL-1.0"
] | 1
|
2016-11-04T16:31:48.000Z
|
2016-11-04T16:31:48.000Z
|
src/sage/dynamics/cellular_automata/all.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | null | null | null |
src/sage/dynamics/cellular_automata/all.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | null | null | null |
from sage.misc.lazy_import import lazy_import
lazy_import("sage.dynamics.cellular_automata.solitons",
["SolitonCellularAutomata"])
| 35.75
| 55
| 0.776224
|
640b79c7b5556aaf8fa91e392eef26e050fb5f25
| 2,286
|
py
|
Python
|
networking_odl/common/client.py
|
rgonc/networking-odl
|
a58af1f67c05b11feb983f44e1de5e0160336bc4
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/common/client.py
|
rgonc/networking-odl
|
a58af1f67c05b11feb983f44e1de5e0160336bc4
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/common/client.py
|
rgonc/networking-odl
|
a58af1f67c05b11feb983f44e1de5e0160336bc4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from networking_odl.openstack.common._i18n import _LE
from networking_odl.openstack.common._i18n import _LI
LOG = logging.getLogger(__name__)
class OpenDaylightRestClient(object):
def __init__(self, url, username, password, timeout):
self.url = url
self.timeout = timeout
self.auth = (username, password)
def sendjson(self, method, urlpath, obj):
"""Send json to the OpenDaylight controller."""
headers = {'Content-Type': 'application/json'}
data = jsonutils.dumps(obj, indent=2) if obj else None
url = '/'.join([self.url, urlpath])
LOG.debug("Sending METHOD (%(method)s) URL (%(url)s) JSON (%(obj)s)",
{'method': method, 'url': url, 'obj': obj})
r = requests.request(method, url=url,
headers=headers, data=data,
auth=self.auth, timeout=self.timeout)
r.raise_for_status()
new_obj = None
try:
new_obj = r.json()
except Exception:
LOG.debug("requests result is not json")
LOG.debug("%(result)s", {'result': new_obj})
return new_obj
def try_delete(self, urlpath):
try:
self.sendjson('delete', urlpath, None)
except requests.HTTPError as e:
# The resource is already removed. ignore 404 gracefully
if e.response.status_code != 404:
raise
LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath})
return False
return True
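# --- Illustrative usage sketch (not part of the original file) ---
# The controller URL, credentials and REST paths below are made-up placeholders.
if __name__ == '__main__':
    client = OpenDaylightRestClient(
        'http://127.0.0.1:8181/controller/nb/v2/neutron', 'admin', 'admin', timeout=10)
    networks = client.sendjson('get', 'networks', None)  # fetch a collection
    client.try_delete('networks/00000000-0000-0000-0000-000000000000')  # 404 is tolerated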
| 35.71875
| 78
| 0.630796
|
b2276035c62cbf957db01a258cf0721dd4321329
| 3,013
|
py
|
Python
|
sdk/relay/azure-mgmt-relay/azure/mgmt/relay/models/relay_update_parameters.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/relay/azure-mgmt-relay/azure/mgmt/relay/models/relay_update_parameters.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/relay/azure-mgmt-relay/azure/mgmt/relay/models/relay_update_parameters.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_namespace_patch import ResourceNamespacePatch
class RelayUpdateParameters(ResourceNamespacePatch):
"""Description of a namespace resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: SKU of the namespace.
:type sku: ~azure.mgmt.relay.models.Sku
:ivar provisioning_state: Possible values include: 'Created', 'Succeeded',
'Deleted', 'Failed', 'Updating', 'Unknown'
:vartype provisioning_state: str or
~azure.mgmt.relay.models.ProvisioningStateEnum
:ivar created_at: The time the namespace was created.
:vartype created_at: datetime
:ivar updated_at: The time the namespace was updated.
:vartype updated_at: datetime
:ivar service_bus_endpoint: Endpoint you can use to perform Service Bus
operations.
:vartype service_bus_endpoint: str
:ivar metric_id: Identifier for Azure Insights metrics.
:vartype metric_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
'service_bus_endpoint': {'readonly': True},
'metric_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningStateEnum'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
'metric_id': {'key': 'properties.metricId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RelayUpdateParameters, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.provisioning_state = None
self.created_at = None
self.updated_at = None
self.service_bus_endpoint = None
self.metric_id = None
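# --- Illustrative usage sketch (not part of the original file) ---
# Only the writable fields (tags, sku) are meant to be set by callers; the read-only
# attributes are filled in by the service. The Sku value below is a made-up placeholder.
#     from azure.mgmt.relay.models import RelayUpdateParameters, Sku
#     params = RelayUpdateParameters(tags={'env': 'dev'}, sku=Sku(tier='Standard'))
#     params.serialize()  # -> dict shaped by _attribute_map, e.g. {'tags': {...}, 'sku': {...}}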
| 38.628205
| 103
| 0.608032
|
ed6dcf96ba82757af12bd4a4c3b4609449f0d796
| 764
|
py
|
Python
|
src/main.py
|
SamadiPour/iran-ips
|
f28d58726395f12edeb254703f87d1a69d84a5d0
|
[
"MIT"
] | 33
|
2021-12-05T20:05:21.000Z
|
2022-03-24T01:24:36.000Z
|
src/main.py
|
SamadiPour/iran-ips
|
f28d58726395f12edeb254703f87d1a69d84a5d0
|
[
"MIT"
] | null | null | null |
src/main.py
|
SamadiPour/iran-ips
|
f28d58726395f12edeb254703f87d1a69d84a5d0
|
[
"MIT"
] | 3
|
2021-12-05T20:10:15.000Z
|
2021-12-10T10:35:24.000Z
|
import ipaddress
import os
from functools import reduce
from typing import Iterable  # the type hints below use Iterable; assumed it is not re-exported by get_ips
import create_config
import utils
from get_ips import *
def collect_and_clean(*list_of_ips: Iterable[Iterable[str]]) -> Iterable[ipaddress.IPv4Network]:
ip_set = reduce(lambda x, y: set(x).union(set(y)), list_of_ips)
ip_set = map(utils.convert_to_ip_network, ip_set)
ip_set = filter(lambda x: x is not None, ip_set)
ip_set = list(ip_set)
utils.remove_subnet_ips(ip_set)
return sorted(ip_set)
if __name__ == '__main__':
if not os.path.exists("output"):
os.mkdir("output")
ips = collect_and_clean(ito_gov(), geo_lite2(), ip2location())
ips_str = map(str, ips)
utils.save_to_file('output/iran_ips.txt', "\n".join(ips_str))
create_config.openvpn(ips)
| 25.466667
| 96
| 0.708115
|
64e79c31c202c1e6b137480f9e2fe5796abdcc39
| 1,775
|
py
|
Python
|
EL/el_agent.py
|
shibukazu/baby-steps-of-rl-ja
|
8cfe51be61473f47d384abcff1e36467da2713f1
|
[
"Apache-2.0"
] | null | null | null |
EL/el_agent.py
|
shibukazu/baby-steps-of-rl-ja
|
8cfe51be61473f47d384abcff1e36467da2713f1
|
[
"Apache-2.0"
] | null | null | null |
EL/el_agent.py
|
shibukazu/baby-steps-of-rl-ja
|
8cfe51be61473f47d384abcff1e36467da2713f1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
class ELAgent():
def __init__(self, epsilon):
self.Q = {}
self.epsilon = epsilon
self.reward_log = []
def policy(self, s, actions):
        # Act epsilon-greedily with respect to the Q function (this is effectively the policy)
if np.random.random() < self.epsilon:
return np.random.randint(len(actions))
else:
if s in self.Q and sum(self.Q[s]) != 0:
return np.argmax(self.Q[s])
else:
return np.random.randint(len(actions))
def init_log(self):
self.reward_log = []
def log(self, reward):
self.reward_log.append(reward)
def show_reward_log(self, interval=50, episode=-1):
if episode > 0:
rewards = self.reward_log[-interval:]
mean = np.round(np.mean(rewards), 3)
std = np.round(np.std(rewards), 3)
print("At Episode {} average reward is {} (+/-{}).".format(
episode, mean, std))
else:
indices = list(range(0, len(self.reward_log), interval))
means = []
stds = []
for i in indices:
rewards = self.reward_log[i:(i + interval)]
means.append(np.mean(rewards))
stds.append(np.std(rewards))
means = np.array(means)
stds = np.array(stds)
plt.figure()
plt.title("Reward History")
plt.grid()
plt.fill_between(indices, means - stds, means + stds,
alpha=0.1, color="g")
plt.plot(indices, means, "o-", color="g",
label="Rewards for each {} episode".format(interval))
plt.legend(loc="best")
plt.show()
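# --- Illustrative usage sketch (not part of the original file) ---
# ELAgent only provides the epsilon-greedy policy and reward logging; a concrete agent
# is expected to fill self.Q during learning. The toy values below are made up.
if __name__ == "__main__":
    agent = ELAgent(epsilon=0.1)
    agent.Q["s0"] = [0.0, 1.0]  # pretend state "s0" has two actions with known values
    action = agent.policy("s0", actions=[0, 1])
    agent.log(reward=1.0 if action == 1 else 0.0)
    agent.show_reward_log(interval=1, episode=1)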
| 32.87037
| 74
| 0.512676
|
48ba434195118b18e6cbe4506522c7cc03ccb758
| 10,636
|
py
|
Python
|
src/finn/custom_op/fpgadataflow/templates.py
|
daiki98/finn
|
a68335384c0a8f0a1126714cbad1aa22a3e50e09
|
[
"BSD-3-Clause"
] | 1
|
2021-03-12T17:20:09.000Z
|
2021-03-12T17:20:09.000Z
|
src/finn/custom_op/fpgadataflow/templates.py
|
surangamh/finn
|
af783db8dc2a1d2e95bd569d39464b935520b6d2
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/custom_op/fpgadataflow/templates.py
|
surangamh/finn
|
af783db8dc2a1d2e95bd569d39464b935520b6d2
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# template for single node execution
docompute_template = """
#define AP_INT_MAX_W $AP_INT_MAX_W$
#include "cnpy.h"
#include "npy2apintstream.hpp"
#include <vector>
#include "bnn-library.h"
// includes for network parameters
$GLOBALS$
// defines for network parameters
$DEFINES$
int main(){
$PRAGMAS$
$STREAMDECLARATIONS$
$READNPYDATA$
$DOCOMPUTE$
$DATAOUTSTREAM$
$SAVEASCNPY$
}
"""
# templates for single node ip generation
# cpp file
ipgen_template = """
#define AP_INT_MAX_W $AP_INT_MAX_W$
#include "bnn-library.h"
// includes for network parameters
$GLOBALS$
// defines for network parameters
$DEFINES$
$BLACKBOXFUNCTION$
{
$PRAGMAS$
$DOCOMPUTE$
}
"""
# tcl script for IP generation
ipgentcl_template = """
set config_proj_name $PROJECTNAME$
puts "HLS project: $config_proj_name"
set config_hwsrcdir "$HWSRCDIR$"
puts "HW source dir: $config_hwsrcdir"
set config_proj_part "$FPGAPART$"
set config_bnnlibdir "$FINNHLSLIBDIR$"
set config_toplevelfxn "$TOPFXN$"
set config_clkperiod $CLKPERIOD$
open_project $config_proj_name
add_files $config_hwsrcdir/top_$TOPFXN$.cpp -cflags "-std=c++0x -I$config_bnnlibdir"
set_top $config_toplevelfxn
open_solution sol1
set_part $config_proj_part
config_compile -ignore_long_run_time -disable_unroll_code_size_check
config_interface -m_axi_addr64
config_rtl -auto_prefix
$EXTRA_DIRECTIVES$
create_clock -period $config_clkperiod -name default
csynth_design
export_design -format ip_catalog
exit 0
"""
# verilog wrapper for decoupled mem mode
decoupled_wrapper = """
module $TOPNAME$(
ap_clk,
ap_rst_n,
in0_V_V_TDATA,
in0_V_V_TVALID,
in0_V_V_TREADY,
out_V_V_TDATA,
out_V_V_TVALID,
out_V_V_TREADY
);
input ap_clk;
input ap_rst_n;
input $IN_RANGE$ in0_V_V_TDATA;
input in0_V_V_TVALID;
output in0_V_V_TREADY;
output $OUT_RANGE$ out_V_V_TDATA;
output out_V_V_TVALID;
input out_V_V_TREADY;
reg [31:0] config_address = 0;
reg config_ce = 0;
reg config_we = 0;
reg [31:0] config_d0 = 0;
wire [31:0] config_q0;
//multiple wire AXI Streams
wire m_axis_0_afull;
// FIFO count to generate programmable full
wire [5:0] fifo_0_count;
wire m_axis_0_tready;
wire m_axis_0_tvalid;
wire $WEIGHT_RANGE$ m_axis_0_tdata;
//memstream component
memstream
#(
//parameters to enable/disable axi-mm, set number of streams, set readmemh for
// memory, set per-stream offsets in memory, set per-stream widths
.CONFIG_EN(1),
.NSTREAMS(1),
.MEM_DEPTH($MEM_DEPTH$),
.MEM_WIDTH($WEIGHT_WIDTH$),
.MEM_INIT("./"),
.RAM_STYLE("$RAM_STYLE$"),
//widths per stream
.STRM0_WIDTH($WEIGHT_WIDTH$),
//depths per stream
.STRM0_DEPTH($WSTREAM_DEPTH$),
//offsets for each stream
.STRM0_OFFSET(0)
)
mem
(
.aclk(ap_clk),
.aresetn(ap_rst_n),
//optional configuration interface compatible with ap_memory
.config_address(config_address),
.config_ce(config_ce),
.config_we(config_we),
.config_d0(config_d0),
.config_q0(config_q0),
//multiple output AXI Streams, TDATA width rounded to multiple of 8 bits
.m_axis_0_afull(m_axis_0_afull),
.m_axis_0_tready(m_axis_0_tready),
.m_axis_0_tvalid(m_axis_0_tvalid),
.m_axis_0_tdata(m_axis_0_tdata)
);
//MVA_Stream_Unit
$LAYER_NAME$
MVA_Stream_U
(
.ap_clk(ap_clk), //input
.ap_rst_n(ap_rst_n), //input
.in0_V_V_TDATA(in0_V_V_TDATA), //$IN_RANGE$ input
.in0_V_V_TVALID(in0_V_V_TVALID), //input
.in0_V_V_TREADY(in0_V_V_TREADY), //output
.weights_V_V_TDATA(m_axis_0_tdata), //$WEIGHT_RANGE$ input
.weights_V_V_TVALID(m_axis_0_tvalid), //input
.weights_V_V_TREADY(m_axis_0_tready), //output
.out_V_V_TDATA(out_V_V_TDATA), //$OUT_RANGE$ output
.out_V_V_TVALID(out_V_V_TVALID), //output
.out_V_V_TREADY(out_V_V_TREADY) //input
);
endmodule
"""
ip_package_tcl = """
## IP Info
set Vendor "xilinx.com"
set Library "hls"
set IPName "$TOPNAME$"
set Version "1.0"
set DisplayName "$TOPNAME$"
set Description "An IP generated by Xilinx FINN"
set Device "zynq"
set Catalog "/UserIP"
set RootDir "$VERILOG_DIR$"
## Variables
set Top "$TOPNAME$"
set VerilogFiles [glob -nocomplain $RootDir/*]
## Enter IP directory
cd [file dir [info script]]
## Generate sub cores
set IPs ""
set IPFiles ""
## Basic info
set core [ipx::create_core $Vendor $Library $IPName $Version]
set_property display_name $DisplayName $core
set_property description $Description $core
set_property taxonomy $Catalog $core
set_property supported_families { \
artix7 Production \
artix7l Production \
kintex7 Production \
kintex7l Production \
kintexu Production \
kintexuplus Production \
virtex7 Production \
virtexu Production \
virtexuplus Production \
virtexuplusHBM Production \
zynq Production \
zynquplus Production \
aartix7 Production \
azynq Production \
qartix7 Production \
qkintex7 Production \
qkintex7l Production \
qvirtex7 Production \
qzynq Production \
} $core;
## Add verilog files
if {[llength $VerilogFiles] > 0} {
# synthesis
set group [ipx::add_file_group xilinx_verilogsynthesis $core]
foreach f [concat $VerilogFiles $IPFiles] {
set current_file [ipx::add_file $f $group]
if {[file ext $f] == ".dat"} {
set_property type "mif" $current_file
}
}
set_property model_name $Top $group
if {$IPs != ""} {
set_property component_subcores $IPs $group
}
# simulation
set group [ipx::add_file_group xilinx_verilogbehavioralsimulation $core]
foreach f [concat $VerilogFiles $IPFiles] {
set current_file [ipx::add_file $f $group]
if {[file ext $f] == ".dat"} {
set_property type "mif" $current_file
}
}
set_property model_name $Top $group
if {$IPs != ""} {
set_property component_subcores $IPs $group
}
}
## Import ports
ipx::add_ports_from_hdl \
-top_level_hdl_file $RootDir/$Top.v \
-top_module_name $Top \
$core
## Infer interfaces
ipx::infer_bus_interface ap_clk xilinx.com:signal:clock_rtl:1.0 [ipx::current_core]
ipx::infer_bus_interface ap_rst_n xilinx.com:signal:reset_rtl:1.0 [ipx::current_core]
ipx::infer_bus_interface {in0_V_V_TDATA in0_V_V_TVALID in0_V_V_TREADY} xilinx.com:interface:axis_rtl:1.0 [ipx::current_core]
ipx::infer_bus_interface {out_V_V_TREADY out_V_V_TDATA out_V_V_TVALID} xilinx.com:interface:axis_rtl:1.0 [ipx::current_core]
ipx::associate_bus_interfaces -busif in0_V_V -clock ap_clk [ipx::current_core]
ipx::associate_bus_interfaces -busif out_V_V -clock ap_clk [ipx::current_core]
## Finalize
set_property core_revision 2 [ipx::current_core]
ipx::create_xgui_files [ipx::current_core]
ipx::update_checksums [ipx::current_core]
ipx::save_core [ipx::current_core]
ipx::archive_core $Top.zip [ipx::current_core]
"""
strm_fifo_wrapper = """
module $TOPNAME$(
ap_clk,
ap_rst_n,
count,
in0_V_V_TDATA,
in0_V_V_TVALID,
in0_V_V_TREADY,
out_V_V_TDATA,
out_V_V_TVALID,
out_V_V_TREADY
);
input ap_clk;
input ap_rst_n;
output $COUNT_RANGE$ count;
input $IN_RANGE$ in0_V_V_TDATA;
input in0_V_V_TVALID;
output in0_V_V_TREADY;
output $OUT_RANGE$ out_V_V_TDATA;
output out_V_V_TVALID;
input out_V_V_TREADY;
Q_srl #(
.depth($DEPTH$),
.width($WIDTH$)
)
$LAYER_NAME$
(
.clock(ap_clk),
.reset(!ap_rst_n),
.count(count),
.i_d(in0_V_V_TDATA),
.i_v(in0_V_V_TVALID),
.i_r(in0_V_V_TREADY),
.o_d(out_V_V_TDATA),
.o_v(out_V_V_TVALID),
.o_r(out_V_V_TREADY)
);
endmodule
"""
decoupled_thresholding_template = """
template <
unsigned ImgDim, unsigned NumChannels, unsigned PE,
typename TSrcI = Identity, typename TDstI = Identity,
int ActVal=0, typename TT, unsigned int NumSteps,
typename TI, typename TO>
void Thresholding_Stream_Batch(hls::stream<TI> &in,
hls::stream<TO> &out,
hls::stream<ap_uint<PE*NumSteps*TT::width>> &weight,
int const reps)
{
// how many different rows each neuron will compute
// alternatively: number of vertical matrix chunks
unsigned const NF = NumChannels / PE;
ThresholdsActivation<1, PE, NumSteps, TT, TO, ActVal, std::less_equal<TT>> internal_thr;
#pragma HLS ARRAY_PARTITION variable=internal_thr.m_thresholds complete dim=0
// everything merged into a common iteration space (one "big" loop instead
// of smaller nested loops) to get the pipelinening the way we want
for (unsigned i = 0; i < reps * ImgDim * ImgDim * NF; i++)
{
#pragma HLS PIPELINE II=1
ap_uint<PE*NumSteps*TT::width> packed_thr;
packed_thr = weight.read();
// slicer to get 1 PE's worth of thresholds
auto const pe_slicer = Slice<ap_uint<NumSteps*TT::width>>()(packed_thr);
TI inElem;
inElem = in.read();
auto outElem = TDstI().template operator()<TO>();
for (unsigned pe = 0; pe < PE; pe++)
{
#pragma HLS UNROLL
// slicer to get individual thresholds
auto const thr_slicer = Slice<TT>()(pe_slicer(pe, 0));
for (unsigned nt = 0; nt < NumSteps; nt++)
{
#pragma HLS UNROLL
internal_thr.m_thresholds[pe][0][nt] = thr_slicer(nt, 0);
}
auto const act = TSrcI()(inElem);
outElem(pe,0,1) = internal_thr.activate(0, pe, act(pe,0));
}
out.write(outElem);
}
}
"""
| 25.878345
| 124
| 0.733734
|
33070d014423f921f38d097860ae6a6bd735a7b7
| 13,669
|
py
|
Python
|
Code/instr_tester/controlhandler.py
|
DaveSeidel/QB_Nebulae_V2
|
4a0218bb6a05e835e74b126729a1c3cd221fc9b5
|
[
"MIT"
] | 40
|
2019-12-30T03:44:36.000Z
|
2022-02-07T23:09:42.000Z
|
Code/instr_tester/controlhandler.py
|
DaveSeidel/QB_Nebulae_V2
|
4a0218bb6a05e835e74b126729a1c3cd221fc9b5
|
[
"MIT"
] | 11
|
2020-03-08T10:22:57.000Z
|
2022-03-22T21:18:32.000Z
|
Code/instr_tester/controlhandler.py
|
DaveSeidel/QB_Nebulae_V2
|
4a0218bb6a05e835e74b126729a1c3cd221fc9b5
|
[
"MIT"
] | 23
|
2020-01-20T11:12:20.000Z
|
2022-03-02T20:39:09.000Z
|
# Import SPI library (for hardware SPI) and MCP3008 library.
#import Adafruit_#GPIO.SPI as SPI
#import RPi.#GPIO as #GPIO
import ctcsound
#import switch as libSwitch
#import shiftregister as libSR
import control
import os
#import settings
import time
#import Adafruit_MCP3008
# Defines for Button/Gate Types
BUTTON_GATE_GPIO = 0
BUTTON_SR_GATE_GPIO = 1
BUTTON_GATE_SR = 2
BUTTON_GPIO_GATE_SR = 3
BUTTON_GPIO_GATE_NONE = 4
BUTTON_SR_GATE_NONE = 5
FREEZE_GATE_PIN = 4
RESET_GATE_PIN = 25
NEXT_GATE_PIN = 23
RECORD_GATE_PIN = 24
# Main Class. Holds all control.ControlChannels
class ControlHandler(object):
def __init__(self, csound, numberFiles, configData):
self.csound = csound # Share csound instance with object
#GPIO.setmode(#GPIO.BCM) # init #GPIO
#GPIO.setwarnings(False) # silence #GPIO warnings (this is probably not the best.)
self.eol_pin = 16
self.eol_state = False
#GPIO.setup(self.eol_pin, #GPIO.OUT)
#self.shiftReg = libSR.ShiftRegister()
self.numFiles = numberFiles
self.control_mode = "normal"
self.configData = configData
self.in_ticks = 23
self.modeChangeControl = None
#self.settings = settings.SettingManager()
#self.settings.read()
# Set Defaults/Read Config
digitalControlList = [
"reset", "freeze", "source", "record", "file", "recordstate",
"reset_alt", "freeze_alt", "source_alt", "record_alt", "file_alt"
]
self.defaultConfig = dict()
self.populateDefaultConfig()
digitalConfig = dict()
for ctrl in digitalControlList:
if self.configData.has_key(ctrl):
digitalConfig[ctrl] = self.configData.get(ctrl)
else:
digitalConfig[ctrl] = self.defaultConfig.get(ctrl)
self.channels = [control.ControlChannel(self.csound, "speed", 0.25, "static"),
control.ControlChannel(self.csound, "pitch", 0.5, "static"),
control.ControlChannel(self.csound, "start", 0.0, "static"),
control.ControlChannel(self.csound, "size", 1.0, "static"),
control.ControlChannel(self.csound, "mix", 0.0,"static"),
control.ControlChannel(self.csound, "density", 0.001, "static"),
control.ControlChannel(self.csound, "overlap", 0.0012, "static"),
control.ControlChannel(self.csound, "degrade", 1.0, "static"),
control.ControlChannel(self.csound, "file", 0, "static"),
control.ControlChannel(self.csound, "reset", 0, "static"),
control.ControlChannel(self.csound, "freeze", 0, "static"),
control.ControlChannel(self.csound, "source", 0, "static"),
control.ControlChannel(self.csound, "record", 0, "static"),
control.ControlChannel(self.csound, "recordstate", 0, "static")]
self.altchannels = [control.ControlChannel(self.csound, "speed", 0.25, "static"),
control.ControlChannel(self.csound, "pitch_alt", 0.5, "static"),
control.ControlChannel(self.csound, "start_alt", 0.0, "static"),
control.ControlChannel(self.csound, "size_alt", 1.0, "static"),
control.ControlChannel(self.csound, "mix_alt", 0.0,"static"),
control.ControlChannel(self.csound, "density_alt", 0.000, "static"),
control.ControlChannel(self.csound, "overlap_alt", 0.0012, "static"),
control.ControlChannel(self.csound, "degrade_alt", 0.5, "static"),
control.ControlChannel(self.csound, "file_alt", 0, "static"),
control.ControlChannel(self.csound, "reset_alt", 0, "static"),
control.ControlChannel(self.csound, "freeze_alt", 0, "static"),
control.ControlChannel(self.csound, "source_alt", 0, "static"),
control.ControlChannel(self.csound, "record_alt", 0, "static")]
self.channeldict = {}
for chn in self.channels:
self.channeldict[chn.name] = chn
self.altchanneldict = {}
for chn in self.altchannels:
self.altchanneldict[chn.name] = chn
# The Exit instrmode channel, is probably fine to leave for good,
# but the exit_secondarymode channel should be absorbed into the altchannels list
#self.exit_instrmode_chn = control.control.ControlChannel(self.csound, "exit", 0, "digital",data_channel=BUTTON_GATE_SR, sr=self.shiftReg, gate_pin=libSR.PIN_SOURCE_GATE,button_pin=libSR.PIN_SOURCE, longtouch_cb=self.enterNormalMode)
# self.exit_secondarymode_chn = control.control.ControlChannel(self.csound, "record", 0, "digital",data_channel=BUTTON_SR_GATE_#GPIO, sr=self.shiftReg, gate_pin=RECORD_GATE_PIN,button_pin=libSR.PIN_RECORD, longtouch_cb=self.enterNormalMode)
## Clean this stuff up.
self.now = int(round(time.time() * 1000))
self.instr_sel_idx = 0
self.eol_comm = control.CommChannel(self.csound, "eol")
self.size_status_comm = control.CommChannel(self.csound, "sizestatus")
self.eol_gate_timer = 0
self.reset_led_pin = 12
#GPIO.setup(self.reset_led_pin, #GPIO.OUT)
self.channeldict["recordstate"].setIgnoreGate(True)
#for chn in self.channels:
# if self.settings.hasSetting(chn.name):
# chn.setValue(float(self.settings.load(chn.name)))
# chn.update()
#self.enterSecondaryMode()
#for chn in self.altchannels:
# if self.settings.hasSetting(chn.name):
# chn.setValue(float(self.settings.load(chn.name)))
# chn.update()
#self.enterNormalMode()
# Generic Global Control Functions
def setValue(self, name, value):
self.channeldict[name].setValue(value)
def setAltValue(self, name, value):
self.altchanneldict[name].setValue(value)
def getValue(self, name):
return self.channeldict[name].getValue()
def getAltValue(self, name):
return self.altchanneldict[name].getValue()
def getStaticVal(self, name):
return self.channeldict[name].getStaticVal()
def getInstrValue(self, name):
return self.instrchanneldict[name].getValue()
def getInstrSelIdx(self):
return self.instr_sel_idx
def updateChannel(self, name):
self.channeldict[name].update()
def updateAltChannel(self, name):
self.altchanneldict[name].update()
def mode(self):
return self.control_mode
def enterNormalMode(self):
print "entering normal"
if self.modeChangeControl is not None:
self.channeldict[self.modeChangeControl].setIgnoreNextButton()
if self.control_mode == "secondary controls":
self.resistSecondarySettings()
#self.settings.update(self.now)
#self.settings.write()
self.control_mode = "normal"
def enterSecondaryMode(self):
self.modeChangeControl = "file"
self.altchanneldict[self.modeChangeControl + "_alt"].setIgnoreNextButton()
if self.control_mode == "normal":
print "entering secondary"
self.resistNormalSettings()
self.control_mode = "secondary controls"
#self.settings.update(self.now)
#self.settings.write()
def enterInstrSelMode(self):
self.modeChangeControl = "source"
self.instrchanneldict[self.modeChangeControl + "_instr"].setIgnoreNextButton()
if self.control_mode == "normal":
print "entering instr selector"
self.control_mode = "instr selector"
def resistNormalSettings(self):
for i in range(0, 8):
self.altchannels[i].setModeChangeValue(self.channels[i].getPotValue())
def resistSecondarySettings(self):
for i in range(0, 8):
self.channels[i].setModeChangeValue(self.altchannels[i].getPotValue())
def populateDefaultConfig(self):
self.defaultConfig["reset"] = ["triggered", "rising"]
self.defaultConfig["freeze"] = ["latching", "rising"]
self.defaultConfig["source"] = ["latching", "falling"]
self.defaultConfig["file"] = ["incremental", "falling"]
self.defaultConfig["record"] = ["latching", "falling"]
self.defaultConfig["recordstate"] = ["momentary", "rising"]
self.defaultConfig["reset_alt"] = ["triggered", "rising"]
self.defaultConfig["freeze_alt"] = ["latching", "rising"]
self.defaultConfig["source_alt"] = ["latching", "falling"]
self.defaultConfig["file_alt"] = ["incremental", "falling"]
self.defaultConfig["record_alt"] = ["latching", "falling"]
self.defaultConfig["ksmps"] = ["128"]
self.defaultConfig["sr"] = ["44100"]
def restoreAltToDefault(self):
self.altchanneldict["speed_alt"].setValue(1.0)
self.altchanneldict["pitch_alt"].setValue(0.0)
self.altchanneldict["start_alt"].setValue(0.0)
self.altchanneldict["size_alt"].setValue(0.0)
self.altchanneldict["mix_alt"].setValue(0.0)
self.altchanneldict["density_alt"].setValue(0.0)
self.altchanneldict["overlap_alt"].setValue(0.0)
self.altchanneldict["degrade_alt"].setValue(0.5)
self.altchanneldict["reset_alt"].setValue(0.0)
self.altchanneldict["freeze_alt"].setValue(0.0)
self.altchanneldict["source_alt"].setValue(0.0)
self.altchanneldict["record_alt"].setValue(0.0)
self.altchanneldict["file_alt"].setValue(0.0)
def setInputLevel(self, scalar):
tick = scalar
prev_ticks = self.in_ticks
self.in_ticks = tick
if prev_ticks != self.in_ticks:
cmd = 'amixer set \'Capture\' ' + str(tick)
os.system(cmd)
def handleEndOfLoop(self):
self.eol_comm.update()
self.size_status_comm.update()
if self.size_status_comm.getState() == 0:
if self.eol_comm.getState() == 1:
self.eol_comm.clearState()
#GPIO.output(self.eol_pin, False)
if self.control_mode == "normal":
pass
#GPIO.output(self.reset_led_pin, False)
self.eol_gate_timer = 0
self.eol_state = True
if self.eol_state is True:
self.eol_gate_timer += 1
if self.eol_gate_timer > 2:
#GPIO.output(self.eol_pin, True)
if self.control_mode == "normal":
#GPIO.output(self.reset_led_pin, True)
pass
self.eol_state = False
#else:
#GPIO.output(self.eol_pin, True)
#GPIO.output(self.reset_led_pin, False)
def updateAll(self):
##GPIO.output(self.eol_pin, False)
self.now = int(round(time.time() * 1000))
#self.shiftReg.update()
self.handleEndOfLoop()
#self.printAllControls()
if self.control_mode == "secondary controls":
for chn in self.altchannels:
chn.update()
#self.settings.save(chn.name, chn.getValue())
for chn in self.channels:
chn.setIgnoreHID(True)
chn.update()
self.channeldict["file"].input.setIncOrder(self.altchanneldict["file_alt"].getValue())
in_scalar = self.altchanneldict["speed_alt"].getValue()
self.setInputLevel(in_scalar)
if self.altchanneldict["reset_alt"].curVal == True:
print "Restoring Defaults!"
self.restoreAltToDefault()
self.enterNormalMode()
elif self.control_mode == "instr selector":
#self.exit_instrmode_chn.update()
for idx, chn in enumerate(self.instr_sel_controls):
chn.update()
if chn.getValue() == 1:
self.instr_sel_idx = idx
else: #includes "normal"
self.channeldict["recordstate"].update()
recordstate = self.channeldict["recordstate"].getValue()
for chn in self.channels:
if chn.name != "recordstate":
#self.settings.save(chn.name, chn.getValue())
chn.setIgnoreGate(recordstate)
chn.setIgnoreHID(False)
chn.update()
if chn.source == "digital" and chn.name != "record" and chn.name != "recordstate":
if recordstate == 1 or recordstate == True:
if chn.hasChanged() == True:
print "channel: " + chn.name + " has changed."
self.channeldict["record"].setIgnoreNextButton()
def printAllControls(self):
line = "############################\n"
line += "Primary Channels:"
for i,chn in enumerate(self.channels):
if i % 4 == 0:
line +="\n"
line += chn.name + ": " + str(chn.getValue()) + "\t"
line += "\nSecondary Channels:"
for i,chn in enumerate(self.altchannels):
if i % 4 == 0:
line += "\n"
line += chn.name + ": " + str(chn.getValue()) + "\t"
line += "\n############################\n"
print line
##GPIO.output(self.eol_pin, True)
| 43.951768
| 247
| 0.593021
|
a7f845d53334fcd92c9200ae7221830eb2f82e3c
| 5,157
|
py
|
Python
|
spillway/serializers.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 62
|
2015-01-20T22:21:09.000Z
|
2019-11-25T12:57:53.000Z
|
spillway/serializers.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 24
|
2015-01-07T00:03:10.000Z
|
2021-06-10T17:34:35.000Z
|
spillway/serializers.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 19
|
2015-01-12T18:08:29.000Z
|
2020-08-10T17:16:31.000Z
|
from django.core import exceptions
from django.contrib.gis import geos
from django.contrib.gis.db import models
from django.db.models.fields.files import FieldFile
from rest_framework import serializers
from greenwich.srs import SpatialReference
from spillway import query, collections as sc
from spillway.fields import GeometryField
from spillway.renderers.gdal import BaseGDALRenderer
serializers.ModelSerializer.serializer_field_mapping.update({
models.GeometryField: GeometryField,
models.PointField: GeometryField,
models.LineStringField: GeometryField,
models.PolygonField: GeometryField,
models.MultiPointField: GeometryField,
models.MultiLineStringField: GeometryField,
models.MultiPolygonField: GeometryField,
models.GeometryCollectionField: GeometryField
})
class GeoModelSerializer(serializers.ModelSerializer):
"""Serializer class for GeoModels."""
def __new__(cls, *args, **kwargs):
cls.Meta.geom_field = getattr(cls.Meta, 'geom_field', None)
return super(GeoModelSerializer, cls).__new__(cls, *args, **kwargs)
def get_fields(self):
"""Returns a fields dict for this serializer with a 'geometry' field
added.
"""
fields = super(GeoModelSerializer, self).get_fields()
# Set the geometry field name when it's undeclared.
if not self.Meta.geom_field:
for name, field in fields.items():
if isinstance(field, GeometryField):
self.Meta.geom_field = name
break
return fields
class FeatureListSerializer(serializers.ListSerializer):
"""Feature list serializer for GeoModels."""
@property
def data(self):
return super(serializers.ListSerializer, self).data
def to_representation(self, data):
data = [self.child.to_representation(item) for item in data]
try:
srid = query.get_srid(self.instance)
except AttributeError:
srid = None
return sc.FeatureCollection(features=data, crs=srid)
class FeatureSerializer(GeoModelSerializer):
"""Feature serializer for GeoModels."""
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
meta = getattr(cls, 'Meta', None)
list_serializer_cls = getattr(
meta, 'list_serializer_cls', FeatureListSerializer)
return list_serializer_cls(*args, **kwargs)
@property
def data(self):
if not hasattr(self, '_data'):
self._data = super(FeatureSerializer, self).data
if 'crs' not in self._data:
try:
field = self.fields[self.Meta.geom_field]
srid = getattr(self.instance, field.source).srid
except (AttributeError, geos.GEOSException):
pass
else:
self._data['crs'] = sc.NamedCRS(srid)
return self._data
def to_representation(self, instance):
native = super(FeatureSerializer, self).to_representation(instance)
geometry = native.pop(self.Meta.geom_field)
pk = native.pop(instance._meta.pk.name, None)
return sc.Feature(pk, geometry, native)
def to_internal_value(self, data):
if sc.has_features(data):
for feat in data['features']:
return self.to_internal_value(feat)
try:
sref = SpatialReference(data['crs']['properties']['name'])
except KeyError:
sref = None
# Force evaluation of fields property.
if not self.fields and self.Meta.geom_field is None:
raise exceptions.FieldDoesNotExist('Geometry field not found')
record = {self.Meta.geom_field: data.get('geometry')}
record.update(data.get('properties', {}))
feature = super(FeatureSerializer, self).to_internal_value(record)
if feature and sref:
geom = feature[self.Meta.geom_field]
geom.srid = sref.srid
return feature
class RasterModelSerializer(GeoModelSerializer):
"""Serializer class for raster models."""
def __new__(cls, *args, **kwargs):
cls.Meta.raster_field = getattr(cls.Meta, 'raster_field', None)
return super(RasterModelSerializer, cls).__new__(cls, *args, **kwargs)
def get_fields(self):
fields = super(RasterModelSerializer, self).get_fields()
if not self.Meta.raster_field:
for name, field in fields.items():
if isinstance(field, serializers.FileField):
self.Meta.raster_field = name
break
fieldname = self.Meta.raster_field
request = self.context.get('request')
renderer = getattr(request, 'accepted_renderer', None)
try:
obj = self.instance[0]
except (IndexError, TypeError):
obj = self.instance
modelfield = getattr(obj, fieldname, None)
if (isinstance(renderer, BaseGDALRenderer)
or not isinstance(modelfield, FieldFile)):
fields[fieldname] = serializers.ReadOnlyField()
return fields
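# --- Illustrative usage sketch (not part of the original file) ---
# "Location" is a hypothetical GeoDjango model with a PointField named "geom"; only the
# serializer declaration pattern is real, the model and field names are made up.
#     class LocationSerializer(FeatureSerializer):
#         class Meta:
#             model = Location
#             geom_field = 'geom'
#             fields = ('id', 'name', 'geom')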
| 37.369565
| 78
| 0.647469
|
8de4e2b794176df49179fbc9a4b9e10d48c93dfd
| 3,097
|
py
|
Python
|
haruhi_dl/extractor/tube8.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 32
|
2021-01-18T03:52:17.000Z
|
2022-02-17T20:43:39.000Z
|
haruhi_dl/extractor/tube8.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 12
|
2021-02-06T08:12:08.000Z
|
2021-12-11T23:17:41.000Z
|
haruhi_dl/extractor/tube8.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 6
|
2021-01-29T16:46:31.000Z
|
2022-01-20T18:40:03.000Z
|
from __future__ import unicode_literals
import re
from ..utils import (
int_or_none,
str_to_int,
)
from .keezmovies import KeezMoviesIE
class Tube8IE(KeezMoviesIE):
_VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
'md5': '65e20c48e6abff62ed0c3965fff13a39',
'info_dict': {
'id': '229795',
'display_id': 'kasia-music-video',
'ext': 'mp4',
'description': 'hot teen Kasia grinding',
'uploader': 'unknown',
'title': 'Kasia music video',
'age_limit': 18,
'duration': 230,
'categories': ['Teen'],
'tags': ['dancing'],
},
}, {
'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage, **kwargs):
return re.findall(
r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)',
webpage)
def _real_extract(self, url):
webpage, info = self._extract_info(url)
if not info['title']:
info['title'] = self._html_search_regex(
r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')
description = self._html_search_regex(
r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False)
uploader = self._html_search_regex(
r'<span class="username">\s*(.+?)\s*<',
webpage, 'uploader', fatal=False)
like_count = int_or_none(self._search_regex(
r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
dislike_count = int_or_none(self._search_regex(
r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
view_count = str_to_int(self._search_regex(
r'Views:\s*</dt>\s*<dd>([\d,\.]+)',
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._search_regex(
r'<span id="allCommentsCount">(\d+)</span>',
webpage, 'comment count', fatal=False))
category = self._search_regex(
r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)',
webpage, 'category', fatal=False)
categories = [category] if category else None
tags_str = self._search_regex(
r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)',
webpage, 'tags', fatal=False)
tags = [t for t in re.findall(
r'<a[^>]+href=[^>]+>([^<]+)', tags_str)] if tags_str else None
info.update({
'description': description,
'uploader': uploader,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
'tags': tags,
})
return info
| 35.597701
| 137
| 0.529545
|
d39331e00ce9036e4ee6fe64d3122b6d03e25f01
| 10,543
|
py
|
Python
|
src/train.py
|
YuliangXiu/pytorch_pose_proposal_networks
|
73bed82ba221c698d63f50cc235d18c5d082170c
|
[
"MIT"
] | 102
|
2019-02-15T11:40:59.000Z
|
2022-01-19T10:48:31.000Z
|
src/train.py
|
zoombapup/pytorch_pose_proposal_networks
|
73bed82ba221c698d63f50cc235d18c5d082170c
|
[
"MIT"
] | 11
|
2019-02-15T14:25:17.000Z
|
2021-02-21T03:54:33.000Z
|
src/train.py
|
zoombapup/pytorch_pose_proposal_networks
|
73bed82ba221c698d63f50cc235d18c5d082170c
|
[
"MIT"
] | 22
|
2019-02-19T07:11:17.000Z
|
2021-10-03T03:54:00.000Z
|
import os
import cv2
from tensorboardX import SummaryWriter
import torch.optim as optim
import torch.utils.data
from src.utils import *
import src.config as cfg
from src.dataset.mpi import MPI
from src.model.model import which_model
train_dset = MPI(cfg.IMG_DIR, cfg.ANNOTATION_PATH, is_train=True)
train_loader = torch.utils.data.DataLoader(train_dset, batch_size=cfg.BATCH_SIZE,
shuffle=True, num_workers=cfg.NUM_WORKS)
writer = SummaryWriter(log_dir=cfg.SUMMARY_PATH, purge_step=0)
# Initialize network
if os.path.exists(cfg.CHECKPOINT_PATH):
checkpoint = torch.load(cfg.CHECKPOINT_PATH)
step = checkpoint['step']
start = checkpoint['epoch'] + 1
net = which_model(cfg.IS_SHALLOW, net_state_dict=checkpoint['net_state_dict'])
else:
net = which_model(cfg.IS_SHALLOW)
start = 0
step = -1
net.to(device)
optimizer = optim.SGD(net.parameters(), lr=cfg.LR, momentum=cfg.MOMENTUM,
weight_decay=cfg.WEIGHT_DECAY)
if os.path.exists(cfg.CHECKPOINT_PATH):
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
def main():
net.train()
loss_recorder = AverageMeter(num=6)
for epoch in range(start, cfg.MAX_EPOCH):
loss_recorder.reset()
for i, (input, target, mask) in enumerate(train_loader, 1):
input = input.to(device)
target, mask = target.to(device), mask.to(device)
output = net(input)
output_detach = output.detach()
iou = get_iou(output_detach, target, mask)
mask_iou = torch.zeros_like(mask)
mask_iou[:, :, :, 1:96:6] = mask[:, :, :, 1:96:6]
mask_iou = mask_iou.type(torch.uint8)
target[mask_iou] = iou
# Get each outpout and target
output_resp = output[:, :, :, 0:96:6]
output_iou = output[:, :, :, 1:96:6]
output_coor_x, output_coor_y = output[:, :, :, 2:96:6], output[:, :, :, 3:96:6]
output_w, output_h = output[:, :, :, 4:96:6], output[:, :, :, 5:96:6]
output_limb = output[:, :, :, 96:]
# output_limb = output[:, :, :, 96:].detach().cpu().numpy().reshape((12, 12, 9, 9, 15))
target_resp = target[:, :, :, 0:96:6]
target_iou = target[:, :, :, 1:96:6]
target_coor_x = target[:, :, :, 2:96:6] / cfg.CELL_SIZE - x_offset
target_coor_y = target[:, :, :, 3:96:6] / cfg.CELL_SIZE - y_offset
target_w = torch.sqrt(target[:, :, :, 4:96:6] / cfg.IMG_SIZE)
target_h = torch.sqrt(target[:, :, :, 5:96:6] / cfg.IMG_SIZE)
target_limb = target[:, :, :, 96:]
# target_limb = target[:, :, :, 96:].cpu().numpy().reshape((12, 12, 9, 9, 15))
mask_resp = mask[:, :, :, 0:96:6]
mask_joint = mask[:, :, :, 1:96:6]
mask_limb = mask[:, :, :, 96:]
# mask_limb = mask[:, :, :, 96:].cpu().numpy().reshape((12, 12, 9, 9, 15))
# draw_box(input, output_detach)
for n in range(output.shape[0]):
print('resp:')
print(output_resp[n, :, :, :][target_resp[n, :, :, :].type(torch.uint8)])
print('iou:')
print(output_iou[n][mask_joint[n].type(torch.uint8)])
print(target_iou[n][mask_joint[n].type(torch.uint8)])
print('x:')
print(output_coor_x[n][mask_joint[n].type(torch.uint8)])
print(target_coor_x[n][mask_joint[n].type(torch.uint8)])
print('y:')
print(output_coor_y[n][mask_joint[n].type(torch.uint8)])
print(target_coor_y[n][mask_joint[n].type(torch.uint8)])
print('w:')
print(output_w[n][mask_joint[n].type(torch.uint8)])
print(target_w[n][mask_joint[n].type(torch.uint8)])
print('h:')
print(output_h[n][mask_joint[n].type(torch.uint8)])
print(target_h[n][mask_joint[n].type(torch.uint8)])
print('limb:')
print(output_limb[n][target_limb[n].type(torch.uint8)])
print()
# Calculate each loss
loss_resp = cfg.scale_resp * square_error(output_resp, target_resp, mask_resp)
loss_iou = cfg.scale_iou * square_error(output_iou, target_iou, mask_joint)
loss_coor = cfg.scale_coor * (square_error(output_coor_x, target_coor_x, mask_joint) +
square_error(output_coor_y, target_coor_y, mask_joint))
loss_size = cfg.scale_size * (square_error(output_w, target_w, mask_joint) +
square_error(output_h, target_h, mask_joint))
loss_limb = cfg.scale_limb * square_error(output_limb, target_limb, mask_limb)
loss = loss_resp + loss_iou + loss_coor + loss_size + loss_limb
loss_recorder.update(loss_resp.item(), loss_iou.item(), loss_coor.item(),
loss_size.item(), loss_limb.item(), loss.item(),
n=output.shape[0])
# Modify learning rate
global step
step += 1
lr = cfg.LR * (1 - step / (cfg.MAX_EPOCH * len(train_loader)))
optimizer.param_groups[0]['lr'] = lr
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 100 == 0:
print('epoch:{}, step:{}, lr:{:.6f} resp_loss:{:.2f}, iou_loss:{:.3f}, coor_loss:{:.2f}, '
'size_loss:{:.3f}, limb_loss:{:.2f}, loss:{:.2f}'.format(epoch, i, lr,
loss_recorder.avg[0],
loss_recorder.avg[1],
loss_recorder.avg[2],
loss_recorder.avg[3],
loss_recorder.avg[4],
loss_recorder.avg[5]))
print()
writer.add_scalar('loss_resp', loss_recorder.avg[0], epoch)
writer.add_scalar('loss_iou', loss_recorder.avg[1], epoch)
writer.add_scalar('loss_coor', loss_recorder.avg[2], epoch)
writer.add_scalar('loss_size', loss_recorder.avg[3], epoch)
writer.add_scalar('loss_limb', loss_recorder.avg[4], epoch)
writer.add_scalar('loss', loss_recorder.avg[5], epoch)
torch.save({'epoch': epoch,
'step': step,
'net_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
cfg.CHECKPOINT_PATH)
def get_iou(output, target, mask):
output, target, mask = output[:, :, :, :96].clone(), target[:, :, :, :96].clone(), \
mask[:, :, :, :96].clone()
output[:, :, :, 2::6] += x_offset
output[:, :, :, 3::6] += y_offset
mask[:, :, :, 0::6] = 0
mask[:, :, :, 1::6] = 0
mask = mask.type(torch.uint8)
ox_p = output[mask][0::4] * cfg.CELL_SIZE
oy_p = output[mask][1::4] * cfg.CELL_SIZE
w_p = output[mask][2::4].pow(2) * cfg.IMG_SIZE
h_p = output[mask][3::4].pow(2) * cfg.IMG_SIZE
ox_gt = target[mask][0::4]
oy_gt = target[mask][1::4]
w_gt = target[mask][2::4]
h_gt = target[mask][3::4]
tl_x = torch.max(ox_p - 0.5 * w_p, ox_gt - 0.5 * w_gt)
tl_y = torch.max(oy_p - 0.5 * h_p, oy_gt - 0.5 * h_gt)
br_x = torch.min(ox_p + 0.5 * w_p, ox_gt + 0.5 * w_gt)
br_y = torch.min(oy_p + 0.5 * h_p, oy_gt + 0.5 * h_gt)
delta_x, delta_y = br_x - tl_x, br_y - tl_y
condition = (delta_x < 0) | (delta_y < 0)
intersection = torch.where(condition, torch.zeros_like(delta_x), delta_x * delta_y)
union = torch.max(w_p * h_p + w_gt * h_gt - intersection,
torch.full_like(delta_x, 1e-10))
iou = intersection / union
iou = torch.clamp(iou, 0, 1)
return iou
def square_error(output, target, mask):
return 1 / output.shape[0] * torch.sum(mask * (output - target).pow(2))
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, num):
self.avg = np.zeros(num)
self.sum = np.zeros(num)
self.count = 0
def reset(self):
self.avg[:] = 0
self.sum[:] = 0
self.count = 0
def update(self, *val, n=1):
val_array = np.array(val)
self.sum += val_array * n
self.count += n
self.avg = self.sum / self.count
def draw_box(img, output):
img = img.cpu().numpy().transpose(0, 2, 3, 1)
img = img * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
img = np.clip(img, 0, 1)
output = output.cpu().numpy()
output[:, :, :, 2:96:6] = (output[:, :, :, 2:96:6] + x_offset.cpu().numpy()) * cfg.CELL_SIZE
output[:, :, :, 3:96:6] = (output[:, :, :, 3:96:6] + y_offset.cpu().numpy()) * cfg.CELL_SIZE
output[:, :, :, 4:96:6] = output[:, :, :, 4:96:6] ** 2
output[:, :, :, 5:96:6] = output[:, :, :, 5:96:6] ** 2
output[:, :, :, 4:96:6] *= cfg.IMG_SIZE
output[:, :, :, 5:96:6] *= cfg.IMG_SIZE
n = img.shape[0]
alpha = 0.8
for i in range(n):
overlay = img[i].copy()
for j in range(1, 16):
exist_output = output[i, :, :, 6 * j] > cfg.thres1
box_output = output[i][exist_output][:, 6 * j + 2:6 * j + 6]
box_output[:, 0], box_output[:, 1], box_output[:, 2], box_output[:, 3] = \
box_output[:, 0] - 0.5 * box_output[:, 2], box_output[:, 1] - 0.5 * box_output[:, 3], \
box_output[:, 0] + 0.5 * box_output[:, 2], box_output[:, 1] + 0.5 * box_output[:, 3]
for b in box_output:
cv2.rectangle(overlay, (b[0], b[1]), (b[2], b[3]), colors[j - 1], -1)
img_transparent = cv2.addWeighted(overlay, alpha, img[i], 1 - alpha, 0)[:, :, ::-1]
img_transparent[:, ::cfg.CELL_SIZE, :] = np.array([1., 1, 1])
img_transparent[::cfg.CELL_SIZE, :, :] = np.array([1., 1, 1])
cv2.namedWindow('joint box', cv2.WINDOW_NORMAL)
cv2.resizeWindow('joint box', 800, 800)
cv2.imshow('joint box', img_transparent)
cv2.waitKey(0)
if __name__ == '__main__':
main()
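# --- Illustrative worked example (not part of the original file) ---
# get_iou() works on boxes given as (center_x, center_y, w, h); with plain numbers:
#     box A: center (5, 5), 4x4 -> corners (3, 3)-(7, 7)
#     box B: center (6, 6), 4x4 -> corners (4, 4)-(8, 8)
#     intersection = 3 * 3 = 9, union = 16 + 16 - 9 = 23, IoU = 9 / 23 ≈ 0.391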
| 42.341365
| 106
| 0.525752
|
5dcdba0f5189202d266aa6e919920e0e75b7309c
| 3,222
|
py
|
Python
|
datasets/Part 3 - Classification/Section 17 - Kernel SVM/my_kernel_svm.py
|
nylvam/machinelearning-az
|
2ff139082b61ace5a94ef86517c84febee3b7ecb
|
[
"MIT"
] | null | null | null |
datasets/Part 3 - Classification/Section 17 - Kernel SVM/my_kernel_svm.py
|
nylvam/machinelearning-az
|
2ff139082b61ace5a94ef86517c84febee3b7ecb
|
[
"MIT"
] | null | null | null |
datasets/Part 3 - Classification/Section 17 - Kernel SVM/my_kernel_svm.py
|
nylvam/machinelearning-az
|
2ff139082b61ace5a94ef86517c84febee3b7ecb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 18:56:54 2020
@author: ramonpuga
"""
# Kernel SVM
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
# Matrix X: all rows, feature columns only (Age and EstimatedSalary)
X = dataset.iloc[:, [2,3]].values
# Vector y: all rows, last column (the target)
y = dataset.iloc[:, -1].values
# Split the dataset into training and test sets
from sklearn.model_selection import train_test_split
# Use 25% (0.25) of the data for testing and a random_state of 0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature scaling (standardization or normalization)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
# Fit the scaler on the training data and transform all X columns
X_train = sc_X.fit_transform(X_train)
# Apply the same scaling that was fitted on the training data
X_test = sc_X.transform(X_test)
# Fit the classifier to the training set
from sklearn.svm import SVC
classifier = SVC(kernel = "rbf", random_state = 0)
classifier.fit(X_train, y_train)
# Predict the results on the test set
y_pred = classifier.predict(X_test)
# Build a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Plot the results of the algorithm (training set)
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Plot the algorithm's results on the test set
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| 37.905882
| 105
| 0.687772
|
777baa03dfbf487772a03c781d797aa83f082913
| 3,781
|
py
|
Python
|
scripts/gen_tree_json.py
|
pennykamp/bonsai
|
c7066b7dca3f29e5f944ffb14337b519191972c6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 25
|
2019-04-05T21:18:16.000Z
|
2021-09-03T04:55:19.000Z
|
scripts/gen_tree_json.py
|
pennykamp/bonsai
|
c7066b7dca3f29e5f944ffb14337b519191972c6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-07-03T07:42:09.000Z
|
2021-11-19T15:41:11.000Z
|
scripts/gen_tree_json.py
|
pennykamp/bonsai
|
c7066b7dca3f29e5f944ffb14337b519191972c6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5
|
2019-09-09T01:19:22.000Z
|
2021-06-28T19:49:59.000Z
|
import json
import numpy as np
import networkx as nx
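# Note: this script relies on the g.node[...] accessor, which was removed in
# networkx 2.4; it assumes an older networkx release (newer ones use g.nodes[...]).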
def parse_model(tree_path, bonsai_format=True):
"""return a tree in networkx.DiGraph"""
READ_IS_LEAF = 'is_leaf'
READ_CHILDREN = 'children'
READ_DEPTH = 'depth'
READ_LABELS = 'labels'
READ_FEATURE_DIM = 'feature_dim'
READ_FEATURE_COL = 'feature_col'
with open(tree_path, 'r') as f:
g = nx.DiGraph()
# skip |X| and |Y|
for l in range(2):
f.readline()
num_nodes = int(f.readline().strip())
g.add_nodes_from(np.arange(num_nodes))
state = READ_IS_LEAF
current_node = -1
for l in f:
l = l.strip()
if state == READ_IS_LEAF:
inner_column_index = -1
current_node += 1
is_leaf = bool(int(l))
g.node[current_node]['is_leaf'] = is_leaf
print('current node', current_node)
print('is leaf', is_leaf)
if is_leaf and bonsai_format:
state = READ_DEPTH
else:
state = READ_CHILDREN
elif state == READ_CHILDREN:
children = map(int, l.split())
for child in children:
if not g.node[current_node]['is_leaf']:
g.add_edge(current_node, child)
state = READ_DEPTH
print('children', list(g[current_node].keys()))
elif state == READ_DEPTH:
g.node[current_node]['depth'] = int(l)
print('depth', int(l))
state = READ_LABELS
elif state == READ_LABELS:
segs = l.split()
num_labels, labels = int(segs[0]), list(map(int, segs[1:]))
assert len(labels) == num_labels
g.node[current_node]['labels'] = labels
g.node[current_node]['num-labels'] = len(labels)
print('#labels', len(labels))
state = READ_FEATURE_DIM
elif state == READ_FEATURE_DIM:
n_feature_cols, _ = map(int, l.split())
state = READ_FEATURE_COL
print('n_feature_cols', n_feature_cols)
elif state == READ_FEATURE_COL:
# skip feature for now
inner_column_index += 1
if inner_column_index == (n_feature_cols - 1):
state = READ_IS_LEAF
print('to new node')
else:
state = READ_FEATURE_COL
print('is feature col, skip')
assert num_nodes == g.number_of_nodes(), '{} != {}'.format(
num_nodes, g.number_of_nodes())
assert g.number_of_edges() == (num_nodes - 1), '{} != {}'.format(
g.number_of_edges(), (num_nodes - 1))
assert len(list(nx.weakly_connected_components(g))) == 1
return g
def gen_nested_tree(g):
def aux(n):
"""return a list of child nodes"""
# print('at node ', n, g.node[n])
if g.node[n]['is_leaf']:
return {'name': n, 'value': g.node[n]['num-labels']}
else:
return {
'name': n,
'children': [aux(c) for c in g[n].keys()]
}
return aux(0)
if True:
input_path = '/home/xiaoh1/code/parabel-v2-old/sandbox/results/eurlex/model/0.tree'
output_path = '../outputs/tree_json/parabel.json'
else:
input_path = '/scratch/cs/xml/bonsai-model/eurlex/d3-rid1/0.tree'
output_path = '../outputs/tree_json/eurlex-d3.json'
print('input ', input_path)
g = parse_model(input_path, bonsai_format=False)
d = gen_nested_tree(g)
print('output to ', output_path)
json.dump(d, open(output_path, 'w'))
| 35.009259
| 87
| 0.526316
|
025834705b31d16cd9d5b40f9d6fae156d884153
| 1,827
|
py
|
Python
|
generator/setup.py
|
romulobusatto/google-api-php-client-services
|
7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf
|
[
"Apache-2.0"
] | 709
|
2018-09-13T01:13:59.000Z
|
2022-03-31T10:28:41.000Z
|
generator/setup.py
|
romulobusatto/google-api-php-client-services
|
7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf
|
[
"Apache-2.0"
] | 188
|
2018-09-14T23:14:15.000Z
|
2022-03-29T22:02:40.000Z
|
generator/setup.py
|
romulobusatto/google-api-php-client-services
|
7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf
|
[
"Apache-2.0"
] | 194
|
2016-04-16T22:29:22.000Z
|
2018-09-03T07:31:55.000Z
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google APIs Client Generator."""
from setuptools import find_packages # pylint:disable=g-import-not-at-top
from setuptools import setup
setup(
name='google-apis-client-generator',
version='1.4.2',
description='Google Apis Client Generator',
long_description=open('README.md').read(),
author='Tony Aiuto',
author_email='aiuto@google.com',
maintainer='Brent Shaffer',
maintainer_email='betterbrent@google.com',
url='https://github.com/googleapis/google-api-php-client-services/',
packages=find_packages('src'),
package_dir={'': 'src'},
entry_points={
'console_scripts': [
('generate_library = '
'googleapis.codegen.script_stubs:RunGenerateLibrary'),
('expand_templates = '
'googleapis.codegen.script_stubs:RunExpandTemplates')
]},
include_package_data=True,
# TODO(user): Any 1.8 version is OK, but django<1.9 seems to do the wrong
# thing. It installs version 1.9rc1, which is not compatible.
install_requires=['django==1.8.12',
'httplib2',
'google-apputils',
'python-gflags'],
zip_safe=False)
| 38.0625
| 77
| 0.681445
|
e556e85cb69c29428776c4ffdbac9c3dd1277c23
| 1,435
|
py
|
Python
|
kfusiontables/kft/utils/check.py
|
kula1922/kfusiontables
|
149ddaddb95319a237bb94525db17b1b3a5ac66f
|
[
"BSD-3-Clause"
] | 4
|
2016-04-10T10:27:36.000Z
|
2018-10-12T13:45:25.000Z
|
kfusiontables/kft/utils/check.py
|
kula1922/kfusiontables
|
149ddaddb95319a237bb94525db17b1b3a5ac66f
|
[
"BSD-3-Clause"
] | 2
|
2020-06-05T17:30:32.000Z
|
2021-06-01T21:52:49.000Z
|
kfusiontables/kft/utils/check.py
|
kula1922/kfusiontables
|
149ddaddb95319a237bb94525db17b1b3a5ac66f
|
[
"BSD-3-Clause"
] | null | null | null |
from kfusiontables.kft.exceptions import (
IncorrectParametersNumberException
)
from kfusiontables.models import TableMap
from kfusiontables.kft.utils.convert import (
get_model_from_table_name,
get_model_from_table_id
)
def _fusiontable_table_exist(sender):
"""
Check from sender if fusiontables table exist.
"""
try:
table_map = TableMap.objects.get(
table_name="{0};{1}".format(
sender._meta.app_label,
sender._meta.model_name
)
)
if table_map.ft_id:
return True
except TableMap.DoesNotExist:
pass
return False
def fusiontable_table_exist(table_name=None, table_id=None, sender=None):
"""
Check if fusiontables table exist.
"""
params_count = len(
list(filter(lambda x: x, [table_name, table_id, sender]))
)
# exactly one of table_name, table_id or sender must be provided
if params_count != 1:
raise IncorrectParametersNumberException(
"Function 'fusiontable_table_exist' got {0} parameters but takes exactly 1.".format(
params_count
)
)
if table_name:
sender = get_model_from_table_name(table_name)
if table_id:
sender = get_model_from_table_id(table_id)
return _fusiontable_table_exist(sender)
def is_fusiontablesync(model):
"""
Check if the model is fusiontables syncable.
"""
return bool(getattr(model, '_fusiontablesync', False))
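# Illustrative usage sketch (not part of the original module; the model name is
# hypothetical): exactly one of table_name, table_id or sender is expected, e.g.
#
#   if fusiontable_table_exist(sender=SomeModel):
#       ...  # a FusionTables table is already mapped to SomeModel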
| 26.574074
| 73
| 0.64669
|
92d0b604b0363b3b3c6bb6ad93cd1f56f0ee7504
| 15,460
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_profile_http_compression.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_profile_http_compression.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_profile_http_compression.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_profile_http_compression
short_description: Manage HTTP compression profiles on a BIG-IP
description:
- Manage HTTP compression profiles on a BIG-IP device.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the compression profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(httpcompression) profile.
type: str
description:
description:
- Description of the HTTP compression profile.
type: str
buffer_size:
description:
- Maximum number of compressed bytes the system buffers before inserting
a Content-Length header (which specifies the compressed size) into the response.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: int
gzip_level:
description:
- Specifies the degree to which the system compresses the content.
- Higher compression levels cause the compression process to be slower.
- Valid values range from 1 (least compression and fastest) to 9 (most
compression and slowest).
type: int
choices:
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
gzip_memory_level:
description:
- Number of kilobytes of memory the system uses for internal compression
buffers when compressing a server response.
type: int
choices:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
gzip_window_size:
description:
- Number of kilobytes in the window size the system uses when compressing
a server response.
type: int
choices:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create an HTTP compression profile
bigip_profile_http_compression:
name: profile1
description: Custom HTTP Compression Profile
buffer_size: 131072
gzip_level: 6
gzip_memory_level: 16
gzip_window_size: 64
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the resource.
returned: changed
type: str
sample: My custom profile
buffer_size:
description: The new buffer size of the profile.
returned: changed
type: int
sample: 4096
gzip_memory_level:
description: The new GZIP memory level of the profile, in KB.
returned: changed
type: int
sample: 16
gzip_level:
description: The new GZIP level of the profile. Smaller is less compression.
returned: changed
type: int
sample: 2
gzip_window_size:
description: The new GZIP window size of the profile, in KB.
returned: changed
type: int
sample: 64
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'bufferSize': 'buffer_size',
'defaultsFrom': 'parent',
'gzipMemoryLevel': 'gzip_memory_level',
'gzipLevel': 'gzip_level',
'gzipWindowSize': 'gzip_window_size',
}
api_attributes = [
'description',
'bufferSize',
'defaultsFrom',
'gzipMemoryLevel',
'gzipLevel',
'gzipWindowSize',
]
returnables = [
'description',
'buffer_size',
'gzip_memory_level',
'gzip_level',
'gzip_window_size',
]
updatables = [
'description',
'buffer_size',
'gzip_memory_level',
'gzip_level',
'gzip_window_size',
'parent',
]
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
@property
def gzip_memory_level(self):
if self._values['gzip_memory_level'] is None:
return None
return self._values['gzip_memory_level'] / 1024
@property
def gzip_window_size(self):
if self._values['gzip_window_size'] is None:
return None
return self._values['gzip_window_size'] / 1024
class ModuleParameters(Parameters):
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
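# Converts the kilobyte values used by the module interface into the byte
# values sent in the API payload (hence the * 1024 below).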
@property
def gzip_memory_level(self):
if self._values['gzip_memory_level'] is None:
return None
return self._values['gzip_memory_level'] * 1024
@property
def gzip_window_size(self):
if self._values['gzip_window_size'] is None:
return None
return self._values['gzip_window_size'] * 1024
class ReportableChanges(Changes):
@property
def gzip_memory_level(self):
if self._values['gzip_memory_level'] is None:
return None
return self._values['gzip_memory_level'] / 1024
@property
def gzip_window_size(self):
if self._values['gzip_window_size'] is None:
return None
return self._values['gzip_window_size'] / 1024
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
raise F5ModuleError(resp.content)
def read_current_from_device(self): # lgtm [py/similar-function]
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(),
buffer_size=dict(type='int'),
description=dict(),
gzip_level=dict(
type='int',
choices=[1, 2, 3, 4, 5, 6, 7, 8, 9]
),
gzip_memory_level=dict(
type='int',
choices=[1, 2, 4, 8, 16, 32, 64, 128, 256]
),
gzip_window_size=dict(
type='int',
choices=[1, 2, 4, 8, 16, 32, 64, 128]
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 28.682746
| 94
| 0.596119
|
760224087599bfbd0355f9ce4a0a92928490e136
| 3,693
|
py
|
Python
|
autotest/alg/reproject.py
|
dtusk/gdal1
|
30dcdc1eccbca2331674f6421f1c5013807da609
|
[
"MIT"
] | 3
|
2017-01-12T10:18:56.000Z
|
2020-03-21T16:42:55.000Z
|
autotest/alg/reproject.py
|
ShinNoNoir/gdal-1.11.5-vs2015
|
5d544e176a4c11f9bcd12a0fe66f97fd157824e6
|
[
"MIT"
] | null | null | null |
autotest/alg/reproject.py
|
ShinNoNoir/gdal-1.11.5-vs2015
|
5d544e176a4c11f9bcd12a0fe66f97fd157824e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test ReprojectImage() algorithm.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal, gdalconst, osr
###############################################################################
# Test a trivial case.
def reproject_1():
try:
x = gdal.ReprojectImage
except:
return 'skip'
drv = gdal.GetDriverByName( 'GTiff' )
src_ds = gdal.Open('../gcore/data/byte.tif')
dst_ds = drv.Create('tmp/byte.tif', src_ds.RasterXSize, src_ds.RasterYSize, gdal.GDT_Byte )
dst_ds.SetProjection(src_ds.GetProjectionRef())
dst_ds.SetGeoTransform(src_ds.GetGeoTransform())
gdal.ReprojectImage( src_ds, dst_ds)
cs_expected = src_ds.GetRasterBand(1).Checksum()
cs = dst_ds.GetRasterBand(1).Checksum()
dst_ds = None
drv.Delete( 'tmp/byte.tif' )
if cs != cs_expected:
print('Got: ', cs)
gdaltest.post_reason( 'got wrong checksum' )
return 'fail'
else:
return 'success'
###############################################################################
# Test a real reprojection case.
def reproject_2():
try:
x = gdal.ReprojectImage
except:
return 'skip'
sr = osr.SpatialReference()
sr.ImportFromEPSG(32611)
sr2 = osr.SpatialReference()
sr2.ImportFromEPSG(4326)
drv = gdal.GetDriverByName( 'GTiff' )
src_ds = gdal.Open('../gcore/data/byte.tif')
dst_ds = drv.Create('tmp/byte_4326.tif', 22, 18, gdal.GDT_Byte )
dst_ds.SetGeoTransform([-117.641169915168746,0.000598105625684,0,33.900668703925191,0,-0.000598105625684])
gdal.ReprojectImage( src_ds, dst_ds, sr.ExportToWkt(), sr2.ExportToWkt())
cs_expected = 4727
cs = dst_ds.GetRasterBand(1).Checksum()
dst_ds = None
drv.Delete( 'tmp/byte_4326.tif' )
if cs != cs_expected:
print('Got: ', cs)
gdaltest.post_reason( 'got wrong checksum' )
return 'fail'
else:
return 'success'
gdaltest_list = [
reproject_1,
reproject_2
]
if __name__ == '__main__':
gdaltest.setup_run( 'reproject' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 29.544
| 110
| 0.619551
|
96b7daeb9dc5c1cf30b724d6aed61ded3b57c196
| 327
|
py
|
Python
|
models.py
|
newnativeabq/airquality-dash
|
3108887a8195e0c473ba2194ff012637ba8ce1de
|
[
"MIT"
] | null | null | null |
models.py
|
newnativeabq/airquality-dash
|
3108887a8195e0c473ba2194ff012637ba8ce1de
|
[
"MIT"
] | null | null | null |
models.py
|
newnativeabq/airquality-dash
|
3108887a8195e0c473ba2194ff012637ba8ce1de
|
[
"MIT"
] | null | null | null |
"""SQLAlchemy models for TwitOff"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return '<Record {}>'.format(self.id)
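# Minimal wiring sketch (an assumption, not part of this file): DB is normally
# bound to the Flask application elsewhere, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///records.sqlite3'
#   DB.init_app(app)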
| 23.357143
| 48
| 0.685015
|