Dataset schema (39 columns):
- hexsha: string (length 40)
- size: int64 (5 to 2.06M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 3 to 248)
- max_stars_repo_name: string (length 5 to 125)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 3 to 248)
- max_issues_repo_name: string (length 5 to 125)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 3 to 248)
- max_forks_repo_name: string (length 5 to 125)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 5 to 2.06M)
- avg_line_length: float64 (1 to 1.02M)
- max_line_length: int64 (3 to 1.03M)
- alphanum_fraction: float64 (0 to 1)
- count_classes: int64 (0 to 1.6M)
- score_classes: float64 (0 to 1)
- count_generators: int64 (0 to 651k)
- score_generators: float64 (0 to 1)
- count_decorators: int64 (0 to 990k)
- score_decorators: float64 (0 to 1)
- count_async_functions: int64 (0 to 235k)
- score_async_functions: float64 (0 to 1)
- count_documentation: int64 (0 to 1.04M)
- score_documentation: float64 (0 to 1)

Records (one per source file):
---
hexsha: a183e429ab2df0bcb4079f035e2dd6d3cb6737a5 | size: 3,402 | ext: py | lang: Python
path: angr_ctf/solutions/06_angr_symbolic_dynamic_memory.py | repo: Hamz-a/angr_playground @ 8216f43bd2ec9a91c796a56bab610b119f8311cf | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
import angr
import claripy
path_to_bin = "../binaries/06_angr_symbolic_dynamic_memory"
# Find callback
def good_job(state):
    # Get the output of the state (posix.dumps returns bytes)
    stdout = state.posix.dumps(1)
    # If the program printed "Good Job." then we've found a good state
    return b"Good Job." in stdout
# Avoid callback
def try_again(state):
    # Get the output of the state (posix.dumps returns bytes)
    stdout = state.posix.dumps(1)
    # If the program printed "Try again." then we found a state that we want to avoid
    return b"Try again." in stdout
# Create an angr project
project = angr.Project(path_to_bin)
# Create the begin state starting from address 0x08048699 (see r2 output below)
# $ r2 -A 06_angr_symbolic_dynamic_memory
# [0x08048490]> pdf @main
# ┌ (fcn) main 395
# │ main (int argc, char **argv, char **envp);
# │ <REDACTED>
# │ 0x08048664 e8e7fdffff call sym.imp.memset ; void *memset(void *s, int c, size_t n)
# │ 0x08048669 83c410 add esp, 0x10
# │ 0x0804866c 83ec0c sub esp, 0xc
# │ 0x0804866f 682e880408 push str.Enter_the_password: ; 0x804882e ; "Enter the password: " ; const char *format
# │ 0x08048674 e877fdffff call sym.imp.printf ; int printf(const char *format)
# │ 0x08048679 83c410 add esp, 0x10
# │ 0x0804867c 8b15acc8bc0a mov edx, dword [obj.buffer1] ; [0xabcc8ac:4]=0
# │ 0x08048682 a1a4c8bc0a mov eax, dword [obj.buffer0] ; [0xabcc8a4:4]=0
# │ 0x08048687 83ec04 sub esp, 4
# │ 0x0804868a 52 push edx
# │ 0x0804868b 50 push eax
# │ 0x0804868c 6843880408 push str.8s__8s ; 0x8048843 ; "%8s %8s" ; const char *format
# │ 0x08048691 e8cafdffff call sym.imp.__isoc99_scanf ; int scanf(const char *format)
# │ 0x08048696 83c410 add esp, 0x10
# │ 0x08048699 c745f4000000. mov dword [local_ch], 0 ; <<< START HERE
# │ ┌─< 0x080486a0 eb64 jmp 0x8048706
entry_state = project.factory.blank_state(addr=0x08048699)
# Create a symbolic bitvector for each part of the password (64 bits per part, since "%8s" is used in scanf)
password_part0 = claripy.BVS("password_part0", 64)
password_part1 = claripy.BVS("password_part1", 64)
# Point the global buffer0/buffer1 pointers at fake heap addresses
entry_state.memory.store(0xabcc8a4, 0x4000000, endness=project.arch.memory_endness)
entry_state.memory.store(0xabcc8ac, 0x4000A00, endness=project.arch.memory_endness)
# Use the created heap and inject BVS
entry_state.memory.store(0x4000000, password_part0)
entry_state.memory.store(0x4000A00, password_part1)
# Create a simulation manager
simulation_manager = project.factory.simulation_manager(entry_state)
# Pass callbacks for states that we should find and avoid
simulation_manager.explore(avoid=try_again, find=good_job)
# If simulation manager has found a state
if simulation_manager.found:
found_state = simulation_manager.found[0]
# Get flag by solving the symbolic values using the found path
solution0 = found_state.solver.eval(password_part0, cast_to=bytes)
solution1 = found_state.solver.eval(password_part1, cast_to=bytes)
print("{} {}".format(solution0.decode("utf-8"), solution1.decode("utf-8")))
else:
print("No path found...")
avg_line_length: 44.763158 | max_line_length: 131 | alphanum_fraction: 0.663727
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 2,309/0.670441
---
hexsha: a1841c43709e67515946480883952c56edc55654 | size: 57 | ext: py | lang: Python
path: run.py | repo: JonLMyers/MetroTransitAPI @ d8f467570368cd563d69564b680cfdd47ad6b622 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
""" Runs the server """
from aaxus import app
app.run()
avg_line_length: 11.4 | max_line_length: 23 | alphanum_fraction: 0.649123
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 23/0.403509
---
hexsha: a1856d81103436f6d6bff2bf0852aa835858a675 | size: 1,416 | ext: py | lang: Python
path: ConjugateGardient_Python.py | repo: rohitj559/HPC_MPI-project @ 2b8abe5044d0e8a5a607f7d534a41bb97174e165 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 20:36:02 2018
@author: Rohit
"""
# =============================================================================
# import numpy as np
# a = np.array([5,4])[np.newaxis]
# print(a)
# print(a.T)
#
# function [x] = conjgrad(A, b, x)
# r = b - A * x;
# p = r;
# rsold = r' * r;
#
# for i = 1:length(b)
# Ap = A * p;
# alpha = rsold / (p' * Ap);
# x = x + alpha * p;
# r = r - alpha * Ap;
# rsnew = r' * r;
# if sqrt(rsnew) < 1e-10
# break;
# end
# p = r + (rsnew / rsold) * p;
# rsold = rsnew;
# end
# end
# =============================================================================
import numpy as np
def ConjGrad(a, b, x):
    r = b - np.dot(a, x)
    p = r
    rsold = np.dot(r.T, r)
    for i in range(len(b)):
        a_p = np.dot(a, p)
        alpha = rsold / np.dot(p.T, a_p)
        x = x + (alpha * p)
        r = r - (alpha * a_p)
        rsnew = np.dot(r.T, r)
        if np.sqrt(rsnew) < 1e-5:
            break
        p = r + ((rsnew / rsold) * p)
        rsold = rsnew
    # Return the solution vector x (the original returned the search direction p by mistake)
    return x
a = np.array([[3, 2, -1], [2, -1, 1], [-1, 1, -1]]) # 3x3 symmetric matrix
b = (np.array([1, -2, 0])[np.newaxis]).T # 3x1 column vector
x = (np.array([0, 1, 2])[np.newaxis]).T # initial guess
val = ConjGrad(a, b, x)
print(val)
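A quick verification sketch (not in the original script): compare the conjugate-gradient result against numpy's direct solver and report the residual. Note that classic conjugate gradient is only guaranteed to converge for symmetric positive-definite matrices.

x_direct = np.linalg.solve(a, b)          # direct solution of a x = b for comparison
print("direct solution:\n", x_direct)
print("residual norm:", np.linalg.norm(b - np.dot(a, val)))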
avg_line_length: 22.125 | max_line_length: 79 | alphanum_fraction: 0.367232
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 747/0.527542
---
hexsha: a186a2c3d773bd33d3d6c3ea0aa252bbcefbcff7 | size: 5,232 | ext: py | lang: Python
path: examples/applications/agritrop-indexing/training_agritrop_baseline.py | repo: Ing-David/sentence-transformers @ 4895f2f806d209a41a770e96ba2425aac605497c | licenses: ["Apache-2.0"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
import argparse
import logging
import math
from pathlib import Path
import torch.multiprocessing as mp
import os
from datetime import datetime
import nltk
import pandas as pd
import transformers
from torch import nn
import torch.distributed
from torch._C._distributed_c10d import HashStore
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import InputExampleDocument, BiEncoder
from sentence_transformers import LoggingHandler
from eval_agritrop import create_evaluator
# torch.distributed.init_process_group(backend="nccl",store=HashStore(), world_size=8, rank=0)
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
os.putenv("TOKENIZERS_PARALLELISM", "true")
logger = logging.getLogger(__name__)
#### /print debug information to stdout
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train / evaluate baseline indexing system on abstracts')
parser.add_argument('--dataset', '-d', type=str, nargs=1,
help='Path to the TSV corpus to use', dest='dataset',
default=['datasets/corpus_agritrop_transformers_abstract.tsv'])
parser.add_argument('--save-prefix', '-s', type=str, nargs=1,
help='Prefix for the model save directory', dest='save_prefix',
default=['output/training_agritrop_transformer_baseline-'])
parser.add_argument('--epochs', '-e', type=int, nargs=1, help="The number of epochs (for training)", dest='epochs',
default=[100])
parser.add_argument('--eval', '-l', type=str, nargs=1, help="Load model from directory and evaluate", dest='eval',
default=[])
args = parser.parse_args()
# dataset's path
agritrop_dataset_path = args.dataset[0]
# Define our Bi-Encoder
train_batch_size = 1
num_epochs = args.epochs[0]
load = len(args.eval) > 0
model_save_path = args.save_prefix[0] + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Read Agritrop's dataset
logger.info("Read Agritrop's train dataset")
df_transformer = pd.read_csv(agritrop_dataset_path, sep='\t')
# list sample
train_samples = []
dev_samples = []
test_samples = []
df_document_groups = df_transformer.groupby("doc_ids")
for group in tqdm(df_document_groups):
abstract = group[1]['abstract'].iloc[0]
concept_labels = []
labels = []
for index, row in group[1].iterrows():
split_concept_labels = list(row['sentence2'].split(","))
concate_concept = " ".join(split_concept_labels)
concept_labels.append([concate_concept])
labels.append(int(row['score']))
input_example = InputExampleDocument(document_sentences=[abstract], concept_labels=concept_labels,
labels=labels)
split = group[1]['split'].iloc[0]
if split == 'dev':
dev_samples.append(input_example)
elif split == 'test':
test_samples.append(input_example)
else:
train_samples.append(input_example)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=False, batch_size=train_batch_size)
# print(len(train_dataloader.dataset))
# We use squeezebert/squeezebert-uncased as the base model and set num_labels=1, which predicts a continuous score between 0 and 1
if not load:
logger.info("Training model using 'squeezebert/squeezebert-uncased'...")
model = BiEncoder('squeezebert/squeezebert-uncased', num_labels=1, max_length=512, device="cuda:1",
freeze_transformer=False)
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
# mp.spawn(fit_model, args=(model, train_dataloader,
# None, # evaluator,
# 4, # epochs
# warmup_steps,
# model_save_path,
# True), # use amp
# nprocs=8, join=True)
model.save(model_save_path)
model.fit(train_dataloader=train_dataloader,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path, use_amp=False)
model.save(model_save_path)
else:
load_path = args.eval[0]
logger.info(f"Loading model from {load_path}")
model = BiEncoder(load_path, num_labels=1, max_length=512, device="cpu",
freeze_transformer=False)
logger.info("Evaluating...")
evaluator_dev, evaluator_test = create_evaluator(df_transformer, text_field="abstract", device="cpu")
evaluator_dev(model)
evaluator_test(model)
avg_line_length: 39.938931 | max_line_length: 119 | alphanum_fraction: 0.632072
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 1,654/0.316131
---
hexsha: a18749c6aba22f8c7ec4513c3967c1df5e092f47 | size: 1,793 | ext: py | lang: Python
path: src/utils/file_manipulation.py | repo: SashiniHansika/Relationship-Identifying-Module @ 4a640b68220c7735061cb984a7edccaee322fc33 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
# open input text scenario
import xml.etree.ElementTree as ET
import os
PATH = "G:\\FYP\\FYP-ER-Relationships-Module\\data"
text_file = open(PATH+"\\input_text.txt", "r")
if text_file.mode == 'r':
# Read the scenario and covert that text file into lowercase
input_text_load = text_file.read()
input_text = input_text_load.lower()
print(input_text)
# Read input XML file
def get_root_of_input_xml():
tree = ET.parse(PATH+"\\input_xml.xml")
root = tree.getroot()
return root
def get_root_of_er_xml():
tree = ET.parse(PATH+'\\first_output.xml')
root = tree.getroot()
print(root)
return root
def remove_files():
    if os.path.exists(PATH+"\\first_output.xml"):
        os.remove(PATH+"\\first_output.xml")
    else:
        print('first_output.xml does not exist')
    if os.path.exists(PATH+"\\er.csv"):
        os.remove(PATH+"\\er.csv")
    else:
        print('er.csv does not exist')
    if os.path.exists(PATH+"\\er.txt"):
        os.remove(PATH+"\\er.txt")
    else:
        print('er.txt does not exist')
    if os.path.exists(PATH+"\\output.json"):
        os.remove(PATH+"\\output.json")
    else:
        print('output.json does not exist')
    if os.path.exists(PATH+"\\output.xml"):
        os.remove(PATH+"\\output.xml")
    else:
        print('output.xml does not exist')
    if os.path.exists(PATH+"\\relation.json"):
        os.remove(PATH+"\\relation.json")
    else:
        print('relation.json does not exist')
    if os.path.exists(PATH+"\\relation.xml"):
        os.remove(PATH+"\\relation.xml")
    else:
        print('relation.xml does not exist')
    if os.path.exists(PATH+"\\intermediate_text.txt"):
        os.remove(PATH+"\\intermediate_text.txt")
    else:
        print('intermediate_text.txt does not exist')
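The chain of if/else blocks above can be collapsed into a loop over the artefact names; a possible refactor sketch with the same behaviour (the helper name remove_files_compact is made up here):

def remove_files_compact():
    # Same behaviour as remove_files(), expressed as a loop over the file names.
    filenames = ["first_output.xml", "er.csv", "er.txt", "output.json",
                 "output.xml", "relation.json", "relation.xml", "intermediate_text.txt"]
    for name in filenames:
        full_path = os.path.join(PATH, name)
        if os.path.exists(full_path):
            os.remove(full_path)
        else:
            print('{} does not exist'.format(name))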
avg_line_length: 25.614286 | max_line_length: 64 | alphanum_fraction: 0.622421
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 689/0.384272
---
hexsha: a187e17bf5a82ceb3711020d4fb1495722b57b3c | size: 2,428 | ext: py | lang: Python
path: tests/tensorflow/pruning/test_tensor_processor.py | repo: MaximProshin/nncf @ 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | licenses: ["Apache-2.0"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 136 (2020-06-01T14:03:31.000Z to 2020-10-28T06:10:50.000Z) | max_issues_count: 133 (2020-05-26T13:48:04.000Z to 2020-10-28T05:25:55.000Z) | max_forks_count: 36 (2020-05-28T08:18:39.000Z to 2020-10-27T14:46:58.000Z)
content:
import pytest
import tensorflow as tf
from nncf.tensorflow.tensor import TFNNCFTensor
from nncf.tensorflow.pruning.tensor_processor import TFNNCFPruningTensorProcessor
@pytest.mark.parametrize('device', ("CPU", 'GPU'))
def test_create_tensor(device):
if not tf.config.list_physical_devices('GPU'):
if device == 'GPU':
pytest.skip('There are no available CUDA devices')
shape = [1, 3, 10, 100]
tensor = TFNNCFPruningTensorProcessor.ones(shape, device)
assert tf.is_tensor(tensor.tensor)
assert tensor.tensor.device.split('/')[-1].split(':')[1] == device
assert list(tensor.tensor.shape) == shape
def test_repeat():
tensor_data = [0., 1.]
repeats = 5
tensor = TFNNCFTensor(tf.Variable(tensor_data))
repeated_tensor = TFNNCFPruningTensorProcessor.repeat(tensor, repeats=repeats)
ref_repeated = []
for val in tensor_data:
for _ in range(repeats):
ref_repeated.append(val)
assert tf.reduce_all(repeated_tensor.tensor == tf.Variable(ref_repeated))
def test_concat():
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
concatenated_tensor = TFNNCFPruningTensorProcessor.concatenate(tensors, axis=0)
assert tf.reduce_all(concatenated_tensor.tensor == tf.Variable(tensor_data * 3))
@pytest.mark.parametrize('all_close', [False, True])
def test_assert_all_close(all_close):
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
if not all_close:
tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))
with pytest.raises(tf.errors.InvalidArgumentError):
TFNNCFPruningTensorProcessor.assert_allclose(tensors)
else:
TFNNCFPruningTensorProcessor.assert_allclose(tensors)
@pytest.mark.parametrize('all_close', [False, True])
def test_elementwise_mask_propagation(all_close):
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
if not all_close:
tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))
with pytest.raises(tf.errors.InvalidArgumentError):
TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)
else:
result = TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)
for t in tensors:
tf.debugging.assert_near(result.tensor, t.tensor)
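For orientation, the repeat and concatenate behaviour these tests expect from the nncf tensor processor can be reproduced with plain TensorFlow ops; a small sketch independent of the nncf wrappers:

import tensorflow as tf

data = tf.constant([0., 1.])
print(tf.repeat(data, repeats=5))             # [0. 0. 0. 0. 0. 1. 1. 1. 1. 1.], as in test_repeat
print(tf.concat([data, data, data], axis=0))  # [0. 1. 0. 1. 0. 1.], as in test_concat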
avg_line_length: 38.539683 | max_line_length: 84 | alphanum_fraction: 0.716227
counts/scores: classes 0/0, generators 0/0, decorators 1,563/0.64374, async_functions 0/0, documentation 93/0.038303
---
hexsha: a1898d71541edc0c1b30cdf2d00d4add61765cd1 | size: 4,288 | ext: py | lang: Python
path: src/bot/botstates/TriviaBot.py | repo: malmgrens4/TwIOTch @ a3e05f5fcb5bcd75aba3cf9533ca7c5308e4a2de | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
from twitchio.dataclasses import Message
from typing import Dict, Callable
from datetime import datetime
from dataclasses import dataclass
from src.bot.gameobservers.Observer import Observer
from src.bot.gameobservers.Subject import Subject
from src.bot.botstates.BotState import BotState
from src.bot.botstates.TeamGameHandler import TeamGameHandler
from src.bot.botstates.DefaultBot import DefaultBot
from src.bot.TeamData import TeamData
@dataclass
class TriviaResponse:
time_to_answer: int = None
answer: str = None
class TriviaBot(TeamGameHandler, BotState, Subject):
def __init__(self, team_data: TeamData, question: str,
options: Dict[str, str], correct_options: [str], send_message: Callable[[str], None]):
super().__init__(team_data=team_data)
self.question = question
self.options = options
self.correct_options = correct_options
self._observers = []
self.won = False
self.winning_team_ids = []
self.team_data = team_data
self.send_message = send_message
self.game_start_time = datetime.utcnow()
"""
Contains teams answers (a list of teams maps:
map containing the user and their answer)
[{user_id: answer}]
"""
self.team_responses: [Dict[int, TriviaResponse]] = None
    @property
    def observers(self) -> list:
        return self._observers
def attach(self, observer: Observer) -> None:
self._observers.append(observer)
def detach(self, observer: Observer) -> None:
self._observers.remove(observer)
async def notify(self) -> None:
for observer in self._observers:
await observer.update(self)
async def game_start(self):
self.team_responses = [{} for _ in range(self.team_data.num_teams)]
await super().game_start()
await self.notify()
async def handle_event_message(self, msg: Message) -> None:
"""
Process incoming user message in trivia state
"""
if not self.game_started:
return
team_id = self.team_data.teams.get(msg.author.id)
if team_id is None:
await self.team_data.handle_join(msg)
team_id = self.team_data.teams.get(msg.author.id)
if msg.author.id in self.team_responses[team_id]:
return
user_input = msg.content.lower()
if user_input in self.options:
time_elapsed = int((datetime.utcnow() - self.game_start_time).total_seconds() * 1000)
self.team_responses[team_id][msg.author.id] = TriviaResponse(time_to_answer=time_elapsed, answer=user_input)
# every user that joined has answered so end the game
if sum([len(responses.values()) for responses in self.team_responses]) == len(self.team_data.teams):
await self.end_game()
return
await self.notify()
def get_tally(self):
"""
:return: Dict {team_id: percentage_right (float)}
"""
team_weights: [float] = [0 for _ in range(self.team_data.num_teams)]
for i, responses in enumerate(self.team_responses):
team_responses: [TriviaResponse] = list(responses.values())
if len(responses) == 0:
team_weights[i] = 0
continue
num_correct_responses: int = len([response for response in team_responses
if response.answer in self.correct_options])
team_weights[i] = num_correct_responses / len(responses)
return team_weights
async def end_game(self):
"""
Talley results and determine a winner
"""
team_weights = self.get_tally()
winning_team_ids = [i for i, team_weight in enumerate(team_weights) if team_weight == max(team_weights)]
if max(team_weights) == 0:
winning_team_ids = []
await self.win(winning_team_ids)
    async def win(self, winning_team_ids: list):
self.won = True
self.winning_team_ids = winning_team_ids
self.context.transition_to(DefaultBot())
await self.notify()
async def can_join(self, msg: Message) -> bool:
return not self.won
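A standalone sketch of the tally logic in get_tally(), using plain dicts instead of TriviaResponse objects (the sample data below is made up for illustration):

team_responses = [
    {"alice": "a", "bob": "c"},   # team 0: one of two answers correct
    {"carol": "b"},               # team 1: zero of one answers correct
]
correct_options = ["a"]
team_weights = []
for responses in team_responses:
    answers = list(responses.values())
    if not answers:
        team_weights.append(0)
        continue
    num_correct = sum(1 for answer in answers if answer in correct_options)
    team_weights.append(num_correct / len(answers))
print(team_weights)   # [0.5, 0.0]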
avg_line_length: 34.304 | max_line_length: 120 | alphanum_fraction: 0.639459
counts/scores: classes 3,829/0.892957, generators 0/0, decorators 159/0.03708, async_functions 1,942/0.452892, documentation 417/0.097248
---
hexsha: a189a8ce0239f76496cb3c604a52bf52c941ff4e | size: 515 | ext: py | lang: Python
path: playing1.py | repo: bert386/rpi-monitor-cam-led @ d333a8313500be8150e59462df5482b307eb368d | licenses: ["Apache-2.0"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
# -*- coding: utf-8 -*-
"""
Description:
Todo:
"""
import os
import sys
import logging
from collections import deque
from base_state import BaseState
class Playing1stState(BaseState):
""""""
def __init__(self, state_controller):
super().__init__(state_controller, self.in_state)
@BaseState.decorator_enter
def on_entered(self):
logging.warning("Track01 started ...")
def in_state(self):
pass
@BaseState.decorator_exit
def on_exited(self):
pass
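BaseState itself is not shown in this file; purely as an illustration, enter/exit decorators like decorator_enter and decorator_exit are often implemented roughly as below (an assumed sketch, not the project's actual BaseState):

import functools

class BaseStateSketch:
    @staticmethod
    def decorator_enter(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            self.entered = True              # mark the state as entered before the handler runs
            return method(self, *args, **kwargs)
        return wrapper

    @staticmethod
    def decorator_exit(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            result = method(self, *args, **kwargs)
            self.entered = False             # clear the flag after the handler runs
            return result
        return wrapper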
avg_line_length: 16.612903 | max_line_length: 57 | alphanum_fraction: 0.664078
counts/scores: classes 357/0.693204, generators 0/0, decorators 162/0.314563, async_functions 0/0, documentation 81/0.157282
---
hexsha: a189f72cd87554b98dd997143822d60a01facb7a | size: 518 | ext: py | lang: Python
path: script/isort.py | repo: zhoumjane/devops_backend @ 5567b04a042fd4a449063a96821369396a8d8586 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 53 (2021-07-14T03:11:39.000Z to 2021-09-23T10:39:14.000Z) | max_issues_count: null | max_forks_count: 10 (2021-07-14T06:29:14.000Z to 2021-09-23T00:25:35.000Z)
content:
# -*- coding: utf-8 -*-
import time, random
def isort(i_list):
for i in range(1, len(i_list)):
for j in range(i,0, -1):
if i_list[j] < i_list[j-1]:
i_list[j], i_list[j-1] = i_list[j-1], i_list[j]
else:
break
if __name__ == "__main__":
alist = []
for i in range(50000):
alist.append(random.randint(1, 100))
start_time = time.time()
isort(alist)
end_time = time.time() - start_time
print("cost time: %ss" % (end_time))
avg_line_length: 27.263158 | max_line_length: 63 | alphanum_fraction: 0.530888
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 49/0.094595
---
hexsha: a18ab5b8f24fd76985216d02e899cfe490730c02 | size: 1,903 | ext: py | lang: Python
path: test/test_estim/test_scalarnl.py | repo: Ryandry1st/vampyre @ 43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 59 (2017-01-27T22:36:38.000Z to 2021-12-08T04:16:13.000Z) | max_issues_count: 10 (2017-01-11T15:16:11.000Z to 2021-02-17T10:43:51.000Z) | max_forks_count: 18 (2017-01-11T14:58:32.000Z to 2021-05-03T16:34:53.000Z)
content:
"""
test_relu.py: Test suite for the ReLU estimator class :class:ReLUEstim`
"""
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
def logistic_test(zshape=(100,10), rvar=1, tol=1, verbose=False):
"""
Unit test for the logistic estimator.
Generates random data with a logistic model and then estimates the input
logit :code:`z`.
:param zshape: shape of the data :code:`z`
:param rvar: prior variance on :code:`r`
:param tol: tolerance on estimation error. This should be large since we
are using MAP instead of MMSE estimation so the error variance
is not exact
:param verbose: print results
"""
# Create random data
z = np.random.normal(0,1,zshape)
r = z + np.random.normal(0,np.sqrt(rvar),zshape)
pz = 1/(1+np.exp(-z))
u = np.random.uniform(0,1,zshape)
y = (u < pz)
# Create an estimator
est = vp.estim.LogisticEst(y=y,var_axes='all',max_it=100)
# Run the estimator
zhat, zhatvar = est.est(r,rvar)
# Compare the error
zerr = np.mean((z-zhat)**2)
rel_err = np.maximum( zerr/zhatvar, zhatvar/zerr)-1
fail = (rel_err > tol)
if fail or verbose:
print("Error: Actual: {0:12.4e} Est: {1:12.4e} Rel: {2:12.4e}".format(\
zerr, zhatvar, rel_err))
if fail:
raise vp.common.TestException("Estimation error variance"+\
" does not match predicted value")
class TestCases(unittest.TestCase):
def test_logistic(self):
verbose = False
logistic_test(rvar=0.1, verbose=verbose,tol=0.1)
logistic_test(rvar=10, verbose=verbose,tol=0.5)
if __name__ == '__main__':
unittest.main()
avg_line_length: 29.276923 | max_line_length: 80 | alphanum_fraction: 0.629532
counts/scores: classes 217/0.11403, generators 0/0, decorators 0/0, async_functions 0/0, documentation 808/0.424593
---
hexsha: a18aeadaf1c0a497b57a81c26b42e7ee05084e81 | size: 1,543 | ext: py | lang: Python
path: tests/live/test_client_auth.py | repo: denibertovic/stormpath-sdk-python @ e594a1bb48de3fa8eff26558bf4f72bb056e9d00 | licenses: ["Apache-2.0"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
"""Live tests of client authentication against the Stormpath service API."""
from os import environ
from stormpath.client import Client
from stormpath.error import Error
from .base import LiveBase
class TestAuth(LiveBase):
def test_basic_authentication_succeeds(self):
client = Client(
id=self.api_key_id,
secret=self.api_key_secret,
scheme='basic')
# force the SDK to make a call to the server
list(client.applications)
def test_basic_authentication_fails(self):
client = Client(
id=self.api_key_id + 'x',
secret=self.api_key_secret + 'x',
scheme='basic')
# force the SDK to make a call to the server
with self.assertRaises(Error):
list(client.applications)
def test_digest_authentication_succeeds(self):
client = Client(
id=self.api_key_id,
secret=self.api_key_secret,
scheme='SAuthc1')
# force the SDK to make a call to the server
client.applications
def test_digest_authentication_fails(self):
client = Client(
id=self.api_key_id + 'x',
secret=self.api_key_secret + 'x',
scheme='SAuthc1')
# force the SDK to make a call to the server
with self.assertRaises(Error):
list(client.applications)
def test_load_from_environment_variables(self):
client = Client()
for app in client.applications:
self.assertTrue(app.href)
avg_line_length: 29.113208 | max_line_length: 76 | alphanum_fraction: 0.628645
counts/scores: classes 1,340/0.868438, generators 0/0, decorators 0/0, async_functions 0/0, documentation 296/0.191834
---
hexsha: a18bdd3e3f40a3f576715555ebb6a8270c24a370 | size: 256 | ext: py | lang: Python
path: languages/python/software_engineering_logging4.py | repo: Andilyn/learntosolveit @ fd15345c74ef543e4e26f4691bf91cb6dac568a4 | licenses: ["BSD-3-Clause"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 136 (2015-03-06T18:11:21.000Z to 2022-03-10T22:31:40.000Z) | max_issues_count: 27 (2015-01-07T01:38:03.000Z to 2021-12-22T19:20:15.000Z) | max_forks_count: 1,582 (2015-01-01T20:37:06.000Z to 2022-03-30T12:29:24.000Z)
content:
import logging
logger1 = logging.getLogger('package1.module1')
logger2 = logging.getLogger('package1.module2')
logging.basicConfig(level=logging.WARNING)
logger1.warning('This is a warning message')
logger2.warning('This is another warning message')
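A minimal follow-up sketch (not in the original example) showing how the logger hierarchy can be used: configuring the shared parent 'package1' changes the effective level of both module loggers at once.

import logging

logging.basicConfig(level=logging.WARNING)
logging.getLogger('package1').setLevel(logging.ERROR)

logging.getLogger('package1.module1').warning('suppressed: below the inherited ERROR level')
logging.getLogger('package1.module2').error('emitted: at the inherited ERROR level')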
avg_line_length: 23.272727 | max_line_length: 52 | alphanum_fraction: 0.792969
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 98/0.382813
---
hexsha: a18c81f3ba8e0a19564872357a93750676c04e10 | size: 862 | ext: py | lang: Python
path: py/foreman/tests/testdata/test_command/pkg1/build.py | repo: clchiou/garage @ 446ff34f86cdbd114b09b643da44988cf5d027a3 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 3 (2016-01-04T06:28:52.000Z to 2020-09-20T13:18:40.000Z) | max_issues_count: null | max_forks_count: null
content:
from pathlib import Path
from foreman import define_parameter, rule, get_relpath
import foreman
if __name__ != 'pkg1':
raise AssertionError(__name__)
if not __file__.endswith('foreman/tests/testdata/test_command/pkg1/build.py'):
raise AssertionError(__file__)
relpath = get_relpath()
if relpath != Path('pkg1'):
raise AssertionError(relpath)
define_parameter('par1').with_derive(lambda ps: get_relpath())
@rule
@rule.depend('//pkg1/pkg2:rule2')
def rule1(parameters):
relpath = get_relpath()
if relpath != Path('pkg1'):
raise AssertionError(relpath)
par1 = parameters['par1']
if par1 != Path('pkg1'):
raise AssertionError(par1)
par2 = parameters['//pkg1/pkg2:par2']
if par2 != Path('pkg1/pkg2'):
raise AssertionError(par2)
# test_build() will check this
foreman._test_ran = True
avg_line_length: 21.02439 | max_line_length: 78 | alphanum_fraction: 0.691415
counts/scores: classes 0/0, generators 0/0, decorators 433/0.50232, async_functions 0/0, documentation 165/0.191415
---
hexsha: a18d2404f6cd1284bac337bd359599e5974dbe24 | size: 11,036 | ext: py | lang: Python
path: python/pyarrow/tests/test_dataset.py | repo: maxburke/arrow @ 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | licenses: ["Apache-2.0"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
import pyarrow.fs as fs
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
@pytest.fixture
@pytest.mark.parquet
def mockfs():
import pyarrow.parquet as pq
mockfs = fs._MockFileSystem()
data = [
list(range(5)),
list(map(float, range(5)))
]
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
batch = pa.record_batch(data, schema=schema)
table = pa.Table.from_batches([batch])
directories = [
'subdir/1/xxx',
'subdir/2/yyy',
]
for i, directory in enumerate(directories):
path = '{}/file{}.parquet'.format(directory, i)
mockfs.create_dir(directory)
with mockfs.open_output_stream(path) as out:
pq.write_table(table, out)
return mockfs
@pytest.fixture
def dataset(mockfs):
format = ds.ParquetFileFormat()
selector = fs.FileSelector('subdir', recursive=True)
options = ds.FileSystemDiscoveryOptions('subdir')
discovery = ds.FileSystemDataSourceDiscovery(mockfs, selector, format,
options)
discovery.partition_scheme = ds.SchemaPartitionScheme(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
source = discovery.finish()
schema = discovery.inspect()
return ds.Dataset([source], schema)
def test_filesystem_data_source(mockfs):
file_format = ds.ParquetFileFormat()
paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
partitions = [ds.ScalarExpression(True), ds.ScalarExpression(True)]
source = ds.FileSystemDataSource(mockfs, paths, partitions,
source_partition=None,
file_format=file_format)
source_partition = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('source'),
ds.ScalarExpression(1337)
)
partitions = [
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('part'),
ds.ScalarExpression(1)
),
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('part'),
ds.ScalarExpression(2)
)
]
source = ds.FileSystemDataSource(mockfs, paths, partitions,
source_partition=source_partition,
file_format=file_format)
assert source.partition_expression.equals(source_partition)
def test_dataset(dataset):
assert isinstance(dataset, ds.Dataset)
assert isinstance(dataset.schema, pa.Schema)
# TODO(kszucs): test non-boolean expressions for filter do raise
builder = dataset.new_scan()
assert isinstance(builder, ds.ScannerBuilder)
scanner = builder.finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for task in scanner.scan():
assert isinstance(task, ds.ScanTask)
for batch in task.execute():
assert batch.column(0).equals(expected_i64)
assert batch.column(1).equals(expected_f64)
table = scanner.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
condition = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('i64'),
ds.ScalarExpression(1)
)
scanner = dataset.new_scan().use_threads(True).filter(condition).finish()
result = scanner.to_table()
assert result.to_pydict() == {
'i64': [1, 1],
'f64': [1., 1.],
'group': [1, 2],
'key': ['xxx', 'yyy']
}
def test_scanner_builder(dataset):
builder = ds.ScannerBuilder(dataset, memory_pool=pa.default_memory_pool())
scanner = builder.finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
with pytest.raises(pa.ArrowInvalid):
dataset.new_scan().project(['unknown'])
builder = dataset.new_scan(memory_pool=pa.default_memory_pool())
scanner = builder.project(['i64']).finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
for task in scanner.scan():
for batch in task.execute():
assert batch.num_columns == 1
def test_abstract_classes():
classes = [
ds.FileFormat,
ds.Scanner,
ds.DataSource,
ds.Expression,
ds.PartitionScheme,
]
for klass in classes:
with pytest.raises(TypeError):
klass()
def test_partition_scheme():
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
for klass in [ds.SchemaPartitionScheme, ds.HivePartitionScheme]:
scheme = klass(schema)
assert isinstance(scheme, ds.PartitionScheme)
scheme = ds.SchemaPartitionScheme(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.float64())
])
)
expr = scheme.parse('/3/3.14')
assert isinstance(expr, ds.Expression)
expected = ds.AndExpression(
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('group'),
ds.ScalarExpression(3)
),
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('key'),
ds.ScalarExpression(3.14)
)
)
assert expr.equals(expected)
with pytest.raises(pa.ArrowInvalid):
scheme.parse('/prefix/3/aaa')
scheme = ds.HivePartitionScheme(
pa.schema([
pa.field('alpha', pa.int64()),
pa.field('beta', pa.int64())
])
)
expr = scheme.parse('/alpha=0/beta=3')
expected = ds.AndExpression(
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('alpha'),
ds.ScalarExpression(0)
),
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('beta'),
ds.ScalarExpression(3)
)
)
assert expr.equals(expected)
def test_expression():
a = ds.ScalarExpression(1)
b = ds.ScalarExpression(1.1)
c = ds.ScalarExpression(True)
equal = ds.ComparisonExpression(ds.CompareOperator.Equal, a, b)
assert equal.op() == ds.CompareOperator.Equal
and_ = ds.AndExpression(a, b)
assert and_.left_operand.equals(a)
assert and_.right_operand.equals(b)
assert and_.equals(ds.AndExpression(a, b))
assert and_.equals(and_)
ds.AndExpression(a, b, c)
ds.OrExpression(a, b)
ds.OrExpression(a, b, c)
ds.NotExpression(ds.OrExpression(a, b, c))
ds.IsValidExpression(a)
ds.CastExpression(a, pa.int32())
ds.CastExpression(a, pa.int32(), safe=True)
ds.InExpression(a, pa.array([1, 2, 3]))
condition = ds.ComparisonExpression(
ds.CompareOperator.Greater,
ds.FieldExpression('i64'),
ds.ScalarExpression(5)
)
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
assert condition.validate(schema) == pa.bool_()
i64_is_5 = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('i64'),
ds.ScalarExpression(5)
)
i64_is_7 = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('i64'),
ds.ScalarExpression(7)
)
assert condition.assume(i64_is_5).equals(ds.ScalarExpression(False))
assert condition.assume(i64_is_7).equals(ds.ScalarExpression(True))
assert str(condition) == "(i64 > 5:int64)"
@pytest.mark.parametrize('paths_or_selector', [
fs.FileSelector('subdir', recursive=True),
[
'subdir',
'subdir/1',
'subdir/1/xxx',
'subdir/1/xxx/file0.parquet',
'subdir/2',
'subdir/2/yyy',
'subdir/2/yyy/file1.parquet',
]
])
def test_file_system_discovery(mockfs, paths_or_selector):
format = ds.ParquetFileFormat()
options = ds.FileSystemDiscoveryOptions('subdir')
assert options.partition_base_dir == 'subdir'
assert options.ignore_prefixes == ['.', '_']
assert options.exclude_invalid_files is True
discovery = ds.FileSystemDataSourceDiscovery(
mockfs, paths_or_selector, format, options
)
assert isinstance(discovery.inspect(), pa.Schema)
assert isinstance(discovery.inspect_schemas(), list)
assert isinstance(discovery.finish(), ds.FileSystemDataSource)
assert isinstance(discovery.partition_scheme, ds.DefaultPartitionScheme)
assert discovery.root_partition.equals(ds.ScalarExpression(True))
discovery.partition_scheme = ds.SchemaPartitionScheme(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
data_source = discovery.finish()
assert isinstance(data_source, ds.DataSource)
inspected_schema = discovery.inspect()
dataset = ds.Dataset([data_source], inspected_schema)
scanner = dataset.new_scan().finish()
assert len(list(scanner.scan())) == 2
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for task, group, key in zip(scanner.scan(), [1, 2], ['xxx', 'yyy']):
expected_group_column = pa.array([group] * 5, type=pa.int32())
expected_key_column = pa.array([key] * 5, type=pa.string())
for batch in task.execute():
assert batch.num_columns == 4
assert batch[0].equals(expected_i64)
assert batch[1].equals(expected_f64)
assert batch[2].equals(expected_group_column)
assert batch[3].equals(expected_key_column)
table = scanner.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
assert table.num_columns == 4
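A small self-contained sketch of the table construction used by the mockfs fixture above, plus an in-memory Parquet round trip; this uses plain pyarrow/pyarrow.parquet and is independent of the experimental ds API exercised by these tests:

import pyarrow as pa
import pyarrow.parquet as pq

schema = pa.schema([pa.field('i64', pa.int64()), pa.field('f64', pa.float64())])
batch = pa.record_batch([list(range(5)), list(map(float, range(5)))], schema=schema)
table = pa.Table.from_batches([batch])

sink = pa.BufferOutputStream()
pq.write_table(table, sink)                          # same call the fixture uses per file
roundtrip = pq.read_table(pa.BufferReader(sink.getvalue()))
assert roundtrip.equals(table)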
avg_line_length: 31.175141 | max_line_length: 78 | alphanum_fraction: 0.632476
counts/scores: classes 0/0, generators 0/0, decorators 3,565/0.323034, async_functions 0/0, documentation 1,457/0.132022
---
hexsha: a18f308a306f458e03d32285aa21896641d7fc85 | size: 400 | ext: py | lang: Python
path: stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py | licenses: ["MIT"]
stars repo: zhi-xianwei/learn_python3_spider @ a3301f8112e4ded25c3578162db8c6a263a0693b | max_stars_count: 9,953 (2019-04-03T23:41:04.000Z to 2022-03-31T11:54:44.000Z)
issues repo: W4LKURE/learn_python3_spider @ 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | max_issues_count: 44 (2019-05-27T10:59:29.000Z to 2022-03-31T14:14:29.000Z)
forks repo: W4LKURE/learn_python3_spider @ 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | max_forks_count: 2,803 (2019-04-06T13:15:33.000Z to 2022-03-31T07:42:01.000Z)
content:
"""
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.html instead of this module
"""
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from w3lib.html import *
warnings.warn("Module `scrapy.utils.markup` is deprecated. "
"Please import from `w3lib.html` instead.",
ScrapyDeprecationWarning, stacklevel=2)
avg_line_length: 28.571429 | max_line_length: 66 | alphanum_fraction: 0.7375
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 216/0.54
---
hexsha: a190762c1566ca65105a3350c21b6933040e5549 | size: 2,362 | ext: py | lang: Python
path: scripts/option_normal_model.py | repo: jcoffi/FuturesAndOptionsTradingSimulation @ e02fdbe8c40021785a2a1dae56ff4b72f2d47c30 | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups)
max_stars_count: 14 (2017-02-16T15:13:53.000Z to 2021-05-26T11:34:09.000Z) | max_issues_count: null | max_forks_count: 10 (2016-08-05T07:37:07.000Z to 2021-11-26T17:31:48.000Z)
content:
#IMPORT log and sqrt FROM math MODULE
from math import log, sqrt, exp, pi
#IMPORT date AND timedelta FOR HANDLING EXPIRY TIMES
from datetime import date, timedelta
#IMPORT SciPy stats MODULE
from scipy import stats
def asian_vol_factor(valDate,startDate,endDate):
    #VALIDATE START DATE RELATIVE TO END DATE AND RETURN NO IMPACT IF ODD
    if startDate > endDate: return 1
    #T = DAYS TO EXPIRY, L = LENGTH OF THE AVERAGING PERIOD (.days IS AN ATTRIBUTE, NOT A METHOD)
    T = (endDate - valDate).days
    L = (endDate - startDate).days
    if T > L:
        return sqrt(((T - L + 1) * L ** 2 + L * (L - 1) * (2 * L - 1) / 6) / (L ** 2 * T))
    else:
        return sqrt((T + 1) * (2*T + 1) / (6 * L ** 2))
#STANDARD NORMAL DENSITY
def F(z):
    return (1/sqrt(2*pi)) * exp(-(z ** 2) / 2)
def option_price_normal(forward,strike,vol,rate,tenor,sign):
if vol == 0:
return sign * (forward - strike)
#sign = +1 for calls and -1 for puts
d1 = (forward - strike) / (vol * sqrt(tenor))
sameTerm = (vol * sqrt(tenor) * exp(-1*d1*d1/2)) / sqrt(2*3.141592653589793)
return exp(-1 * rate * tenor) * (sign * (forward - strike) * stats.norm.cdf(sign * d1) + sameTerm)
def option_implied_vol_normal(forward,strike,price,rate,tenor,sign):
#print 'imp vol calc:',forward,strike,price,rate,tenor,sign
price_err_limit = price/10000
iteration_limit = 20
vmax = 1.0 #START SEARCH FOR UPPER VOL BOUND AT 100%
tprice = 0
    while option_price_normal(forward,strike,vmax,rate,tenor,sign) < price:
vmax += 1
if vmax > iteration_limit: return -1 #ERROR CONDITION
#print 'vmax',vmax
vmin = vmax - 1
vmid = (vmin + vmax)/2
    tprice = option_price_normal(forward,strike,vmid,rate,tenor,sign)
count = 1
while abs(tprice - price) > price_err_limit:
if tprice > price:
vmax = vmid
else:
vmin = vmid
vmid = (vmin + vmax)/2
count = count + 1
        if count > iteration_limit:
            print('option_implied_vol: search iter limit reached')
            print(forward, strike, price, rate, tenor, sign)
            return vmid  #EXIT CONDITION
tprice = option_price_normal(forward,strike,vmid,rate,tenor,sign)
#print 'imp_vol = ',vmid
return vmid
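Worked sanity check (not in the original script): for an at-the-money option with zero rate, the normal (Bachelier) model price reduces to vol * sqrt(tenor) / sqrt(2*pi).

from math import sqrt, pi

forward = strike = 100.0
vol, rate, tenor = 20.0, 0.0, 0.25
atm_price = option_price_normal(forward, strike, vol, rate, tenor, +1)
closed_form = vol * sqrt(tenor) / sqrt(2 * pi)
print(atm_price, closed_form)   # both approximately 3.989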
avg_line_length: 38.721311 | max_line_length: 104 | alphanum_fraction: 0.640559
counts/scores: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 440/0.186283
---
hexsha: a19170892d787db003456b529cd07f4fcdc77170 | size: 27,286 | ext: py | lang: Python
path: code/tasks/VNLA/oracle.py | repo: Chucooleg/vnla @ b9c1367b263f00a38828ff24cefc8becc149be7a | licenses: ["MIT"]
(same path/repo/licenses across the stars, issues and forks column groups; all counts and event datetimes: null)
content:
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import math
import networkx as nx
import functools
import scipy.stats
import random
import sys
import copy
import numpy as np
import torch
import utils
try:
sys.path.append('/opt/MatterSim/build/') # local docker or Philly
import MatterSim
except:
# local conda env only
sys.path.append('/home/hoyeung/Documents/vnla/code/build')
import MatterSim
class ShortestPathOracle(object):
''' Shortest navigation teacher '''
def __init__(self, agent_nav_actions, env_nav_actions=None):
self.scans = set()
self.graph = {}
self.paths = {}
self.distances = {}
self.agent_nav_actions = agent_nav_actions
if env_nav_actions is not None:
self.env_nav_actions = env_nav_actions
def add_scans(self, scans, path=None):
new_scans = set.difference(scans, self.scans)
if new_scans:
print('Loading navigation graphs for %d scans' % len(new_scans))
for scan in new_scans:
graph, paths, distances = self._compute_shortest_paths(scan, path=path)
self.graph[scan] = graph
self.paths[scan] = paths
self.distances[scan] = distances
self.scans.update(new_scans)
def _compute_shortest_paths(self, scan, path=None):
''' Load connectivity graph for each scan, useful for reasoning about shortest paths '''
graph = utils.load_nav_graphs(scan, path=path)
paths = dict(nx.all_pairs_dijkstra_path(graph))
distances = dict(nx.all_pairs_dijkstra_path_length(graph))
return graph, paths, distances
def _find_nearest_point(self, scan, start_point, end_points):
best_d = 1e9
best_point = None
for end_point in end_points:
d = self.distances[scan][start_point][end_point]
if d < best_d:
best_d = d
best_point = end_point
return best_d, best_point
def _find_nearest_point_on_a_path(self, scan, current_point, start_point, goal_point):
path = self.paths[scan][start_point][goal_point]
return self._find_nearest_point(scan, current_point, path)
def _shortest_path_action(self, ob):
''' Determine next action on the shortest path to goals. '''
scan = ob['scan']
start_point = ob['viewpoint']
# Find nearest goal
_, goal_point = self._find_nearest_point(scan, start_point, ob['goal_viewpoints'])
# Stop if a goal is reached
if start_point == goal_point:
return (0, 0, 0)
path = self.paths[scan][start_point][goal_point]
next_point = path[1]
# Can we see the next viewpoint?
for i, loc in enumerate(ob['navigableLocations']):
if loc.viewpointId == next_point:
# Look directly at the viewpoint before moving
if loc.rel_heading > math.pi/6.0:
return (0, 1, 0) # Turn right
elif loc.rel_heading < -math.pi/6.0:
return (0,-1, 0) # Turn left
elif loc.rel_elevation > math.pi/6.0 and ob['viewIndex'] // 12 < 2:
return (0, 0, 1) # Look up
elif loc.rel_elevation < -math.pi/6.0 and ob['viewIndex'] // 12 > 0:
return (0, 0,-1) # Look down
else:
return (i, 0, 0) # Move
# Can't see it - first neutralize camera elevation
if ob['viewIndex'] // 12 == 0:
return (0, 0, 1) # Look up
elif ob['viewIndex'] // 12 == 2:
return (0, 0,-1) # Look down
# If camera is already neutralized, decide which way to turn
target_rel = self.graph[ob['scan']].node[next_point]['position'] - ob['point'] # state.location.point
        # Convert the relative displacement into a compass-style heading (radians clockwise from the y-axis)
target_heading = math.pi / 2.0 - math.atan2(target_rel[1], target_rel[0])
if target_heading < 0:
target_heading += 2.0 * math.pi
if ob['heading'] > target_heading and ob['heading'] - target_heading < math.pi:
return (0, -1, 0) # Turn left
if target_heading > ob['heading'] and target_heading - ob['heading'] > math.pi:
return (0, -1, 0) # Turn left
return (0, 1, 0) # Turn right
def _map_env_action_to_agent_action(self, action, ob):
ix, heading_chg, elevation_chg = action
if heading_chg > 0:
return self.agent_nav_actions.index('right')
if heading_chg < 0:
return self.agent_nav_actions.index('left')
if elevation_chg > 0:
return self.agent_nav_actions.index('up')
if elevation_chg < 0:
return self.agent_nav_actions.index('down')
if ix > 0:
return self.agent_nav_actions.index('forward')
if ob['ended']:
return self.agent_nav_actions.index('<ignore>')
return self.agent_nav_actions.index('<end>')
def interpret_agent_action(self, action_idx, ob):
'''Translate action index back to env action for simulator to take'''
# If the action is not `forward`, simply map it to the simulator's
# action space
if action_idx != self.agent_nav_actions.index('forward'):
return self.env_nav_actions[action_idx]
# If the action is forward, more complicated
scan = ob['scan']
start_point = ob['viewpoint']
# Find nearest goal view point
_, goal_point = self._find_nearest_point(scan, start_point, ob['goal_viewpoints'])
optimal_path = self.paths[scan][start_point][goal_point]
# If the goal is right in front of us, go to it.
# The dataset guarantees that the goal is always reachable.
if len(optimal_path) < 2:
return (1, 0, 0)
next_optimal_point = optimal_path[1]
# If the next optimal viewpoint is within 30 degrees of
# the center of the view, go to it.
for i, loc in enumerate(ob['navigableLocations']):
if loc.viewpointId == next_optimal_point:
if loc.rel_heading > math.pi/6.0 or loc.rel_heading < -math.pi/6.0 or \
(loc.rel_elevation > math.pi/6.0 and ob['viewIndex'] // 12 < 2) or \
(loc.rel_elevation < -math.pi/6.0 and ob['viewIndex'] // 12 > 0):
continue
else:
return (i, 0, 0)
# Otherwise, go the navigable (seeable) viewpt that has the least angular distance from the center of the current image (viewpt).
return (1, 0, 0)
def __call__(self, obs):
self.actions = list(map(self._shortest_path_action, obs))
return list(map(self._map_env_action_to_agent_action, self.actions, obs))
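# For reference, a tiny self-contained sketch (not part of this module) of the networkx
# calls that _compute_shortest_paths() relies on; the toy graph and weights below are
# made up for illustration:
#
#   import networkx as nx
#   G = nx.Graph()
#   G.add_weighted_edges_from([("a", "b", 1.0), ("b", "c", 2.0), ("a", "c", 5.0)])
#   paths = dict(nx.all_pairs_dijkstra_path(G))
#   distances = dict(nx.all_pairs_dijkstra_path_length(G))
#   print(paths["a"]["c"])       # ['a', 'b', 'c'], cheaper than the direct edge
#   print(distances["a"]["c"])   # 3.0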
class FrontierShortestPathsOracle(ShortestPathOracle):
def __init__(self, agent_nav_actions, env_nav_actions=None):
super(FrontierShortestPathsOracle, self).__init__(agent_nav_actions, env_nav_actions)
# self.env_nav_actions = env_nav_actions
self.valid_rotation_action_indices = [self.agent_nav_actions.index(r) for r in ('left', 'right', 'up', 'down', '<ignore>')]
# inherit parent add_scans() function
def interpret_agent_rotations(self, rotation_action_indices, ob):
'''
rotation_action_indices : a list of int action indices
Returns:
list of fixed length agent.max_macro_action_seq_len (e.g. 8)
e.g. [(0, 1, 0), (0, 1, -1), ..... (0,0,0)]
e.g. [(0,0,0), ... (0,0,0)] if ob has ended.
'''
max_macro_action_seq_len = len(rotation_action_indices)
# [(0,0,0)] * 8
macro_rotations = [self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]] * max_macro_action_seq_len
if not ob['ended']:
for i, action_idx in enumerate(rotation_action_indices):
assert action_idx in self.valid_rotation_action_indices
macro_rotations[i] = self.env_nav_actions[action_idx]
return macro_rotations
def interpret_agent_forward(self, ob):
'''
Returns:
(0, 0, 0) to ignore if trajectory has already ended
or
(1, 0, 0) to step forward to the direct facing vertex
'''
if ob['ended']:
return self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]
else:
return self.env_nav_actions[self.agent_nav_actions.index('forward')]
def make_explore_instructions(self, obs):
'''
Make env level rotation instructions of each ob to explore its own panoramic sphere. The output should be informative enough for agent to collect information from all 36 facets of its panoramic sphere.
Returns:
heading_adjusts: list len=batch_size, each an env action tuple.
elevation_adjusts_1: same.
elevation_adjusts_2: list len=batch_size, each either a single action tuple e.g.(0,1,0), or double action tuple e.g.((0,0,-1), (0,0,-1)).
'''
batch_size = len(obs)
# How agent explore the entire pano sphere
# Right*11, Up/Down, Right*11, Up/Down (*2), Right*11
heading_adjusts = [()] * batch_size
elevation_adjusts_1 = [()] * batch_size
elevation_adjusts_2 = [()] * batch_size
# (0,0,1)
up_tup = self.env_nav_actions[self.agent_nav_actions.index('up')]
# (0,0,-1)
down_tup = self.env_nav_actions[self.agent_nav_actions.index('down')]
# (0,1,0)
right_tup = self.env_nav_actions[self.agent_nav_actions.index('right')]
# (0,0,0)
ignore_tup = self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]
# Loop through each ob in the batch
for i, ob in enumerate(obs):
if ob['ended']:
# don't move at all.
heading_adjusts[i] = ignore_tup
elevation_adjusts_1[i] = ignore_tup
elevation_adjusts_2[i] = ignore_tup
else:
# turn right for 11 times at every elevation level.
heading_adjusts[i] = right_tup
# check agent elevation
if ob['viewIndex'] // 12 == 0:
# facing down, so need to look up twice.
elevation_adjusts_1[i] = up_tup
elevation_adjusts_2[i] = up_tup
elif ob['viewIndex'] // 12 == 2:
# facing up, so need to look down twice.
elevation_adjusts_1[i] = down_tup
elevation_adjusts_2[i] = down_tup
else:
# neutral, so need to look up once, and then look down twice
elevation_adjusts_1[i] = up_tup
elevation_adjusts_2[i] = (down_tup, down_tup)
return heading_adjusts, elevation_adjusts_1, elevation_adjusts_2
def compute_frontier_cost_single(self, ob, next_viewpoint_index_str):
'''
next_viewpoint_index_str: single str indicating viewpoint index.
e.g. '1e6b606b44df4a6086c0f97e826d4d15'
'''
# current point to next point
cost_stepping = self.distances[ob['scan']][ob['viewpoint']][next_viewpoint_index_str]
# next point to the closest goal
cost_togo, _ = self._find_nearest_point(ob['scan'], next_viewpoint_index_str, ob['goal_viewpoints'])
assert cost_stepping > 0 and cost_togo >= 0
return cost_togo , cost_stepping
def compute_frontier_costs(self, obs, viewix_next_vertex_map, timestep=None):
'''
For each ob, compute:
cost = cost-to-go + cost-stepping for all reachable vertices
'''
assert len(obs) == len(viewix_next_vertex_map)
# arr shape (batch_size, 36)
q_values_target_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, 36)
cost_togos_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, 36)
cost_stepping_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, )
end_target_batch = np.array([False for _ in range(len(obs))])
# Loop through batch
for i, ob in enumerate(obs):
# NOTE ended ob won't be added to hist buffer for training
if not ob['ended']:
costs = []
cost_togos = []
cost_steppings = []
for proposed_vertex in viewix_next_vertex_map[i]:
if proposed_vertex == '':
costs.append(1e9)
cost_togos.append(1e9)
cost_steppings.append(1e9)
else:
# add up cost-togo + cost-stepping
cost_togo , cost_stepping = self.compute_frontier_cost_single(ob, proposed_vertex)
costs.append(cost_togo + cost_stepping)
# keep tab cost-togo to determine ending later
cost_togos.append(cost_togo)
cost_steppings.append(cost_stepping)
assert len(cost_togos) == len(viewix_next_vertex_map[0]) # 36
assert len(cost_steppings) == len(viewix_next_vertex_map[0]) # 36
assert len(costs) == len(viewix_next_vertex_map[0]) # 36
q_values_target_batch[i, :] = costs
# get min costs for each row
# if the min index of costs also has a cost-togo = 0, then mark end for this row in end_target
end_target_batch[i] = cost_togos[costs.index(min(costs))] == 0
# for results logging
cost_togos_batch[i] = cost_togos
cost_stepping_batch[i] = cost_steppings
return q_values_target_batch, end_target_batch, cost_togos_batch, cost_stepping_batch
def _map_env_action_to_agent_action(self, action):
'''
Translate rotation env action seq into agent action index seq.
'''
ix, heading_chg, elevation_chg = action
assert ix == 0, 'Accept only rotation or ignore actions'
assert heading_chg == 0 or elevation_chg == 0, 'Accept only one rotation action at a time'
if heading_chg > 0:
return self.agent_nav_actions.index('right')
if heading_chg < 0:
return self.agent_nav_actions.index('left')
if elevation_chg > 0:
return self.agent_nav_actions.index('up')
if elevation_chg < 0:
return self.agent_nav_actions.index('down')
else:
return self.agent_nav_actions.index('<ignore>')
def translate_env_actions(self, obs, viewix_env_actions_map, max_macro_action_seq_len, sphere_size):
'''
viewix_env_actions_map : list (batch_size, 36, varies). Each [(0,1,0), (0,0,-1), ...]
Returns:
viewix_actions_map : array shape(36, batch_size, self.max_macro_action_seq_len)
'''
# tensor shape(36, batch_size, self.max_macro_action_seq_len)
viewix_actions_map = np.ones((sphere_size, len(obs), max_macro_action_seq_len), dtype='int') * \
self.agent_nav_actions.index('<ignore>')
for i, ob in enumerate(obs): # 1-100
if not ob['ended']:
for j, env_action_tup_seq in enumerate(viewix_env_actions_map[i]): # 1-36
assert len(env_action_tup_seq) <= 8
# map seq, length varies
agent_action_seq = list(map(self._map_env_action_to_agent_action, env_action_tup_seq))
assert len(agent_action_seq) <= 8
# assign action index, seq is already padded to 8 during initialization
viewix_actions_map[j, i, :len(agent_action_seq)] = agent_action_seq
return viewix_actions_map
class AskOracle(object):
DONT_ASK = 0
ASK = 1
def __init__(self, hparams, agent_ask_actions):
self.deviate_threshold = hparams.deviate_threshold
self.uncertain_threshold = hparams.uncertain_threshold
self.unmoved_threshold = hparams.unmoved_threshold
self.agent_ask_actions = agent_ask_actions
self.rule_a_e = hasattr(hparams, 'rule_a_e') and hparams.rule_a_e
self.rule_b_d = hasattr(hparams, 'rule_b_d') and hparams.rule_b_d
def _should_ask_rule_a_e(self, ob, nav_oracle=None):
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
scan = ob['scan']
current_point = ob['viewpoint']
_, goal_point = nav_oracle._find_nearest_point(scan, current_point, ob['goal_viewpoints'])
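        # Rule (e): ask if the goal has been reached but the agent decides to go forward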
agent_decision = int(np.argmax(ob['nav_dist']))
if current_point == goal_point and \
agent_decision == nav_oracle.agent_nav_actions.index('forward'):
return self.ASK, 'arrive'
start_point = ob['init_viewpoint']
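        # Rule (a): ask if the agent deviates too far from the optimal path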
d, _ = nav_oracle._find_nearest_point_on_a_path(scan, current_point, start_point, goal_point)
if d > self.deviate_threshold:
return self.ASK, 'deviate'
return self.DONT_ASK, 'pass'
def _should_ask_rule_b_d(self, ob, nav_oracle=None):
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
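        # Rule (b): ask if uncertain (nav distribution close to uniform)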
agent_dist = ob['nav_dist']
uniform = [1. / len(agent_dist)] * len(agent_dist)
entropy_gap = scipy.stats.entropy(uniform) - scipy.stats.entropy(agent_dist)
if entropy_gap < self.uncertain_threshold - 1e-9:
return self.ASK, 'uncertain'
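        # Rule (c): ask if not moving for too long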
if len(ob['agent_path']) >= self.unmoved_threshold:
last_nodes = [t[0] for t in ob['agent_path']][-self.unmoved_threshold:]
if all(node == last_nodes[0] for node in last_nodes):
return self.ASK, 'unmoved'
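        # Rule (d): ask to spend all budget at the end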
if ob['queries_unused'] >= ob['traj_len'] - ob['time_step']:
return self.ASK, 'why_not'
return self.DONT_ASK, 'pass'
def _should_ask(self, ob, nav_oracle=None):
if self.rule_a_e:
return self._should_ask_rule_a_e(ob, nav_oracle=nav_oracle)
if self.rule_b_d:
return self._should_ask_rule_b_d(ob, nav_oracle=nav_oracle)
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
# Find nearest point on the current shortest path
scan = ob['scan']
current_point = ob['viewpoint']
# Find nearest goal to current point
_, goal_point = nav_oracle._find_nearest_point(scan, current_point, ob['goal_viewpoints'])
# Rule (e): ask if the goal has been reached but the agent decides to
# go forward
agent_decision = int(np.argmax(ob['nav_dist']))
if current_point == goal_point and \
agent_decision == nav_oracle.agent_nav_actions.index('forward'):
return self.ASK, 'arrive'
start_point = ob['init_viewpoint']
# Find closest point to the current point on the path from start point
# to goal point
d, _ = nav_oracle._find_nearest_point_on_a_path(scan, current_point,
start_point, goal_point)
# Rule (a): ask if the agent deviates too far from the optimal path
if d > self.deviate_threshold:
return self.ASK, 'deviate'
# Rule (b): ask if uncertain
agent_dist = ob['nav_dist']
uniform = [1. / len(agent_dist)] * len(agent_dist)
entropy_gap = scipy.stats.entropy(uniform) - scipy.stats.entropy(agent_dist)
if entropy_gap < self.uncertain_threshold - 1e-9:
return self.ASK, 'uncertain'
# Rule (c): ask if not moving for too long
if len(ob['agent_path']) >= self.unmoved_threshold:
last_nodes = [t[0] for t in ob['agent_path']][-self.unmoved_threshold:]
if all(node == last_nodes[0] for node in last_nodes):
return self.ASK, 'unmoved'
# Rule (d): ask to spend all budget at the end
if ob['queries_unused'] >= ob['traj_len'] - ob['time_step']:
return self.ASK, 'why_not'
return self.DONT_ASK, 'pass'
def _map_env_action_to_agent_action(self, action, ob):
if ob['ended']:
return self.agent_ask_actions.index('<ignore>')
if action == self.DONT_ASK:
return self.agent_ask_actions.index('dont_ask')
return self.agent_ask_actions.index('ask')
def __call__(self, obs, nav_oracle):
should_ask_fn = functools.partial(self._should_ask, nav_oracle=nav_oracle)
actions, reasons = zip(*list(map(should_ask_fn, obs)))
actions = list(map(self._map_env_action_to_agent_action, actions, obs))
return actions, reasons
class MultistepShortestPathOracle(ShortestPathOracle):
'''For Ask Agents with direct advisors'''
def __init__(self, n_steps, agent_nav_actions, env_nav_actions):
super(MultistepShortestPathOracle, self).__init__(agent_nav_actions)
self.sim = MatterSim.Simulator()
self.sim.setRenderingEnabled(False)
self.sim.setDiscretizedViewingAngles(True)
self.sim.setCameraResolution(640, 480)
self.sim.setCameraVFOV(math.radians(60))
self.sim.setNavGraphPath(
os.path.join(os.getenv('PT_DATA_DIR'), 'connectivity'))
self.sim.init()
self.n_steps = n_steps
self.env_nav_actions = env_nav_actions
def _shortest_path_actions(self, ob):
actions = []
self.sim.newEpisode(ob['scan'], ob['viewpoint'], ob['heading'], ob['elevation'])
assert not ob['ended']
for _ in range(self.n_steps):
# Query oracle for next action
action = self._shortest_path_action(ob)
# Convert to agent action
agent_action = self._map_env_action_to_agent_action(action, ob)
actions.append(agent_action)
# Take action
self.sim.makeAction(*action)
if action == (0, 0, 0):
break
state = self.sim.getState()
ob = {
'viewpoint': state.location.viewpointId,
'viewIndex': state.viewIndex,
'heading' : state.heading,
'elevation': state.elevation,
'navigableLocations': state.navigableLocations,
'point' : state.location.point,
'ended' : ob['ended'] or action == (0, 0, 0),
'goal_viewpoints': ob['goal_viewpoints'],
'scan' : ob['scan']
}
return actions
def __call__(self, ob):
return self._shortest_path_actions(ob)
class NextOptimalOracle(object):
def __init__(self, hparams, agent_nav_actions, env_nav_actions,
agent_ask_actions):
self.type = 'next_optimal'
self.ask_oracle = make_oracle('ask', hparams, agent_ask_actions)
self.nav_oracle = make_oracle('shortest', agent_nav_actions, env_nav_actions)
def __call__(self, obs):
ask_actions, ask_reasons = self.ask_oracle(obs, self.nav_oracle)
self.nav_oracle.add_scans(set(ob['scan'] for ob in obs))
nav_actions = self.nav_oracle(obs)
return nav_actions, ask_actions, ask_reasons
def add_scans(self, scans):
self.nav_oracle.add_scans(scans)
def next_ask(self, obs):
return self.ask_oracle(obs, self.nav_oracle)
def next_nav(self, obs):
return self.nav_oracle(obs)
def interpret_agent_action(self, *args, **kwargs):
return self.nav_oracle.interpret_agent_action(*args, **kwargs)
class StepByStepSubgoalOracle(object):
def __init__(self, n_steps, agent_nav_actions, env_nav_actions, mode=None):
self.type = 'step_by_step'
self.nav_oracle = make_oracle('direct', n_steps, agent_nav_actions, env_nav_actions)
self.agent_nav_actions = agent_nav_actions
if mode == 'easy':
self._map_actions_to_instruction = self._map_actions_to_instruction_easy
elif mode == 'hard':
self._map_actions_to_instruction = self._map_actions_to_instruction_hard
else:
sys.exit('unknown step by step mode!')
def add_scans(self, scans):
self.nav_oracle.add_scans(scans)
def _make_action_name(self, a):
action_name = self.agent_nav_actions[a]
if action_name in ['up', 'down']:
return 'look ' + action_name
elif action_name in ['left', 'right']:
return 'turn ' + action_name
elif action_name == 'forward':
return 'go ' + action_name
elif action_name == '<end>':
return 'stop'
elif action_name == '<ignore>':
return ''
return None
def _map_actions_to_instruction_hard(self, actions):
agg_actions = []
cnt = 1
for i in range(1, len(actions)):
if actions[i] != actions[i - 1]:
agg_actions.append((actions[i - 1], cnt))
cnt = 1
else:
cnt += 1
agg_actions.append((actions[-1], cnt))
instruction = []
for a, c in agg_actions:
action_name = self._make_action_name(a)
if c > 1:
if 'turn' in action_name:
degree = 30 * c
if 'left' in action_name:
instruction.append('turn %d degrees left' % degree)
elif 'right' in action_name:
instruction.append('turn %d degrees right' % degree)
else:
raise ValueError('action name {} error'.format(action_name))
elif 'go' in action_name:
instruction.append('%s %d steps' % (action_name, c))
elif action_name != '':
instruction.append(action_name)
return ' , '.join(instruction)
def _map_actions_to_instruction_easy(self, actions):
instruction = []
for a in actions:
instruction.append(self._make_action_name(a))
return ' , '.join(instruction)
def __call__(self, ob):
action_seq = self.nav_oracle(ob)
verbal_instruction = self._map_actions_to_instruction(action_seq)
return action_seq, verbal_instruction
def make_oracle(oracle_type, *args, **kwargs):
if oracle_type == 'shortest':
return ShortestPathOracle(*args, **kwargs)
if oracle_type == 'next_optimal':
return NextOptimalOracle(*args, **kwargs)
if oracle_type == 'ask':
return AskOracle(*args, **kwargs)
if oracle_type == 'direct':
return MultistepShortestPathOracle(*args, **kwargs)
if oracle_type == 'verbal':
return StepByStepSubgoalOracle(*args, **kwargs)
if oracle_type == 'frontier_shortest':
return FrontierShortestPathsOracle(*args, **kwargs)
# TODO implement next
# if oracle_type == 'diverse_shortest':
# return DiverseShortestPathsOracle(*args, **kwargs)
return None
| 40.66468
| 209
| 0.606025
| 26,086
| 0.956021
| 0
| 0
| 0
| 0
| 0
| 0
| 6,351
| 0.232757
|
a191825d6c6da2861f6e74b98531a8374cb67f95
| 7,124
|
py
|
Python
|
unit-tests/controller.py
|
HimariO/VideoSum
|
3a81276df3b429c24ebf9a1841b5a9168c0c3ccf
|
[
"MIT"
] | null | null | null |
unit-tests/controller.py
|
HimariO/VideoSum
|
3a81276df3b429c24ebf9a1841b5a9168c0c3ccf
|
[
"MIT"
] | null | null | null |
unit-tests/controller.py
|
HimariO/VideoSum
|
3a81276df3b429c24ebf9a1841b5a9168c0c3ccf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import unittest
from dnc.controller import BaseController
class DummyController(BaseController):
def network_vars(self):
self.W = tf.Variable(tf.truncated_normal([self.nn_input_size, 64]))
self.b = tf.Variable(tf.zeros([64]))
def network_op(self, X):
return tf.matmul(X, self.W) + self.b
class DummyRecurrentController(BaseController):
def network_vars(self):
self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(64)
self.state = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)
self.output = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)
def network_op(self, X, state):
X = tf.convert_to_tensor(X)
return self.lstm_cell(X, state)
def update_state(self, new_state):
return tf.group(
self.output.assign(new_state[0]),
self.state.assign(new_state[1])
)
def get_state(self):
return (self.output, self.state)
class DNCControllerTest(unittest.TestCase):
def test_construction(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertFalse(controller.has_recurrent_nn)
self.assertEqual(controller.nn_input_size, 20)
self.assertEqual(controller.interface_vector_size, 38)
self.assertEqual(controller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(controller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(controller.mem_output_weights.get_shape().as_list(), [10, 10])
self.assertTrue(rcontroller.has_recurrent_nn)
self.assertEqual(rcontroller.nn_input_size, 20)
self.assertEqual(rcontroller.interface_vector_size, 38)
self.assertEqual(rcontroller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(rcontroller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(rcontroller.mem_output_weights.get_shape().as_list(), [10, 10])
def test_get_nn_output_size(self):
graph = tf.Graph()
with graph.as_default():
            with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertEqual(controller.get_nn_output_size(), 64)
self.assertEqual(rcontroller.get_nn_output_size(), 64)
def test_parse_interface_vector(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
zeta = np.random.uniform(-2, 2, (2, 38)).astype(np.float32)
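                # recompute the expected parsed values in numpy: oneplus (1 + softplus) for
                # strengths, sigmoid for the gates and erase vector, softmax over the read modes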
read_keys = np.reshape(zeta[:, :10], (-1, 5, 2))
read_strengths = 1 + np.log(np.exp(np.reshape(zeta[:, 10:12], (-1, 2, ))) + 1)
write_key = np.reshape(zeta[:, 12:17], (-1, 5, 1))
write_strength = 1 + np.log(np.exp(np.reshape(zeta[:, 17], (-1, 1))) + 1)
erase_vector = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 18:23], (-1, 5))))
write_vector = np.reshape(zeta[:, 23:28], (-1, 5))
free_gates = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 28:30], (-1, 2))))
allocation_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 30, np.newaxis]))
write_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 31, np.newaxis]))
read_modes = np.reshape(zeta[:, 32:], (-1, 3, 2))
read_modes = np.transpose(read_modes, [0, 2, 1])
read_modes = np.reshape(read_modes, (-1, 3))
read_modes = np.exp(read_modes) / np.sum(np.exp(read_modes), axis=-1, keepdims=True)
read_modes = np.reshape(read_modes, (2, 2, 3))
read_modes = np.transpose(read_modes, [0, 2, 1])
op = controller.parse_interface_vector(zeta)
session.run(tf.initialize_all_variables())
parsed = session.run(op)
self.assertTrue(np.allclose(parsed['read_keys'], read_keys))
self.assertTrue(np.allclose(parsed['read_strengths'], read_strengths))
self.assertTrue(np.allclose(parsed['write_key'], write_key))
self.assertTrue(np.allclose(parsed['write_strength'], write_strength))
self.assertTrue(np.allclose(parsed['erase_vector'], erase_vector))
self.assertTrue(np.allclose(parsed['write_vector'], write_vector))
self.assertTrue(np.allclose(parsed['free_gates'], free_gates))
self.assertTrue(np.allclose(parsed['allocation_gate'], allocation_gate))
self.assertTrue(np.allclose(parsed['write_gate'], write_gate))
self.assertTrue(np.allclose(parsed['read_modes'], read_modes))
def test_process_input(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 2)
input_batch = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
last_read_vectors = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
v_op, zeta_op = controller.process_input(input_batch, last_read_vectors)
rv_op, rzeta_op, rs_op = rcontroller.process_input(input_batch, last_read_vectors, rcontroller.get_state())
session.run(tf.initialize_all_variables())
v, zeta = session.run([v_op, zeta_op])
rv, rzeta, rs = session.run([rv_op, rzeta_op, rs_op])
self.assertEqual(v.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _, val in zeta.items()], axis=1).shape, (2, 38))
self.assertEqual(rv.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _, val in rzeta.items()], axis=1).shape, (2, 38))
self.assertEqual([_s.shape for _s in rs], [(2, 64), (2, 64)])
def test_final_output(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
output_batch = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
new_read_vectors = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
op = controller.final_output(output_batch, new_read_vectors)
session.run(tf.initialize_all_variables())
y = session.run(op)
self.assertEqual(y.shape, (2, 10))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 44.805031
| 127
| 0.593346
| 6,954
| 0.976137
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.020354
|
a19397d382efe02f3787d8d407c6638e72798564
| 1,538
|
py
|
Python
|
movies/movies/spiders/douban.py
|
Devon-pku/repso
|
b86666aaebb3482240aba42437c606d856d44d21
|
[
"MIT"
] | null | null | null |
movies/movies/spiders/douban.py
|
Devon-pku/repso
|
b86666aaebb3482240aba42437c606d856d44d21
|
[
"MIT"
] | null | null | null |
movies/movies/spiders/douban.py
|
Devon-pku/repso
|
b86666aaebb3482240aba42437c606d856d44d21
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose
from scrapy.spiders import CrawlSpider, Rule
from movies.items import MoviesItem
class DoubanSpider(CrawlSpider):
name = 'douban'
allowed_domains = ['douban.com']
start_urls = ['https://movie.douban.com/top250']
rules = (
Rule(LinkExtractor(restrict_xpaths='//*[contains(@rel, "next")]')),
Rule(LinkExtractor(
restrict_xpaths='//*[contains(@class, "pic")]'), callback='parse_item')
)
def parse_item(self, response):
""" This function parses a property page.
@url https://movie.douban.com/top250
@returns items 1
@scrapes name score category url year
"""
# create the loader using the response
l = ItemLoader(item=MoviesItem(), response=response)
# Load fields using Xpath expressions
l.add_xpath('name', '//h1[1]/span[1]/text()',
MapCompose(str.strip, str.title))
l.add_xpath('score', '//*[contains(@class, "ll rating_num")]//text()',
MapCompose(lambda i: i.replace(',', ''), float), re='[.0-9]+')
l.add_xpath('category', '//*[contains(@property, "v:genre")]//text()',
MapCompose(str.strip), Join())
l.add_xpath('year', '//*[@id="content"]/h1/span[2]/text()', MapCompose(int), re='[0-9]+')
l.add_value('url', response.url)
return l.load_item()
| 37.512195
| 97
| 0.606632
| 1,290
| 0.838752
| 0
| 0
| 0
| 0
| 0
| 0
| 602
| 0.391417
|
a1946a453629c94f8bc3d4a45b2c968101db6df0
| 1,546
|
py
|
Python
|
CatFaultDetection/LSTM/Test_LSTM.py
|
jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN
|
2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8
|
[
"Apache-2.0"
] | null | null | null |
CatFaultDetection/LSTM/Test_LSTM.py
|
jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN
|
2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8
|
[
"Apache-2.0"
] | null | null | null |
CatFaultDetection/LSTM/Test_LSTM.py
|
jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN
|
2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from scipy.misc import imread, imsave, imresize
from keras.models import model_from_json
from os.path import join
import matplotlib.pyplot as plt
import pandas as pd
import time
def shuffler(filename):
df = pd.read_csv(filename, header=0)
# return the pandas dataframe
return df.reindex(np.random.permutation(df.index))
num_classes = 4
# Read Dataset
data = pd.read_csv('../dataset/fault_dataset.csv')
data = shuffler('../dataset/fault_dataset.csv')
X = np.asarray(data[['posex','posey','orix','oriy','oriz','oriw']])
y_norm = np.asarray(data['labels'])
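# one-hot encode the integer labels into a (num_samples, num_classes) matrix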
y = np.zeros((len(y_norm), num_classes))
y[np.arange(len(y_norm)), y_norm] = 1
# Define Paths and Variables
model_dir = 'model'
#%% Load model and weights separately due to error in keras
model = model_from_json(open(model_dir+"/model_weights.json").read())
model.load_weights(model_dir+"/model_weights.h5")
#%% Predict Output
t0 = time.time()
output_org = model.predict(np.reshape(X, (X.shape[0], 1, X.shape[1])))
print "Time to predict all ", len(X), " samples: ", time.time()-t0
print "Average time to predict a sample: ", (time.time()-t0)/len(X)
output = np.zeros_like(output_org)
output[np.arange(len(output_org)), output_org.argmax(1)] = 1
correct = 0
for i in range(len(output)):
if np.array_equal(output[i],y[i]):
correct += 1
print "Acc: ", correct/float(len(output))
output_index = []
for row in output:
output_index.append(np.argmax(row))
plt.plot(y_norm, color='red',linewidth=3)
plt.plot(output_index, color='blue', linewidth=1)
plt.show()
| 28.109091
| 70
| 0.721863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.251617
|
a194bf4b74105b49a6100082214a932f48fe4c3d
| 3,304
|
py
|
Python
|
examples/spring_system.py
|
tkoziara/parmec
|
fefe0586798cd65744334f9abeab183159bd3d7a
|
[
"MIT"
] | null | null | null |
examples/spring_system.py
|
tkoziara/parmec
|
fefe0586798cd65744334f9abeab183159bd3d7a
|
[
"MIT"
] | 15
|
2017-06-09T12:05:27.000Z
|
2018-10-25T13:59:58.000Z
|
examples/spring_system.py
|
parmes/parmec
|
fefe0586798cd65744334f9abeab183159bd3d7a
|
[
"MIT"
] | null | null | null |
# find parmec path
import os, sys
def where(program):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, program)):
return path
return None
path = where('parmec4')
if path is None:
print 'ERROR: parmec4 not found in PATH!'
print ' Download and compile parmec;',
print 'add parmec directory to PATH variable;'
sys.exit(1)
print '(Found parmec4 at:', path + ')'
sys.path.append(os.path.join (path, 'python'))
from progress_bar import * # and import progress bar
from scipy import spatial # import scipy
import numpy as np # and numpy
# command line arguments
av = ARGV()
if '-h' in av or '--help' in av:
print 'Beam-like spring-system example:',
print 'cantilever beam fixed at x-far-end'
print 'Unit cubes interact via springs',
print 'connected within a radius of influence'
print 'Available arguments:'
print ' -nx int --> x resolution (or 10)'
print ' -ny int --> y resolution (or 5)'
print ' -nz int --> z resolution (or 5)'
print ' -du float --> duration (or 5.)'
print ' -st float --> time step (or auto)'
print ' -ra float --> spring influence radius (or 2.)'
print ' -h or --help --> print this help'
sys.exit(0)
# input parameters
nx = int(av[av.index('-nx')+1]) if '-nx' in av else 10
ny = int(av[av.index('-ny')+1]) if '-ny' in av else 5
nz = int(av[av.index('-nz')+1]) if '-nz' in av else 5
du = float(av[av.index('-du')+1]) if '-du' in av else 5.
st = float(av[av.index('-st')+1]) if '-st' in av else -1
ra = float(av[av.index('-ra')+1]) if '-ra' in av else 2.
# materials
matnum = MATERIAL (1E3, 1E9, 0.25)
spring = [-1,-1E6, 1,1E6]
dratio = 10.
# (nx,ny,nz) array of unit cubes
iend = nx*ny*nz-1
progress_bar(0, iend, 'Adding particles:')
x, y, z = np.mgrid[0:nx, 0:ny, 0:nz]
data = zip(x.ravel(), y.ravel(), z.ravel())
datarange = range (0, len(data))
for i in datarange:
p = data[i]
nodes = [p[0]-.5, p[1]-.5, p[2]-.5,
p[0]+.5, p[1]-.5, p[2]-.5,
p[0]+.5, p[1]+.5, p[2]-.5,
p[0]-.5, p[1]+.5, p[2]-.5,
p[0]-.5, p[1]-.5, p[2]+.5,
p[0]+.5, p[1]-.5, p[2]+.5,
p[0]+.5, p[1]+.5, p[2]+.5,
p[0]-.5, p[1]+.5, p[2]+.5]
elements = [8, 0, 1, 2, 3, 4, 5, 6, 7, matnum]
parnum = MESH (nodes, elements, matnum, 0)
progress_bar(i, iend, 'Adding particles:')
# connecting springs within radius
def add(a,b): return (a[0]+b[0],a[1]+b[1],a[2]+b[2])
def mul(a,b): return (a[0]*b,a[1]*b,a[2]*b)
progress_bar(0, iend, 'Adding springs:')
tree = spatial.KDTree(data)
for i in datarange:
p = data[i]
adj = tree.query_ball_point(np.array(p), ra)
for j in [k for k in adj if k < i]:
q = data[j]
x = mul(add(p,q),.5)
sprnum = SPRING (i, x, j, x, spring, dratio)
progress_bar(i, iend, 'Adding springs:')
# fixed at x-far-end
for i in datarange[-ny*nz:]:
RESTRAIN (i, [1,0,0,0,1,0,0,0,1], [1,0,0,0,1,0,0,0,1])
# gravity acceleration
GRAVITY (0., 0., -9.8)
# time step
hc = CRITICAL(perparticle=10)
if st < 0: st = 0.5 * hc[0][0]
# print out statistics
print '%dx%dx%d=%d particles and %d springs' % (nx,ny,nz,parnum,sprnum)
print '10 lowest-step per-particle tuples (critical step, particle index, circular frequency, damping ratio):'
print hc
print 'Running %d steps of size %g:' % (int(du/st),st)
# run simulation
DEM (du, st, (0.05, 0.01))
| 32.07767
| 110
| 0.608656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,179
| 0.35684
|
a194ce5184afbac2e200ce258188a996d6313650
| 113
|
py
|
Python
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 5
|
2021-06-15T05:33:01.000Z
|
2022-03-14T01:17:38.000Z
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 1
|
2021-06-03T09:22:50.000Z
|
2021-06-03T09:22:50.000Z
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 1
|
2021-07-25T15:58:40.000Z
|
2021-07-25T15:58:40.000Z
|
# -*- coding: utf-8 -*-
# @Author : Eurkon
# @Date : 2021/6/9 17:13
from api.weibo.api.top import weibo_top
| 22.6
| 39
| 0.610619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.610619
|
a1957451f3af335e8adc1d7f31b338f3928c6579
| 1,293
|
py
|
Python
|
leds.py
|
sthysel/pyboard-play
|
0df2dc98376667211958a2bcc18718d0cd69a400
|
[
"MIT"
] | null | null | null |
leds.py
|
sthysel/pyboard-play
|
0df2dc98376667211958a2bcc18718d0cd69a400
|
[
"MIT"
] | null | null | null |
leds.py
|
sthysel/pyboard-play
|
0df2dc98376667211958a2bcc18718d0cd69a400
|
[
"MIT"
] | null | null | null |
import pyb
import random
leds = [pyb.LED(i) for i in range(1, 5)]
blue_led = pyb.LED(4)
def fanfare(delay=200):
def switch_all(on=True):
for led in leds:
if on:
led.on()
else:
led.off()
for i in range(4):
switch_all(on=True)
pyb.delay(delay)
switch_all(on=False)
def slick():
while True:
for led in leds:
led.toggle()
pyb.delay(100)
def das_blinken(delay):
while True:
led = leds[random.randint(0, len(leds)-1)]
led.toggle()
pyb.delay(delay)
def das_dico():
try:
fanfare()
das_blinken(100)
finally:
fanfare()
def ramp_glow():
intensity = 0
while True:
intensity = (intensity + 1) % 255
blue_led.intensity(intensity)
pyb.delay(20)
def glow():
intensity = 0
direction = 1
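    # sweep the LED intensity up to 255 and back down, one step every 20 ms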
while True:
if intensity == 255:
direction = -1
if intensity == 0:
direction = 1
intensity = intensity + direction
blue_led.intensity(intensity)
pyb.delay(20)
glow()
| 20.52381
| 115
| 0.464811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a195963a8a3b4f30d7ce7608dabc36d736c3bd7d
| 8,088
|
py
|
Python
|
main.py
|
droher/diachronic
|
4d50f37af96c2a89c46e027f5ab7f46bce9b9521
|
[
"Apache-2.0"
] | 3
|
2018-07-23T13:58:33.000Z
|
2020-01-23T09:02:01.000Z
|
main.py
|
droher/diachronic
|
4d50f37af96c2a89c46e027f5ab7f46bce9b9521
|
[
"Apache-2.0"
] | 1
|
2021-03-22T17:15:48.000Z
|
2021-03-22T17:15:48.000Z
|
main.py
|
droher/diachronic
|
4d50f37af96c2a89c46e027f5ab7f46bce9b9521
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import shutil
import urllib.request
import traceback
import logging
import psutil
from collections import defaultdict
from typing import List, Dict, Tuple
from multiprocessing import Semaphore, Pool
from subprocess import Popen, PIPE
from datetime import datetime, timedelta
from lxml import etree
from lxml.etree import Element
import pyarrow as pa
import pyarrow.parquet as pq
from google.cloud import storage
from diachronic import global_conf, Tags
PROCESS_MEM = psutil.virtual_memory().total / psutil.cpu_count()
# Fraction of (total_mem/cpu_count) that a given process uses before flushing buffer
PROCESS_MEM_LIMIT = .1
DOWNLOAD_SEMAPHORE = Semaphore(global_conf.download_parallelism)
FAILURES = []
def make_path(path: str) -> None:
if not os.path.exists(path):
os.makedirs(path)
def get_wiki_from_filename(wiki_file: str) -> str:
return wiki_file.split("-")[0]
class WikiHandler(object):
def __init__(self):
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
make_path(global_conf.input_path)
def get_filenames(self) -> List[str]:
filenames = []
for wiki in global_conf.wikis:
url_prefix = global_conf.get_url_prefix(wiki)
url = "{}dumpstatus.json".format(url_prefix)
logging.info("Grabbing filenames from {}".format(url))
conn = urllib.request.urlopen(url)
data = json.loads(conn.read().decode())
conn.close()
filenames.extend(list(data["jobs"]["metahistory7zdump"]["files"].keys()))
return filenames
def get_files_to_skip(self) -> List[str]:
client = storage.Client()
return [blob.name for blob in client.get_bucket(global_conf.bucket).list_blobs()]
def get_files_to_run(self, overwrite=False) -> List[str]:
all_filenames = self.get_filenames()
if overwrite:
logging.info("Overwrite enabled, running all {} files".format(len(all_filenames)))
return all_filenames
skipfiles = self.get_files_to_skip()
files_to_run = [f for f in all_filenames
if "{}.{}".format(f, global_conf.output_suffix) not in skipfiles]
skip_count = len(all_filenames) - len(files_to_run)
logging.info("Running {} files and skipping {}".format(len(files_to_run), skip_count))
return files_to_run
def download(self, wiki_file: str) -> None:
logging.info("Downloading {}".format(wiki_file))
wiki = get_wiki_from_filename(wiki_file)
url_prefix = global_conf.get_url_prefix(wiki)
response = urllib.request.urlopen(url_prefix + wiki_file)
download_file = open(global_conf.input_path + wiki_file, 'wb')
shutil.copyfileobj(response, download_file)
response.close()
download_file.close()
logging.info("Downloaded {}".format(wiki_file))
def run_file(self, wiki_file: str) -> None:
try:
with DOWNLOAD_SEMAPHORE:
self.download(wiki_file)
parser = WikiFileParser(wiki_file)
parser.run()
except Exception:
logging.info("Caught exception on {}".format(wiki_file))
logging.error(traceback.format_exc())
FAILURES.append(wiki_file)
os.remove(global_conf.input_path + wiki_file)
def run(self) -> None:
logging.info("Running {}".format(global_conf.month_source))
filenames_to_run = self.get_files_to_run()
pool = Pool()
pool.map_async(self.run_file, filenames_to_run, error_callback=self._on_error)
pool.close()
pool.join()
logging.info("{} Run completed. Failures: {}".format(global_conf.month_source, FAILURES))
def _on_error(self, ex: Exception):
raise ex
class WikiFileParser(object):
def __init__(self, wiki_file: str):
self.arrow_cols = ("namespace", "title", "timestamp", "text")
self.wiki_file = wiki_file
self.wiki = get_wiki_from_filename(self.wiki_file)
output_prefix = global_conf.get_output_prefix(self.wiki)
make_path(global_conf.output_path + output_prefix)
self.output_file = "{}{}.{}".format(output_prefix,
self.wiki_file,
global_conf.output_suffix)
# State trackers
self.arrow_buff = {colname: [] for colname in self.arrow_cols}
self.arrow_row, self.cur_date, self.current_revision = self.iter_reset()
self.schema: pq.ParquetSchema = None
self.writer: pq.ParquetWriter = None
def iter_reset(self) -> Tuple[Dict[str, None], datetime, None]:
self.arrow_row = {colname: None for colname in self.arrow_cols}
self.cur_date = global_conf.datetime_init
self.current_revision = None
return self.arrow_row, self.cur_date, self.current_revision
@property
def func_dict(self) -> Dict[str, callable]:
d = {
Tags.Revision.nstag: self.parse_revision,
Tags.Namespace.nstag: self.parse_namespace,
Tags.Page.nstag: self.parse_finish,
Tags.Title.nstag: self.parse_title
}
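        # any other tag falls through to a no-op handler via the defaultdict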
return defaultdict(lambda: (lambda x: None), **d)
def parse_title(self, elem: Element) -> None:
self.arrow_row["title"] = elem.text
def parse_namespace(self, elem: Element) -> None:
self.arrow_row["namespace"] = elem.text
def parse_revision(self, elem: Element) -> None:
if self.arrow_row["namespace"] == "0":
timestamp = datetime.strptime(elem.find(Tags.Timestamp.nstag).text[:-1], "%Y-%m-%dT%H:%M:%S")
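            # keep at most one revision per calendar day: once a revision is buffered,
            # cur_date jumps to the next midnight and the rest of that day is skipped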
if timestamp >= self.cur_date:
self.cur_date = datetime.combine(timestamp.date(), datetime.min.time()) + timedelta(days=1)
text = elem.find(Tags.Text.nstag).text or ""
self.arrow_row["text"] = text
self.arrow_row["timestamp"] = timestamp
for col, val in self.arrow_row.items():
self.arrow_buff[col].append(val)
elem.clear()
def parse_finish(self, elem: Element) -> None:
self.iter_reset()
# Determine whether buffer needs to be flushed based on available memory
process = psutil.Process(os.getpid())
if process.memory_info().rss / PROCESS_MEM >= PROCESS_MEM_LIMIT:
self.write()
elem.clear()
def stream(self) -> None:
stdout = Popen(["7z", "e", "-so", global_conf.input_path + self.wiki_file], stdout=PIPE).stdout
for event, elem in etree.iterparse(stdout, huge_tree=True):
self.func_dict[elem.tag](elem)
def write(self) -> None:
arrow_arrays = {colname: pa.array(arr) for colname, arr in self.arrow_buff.items()}
arrow_table = pa.Table.from_arrays(arrays=list(arrow_arrays.values()), names=list(arrow_arrays.keys()))
if not self.writer:
self.writer = pq.ParquetWriter(global_conf.output_path + self.output_file,
arrow_table.schema, compression='brotli')
self.writer.write_table(arrow_table)
self.arrow_buff = {colname: [] for colname in self.arrow_cols}
def upload(self) -> None:
client = storage.Client()
bucket = client.get_bucket(global_conf.bucket)
blob = bucket.blob(self.output_file)
with open(global_conf.output_path + self.output_file, 'rb') as pq_file:
blob.upload_from_file(pq_file)
def cleanup(self) -> None:
os.remove(global_conf.input_path + self.wiki_file)
os.remove(global_conf.output_path + self.output_file)
def run(self) -> None:
logging.info("Started parsing {}".format(self.wiki_file))
self.stream()
# Clear leftover buffer
self.write()
self.writer.close()
self.upload()
self.cleanup()
logging.info("Finished parsing {}".format(self.wiki_file))
if __name__ == "__main__":
WikiHandler().run()
| 38.514286
| 111
| 0.641568
| 7,115
| 0.879698
| 0
| 0
| 344
| 0.042532
| 0
| 0
| 694
| 0.085806
|
a196837dce4a47f814b5cdb67c242cac5688d382
| 597
|
py
|
Python
|
sasehack/models.py
|
ritwik19/Prosper
|
77828943e188cc0ab359326d7cd13d53a876ef35
|
[
"MIT"
] | 2
|
2021-01-10T19:57:21.000Z
|
2021-05-18T16:16:41.000Z
|
sasehack/models.py
|
jaxrtech/sasehack-2017
|
6c5bb89a0bb723e8c1017dc17f5317723aaf94ce
|
[
"MIT"
] | null | null | null |
sasehack/models.py
|
jaxrtech/sasehack-2017
|
6c5bb89a0bb723e8c1017dc17f5317723aaf94ce
|
[
"MIT"
] | null | null | null |
class FollowupEvent:
def __init__(self, name, data=None):
self.name = name
self.data = data
class Response:
def __init__(self, text=None, followup_event=None):
self.speech = text
self.display_text = text
self.followup_event = followup_event
class UserInput:
def __init__(self, message: str, session_id: str, params: dict, text: str, action: str, intent: str):
self.message = message
self.session_id = session_id
self.params = params
self.raw = text
self.action = action
self.intent = intent
| 24.875
| 105
| 0.631491
| 588
| 0.984925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a196cc5f96a8b93a3bb1cc5156a3a6b18c755ee7
| 9,491
|
py
|
Python
|
apps/core/helpers.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T23:44:21.000Z
|
2022-03-12T23:44:21.000Z
|
apps/core/helpers.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | null | null | null |
apps/core/helpers.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
#
import re
import os
from django.conf import settings
from django.shortcuts import (
render_to_response, get_object_or_404 as _get_object_or_404,
redirect)
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils.translation import ugettext_lazy as _, ugettext as tr
from django.http import Http404
from datetime import datetime, time, date
import simplejson as json
def get_top_object_or_None(Object, *args, **kwargs):
if hasattr(Object, 'objects'):
obj = Object.objects.filter(*args, **kwargs)
else:
obj = Object.filter(*args, **kwargs)
if obj:
return obj[0]
return None
def get_object_or_None(Object, *args, **kwargs):
try:
return _get_object_or_404(Object, *args, **kwargs)
except (Http404, MultipleObjectsReturned):
return None
def get_object_or_404(Object, *args, **kwargs):
"""Retruns object or raise Http404 if it does not exist"""
try:
if hasattr(Object, 'objects'):
return Object.objects.get(*args, **kwargs)
elif hasattr(Object, 'get'):
return Object.get(*args, **kwargs)
else:
raise Http404("Giving object has no manager instance")
    except (Object.DoesNotExist, Object.MultipleObjectsReturned):
raise Http404("Object does not exist or multiple object returned")
def get_content_type(Object):
"""
works with ModelBase based classes, its instances
and with format string 'app_label.model_name', also supports
sphinx models and instances modification
source taken from warmist helpers source
retrieves content_type or raise the common django Exception
Examples:
get_content_type(User)
get_content_type(onsite_user)
get_content_type('auth.user')
"""
if callable(Object): # class
model = Object._meta.module_name
app_label = Object._meta.app_label
#model = Object.__name__.lower()
#app_label = (x for x in reversed(
# Object.__module__.split('.')) if x not in 'models').next()
elif hasattr(Object, 'pk'): # class instance
if hasattr(Object, '_sphinx') or hasattr(Object, '_current_object'):
model = Object._current_object._meta.module_name
app_label = Object._current_object._meta.app_label
#app_label = (x for x in reversed(
# Object._current_object.__module__.split('.')) \
#if x not in 'models').next()
#model = Object._current_object.__class__.__name__.lower()
else:
app_label = Object._meta.app_label
model = Object._meta.module_name
#app_label = (x for x in reversed(Object.__module__.split('.')) \
#if x not in 'models').next()
#model = Object.__class__.__name__.lower()
elif isinstance(Object, basestring):
app_label, model = Object.split('.')
ct = ContentType.objects.get(app_label=app_label, model=model)
return ct
def get_content_type_or_None(Object):
try:
return get_content_type(Object)
except:
return None
def get_content_type_or_404(Object):
try:
return get_content_type(Object)
except:
raise Http404
def get_form(app_label, form_name):
""" retrieve form within app_label and form_name given set"""
pass
def ajax_response(dt):
_errors = []
if 'errors' in dt:
        for key in dt['errors'].keys():
            _errors.append({'key': key, 'msg': dt['errors'][key]})
dt.update({'errors': _errors})
dt.update({'status': 200})
return dt
def generate_safe_value(value, regex):
if isinstance(regex, str):
regex = re.compile(regex, re.U | re.I)
match = regex.match(value or '')
if match:
return match.group()
return None
def make_http_response(**kw):
response = HttpResponse(status=kw.get('status', 200))
response['Content-Type'] = kw.get('content_type', 'text/plain')
if 'content' in kw:
response.write(kw['content'])
return response
def make_response(type='json', **kw):
response = HttpResponse(status=kw.get('status', 200))
if type in ('json', 'javascript', 'js'):
response['Content-Type'] = 'text/javascript'
else:
response['Content-Type'] = 'text/plain'
return response
def ajax_form_errors(errors):
""" returns form errors as python list """
errs = [{'key': k, 'msg': unicode(errors[k])} for k in errors.keys()]
#equivalent to
#for k in form.errors.keys():
# errors.append({'key': k, 'msg': unicode(form.errors[k])})
return errs
def paginate(Obj, page, **kwargs):
from django.core.paginator import InvalidPage, EmptyPage
from apps.core.diggpaginator import DiggPaginator as Paginator
pages = kwargs['pages'] if 'pages' in kwargs else 20
if 'pages' in kwargs:
del kwargs['pages']
paginator = Paginator(Obj, pages, **kwargs)
try:
objects = paginator.page(page)
except (InvalidPage, EmptyPage):
objects = paginator.page(1)
objects.count = pages # objects.end_index() - objects.start_index() +1
return objects
def model_json_encoder(obj, **kwargs):
from django.db.models.base import ModelState
from django.db.models import Model
from django.db.models.query import QuerySet
from decimal import Decimal
from django.db.models.fields.files import ImageFieldFile
is_human = kwargs.get('parse_humanday', False)
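    # serialize model instances via __dict__, dropping Django state/cache fields;
    # datetimes/dates become lists and times become "HH:MM" strings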
if isinstance(obj, QuerySet):
return list(obj)
elif isinstance(obj, Model):
dt = obj.__dict__
#obsolete better use partial
fields = ['_content_type_cache', '_author_cache', '_state']
for key in fields:
if key in dt:
del dt[key]
        #normalize caches
disable_cache = kwargs['disable_cache'] \
if 'disable_cache' in kwargs else False
# disable cache if disable_cache given
for key in dt.keys():
if '_cache' in key and key.startswith('_'):
if not disable_cache:
dt[key[1:]] = dt[key]
#delete cache
del dt[key]
if disable_cache and '_cache' in key:
del dt[key]
#delete restriction fields
if kwargs.get('fields_restrict'):
for f in kwargs.get('fields_restrict'):
if f in dt:
del dt[f]
        #make week more human-readable
if is_human and 'week' in dt:
dt['week'] = unicode(humanday(dt['week']))
return dt
elif isinstance(obj, ModelState):
return 'state'
elif isinstance(obj, datetime):
return [
obj.year, obj.month, obj.day,
obj.hour, obj.minute, obj.second,
obj.isocalendar()[1]
]
elif isinstance(obj, date):
return [obj.year, obj.month, obj.day]
elif isinstance(obj, time):
return obj.strftime("%H:%M")
elif isinstance(obj, ImageFieldFile):
return obj.url if hasattr(obj, 'url') else ''
#elif isinstance(obj, Decimal):
# return float(obj)
return obj
def get_model_instance_json(Obj, id):
instance = get_object_or_None(Obj, id=id)
response = make_http_response(content_type='text/javascript')
if not instance:
response.write(json.dumps({
'success': False,
'error': unicode(_("Not found")),
}, default=model_json_encoder))
return response
response.write(json.dumps({
'success': True,
'instance': instance,
}, default=model_json_encoder))
return response
def create_path(path):
try:
os.stat(path)
except OSError, e:
if e.errno == 2:
os.makedirs(path)
else:
pass
return path
def get_safe_fields(lst, Obj):
""" excludes fields in given lst from Object """
return [
field.attname for field in Obj._meta.fields
if field.attname not in lst
]
#decorators
def render_to(template, content_type='text/html'):
def decorator(func):
def wrapper(request, *args, **kwargs):
dt = func(request, *args, **kwargs)
if 'redirect' in dt:
return redirect(dt['redirect'])
if content_type.lower() == 'text/html':
return render_to_response(
template,
dt,
context_instance=RequestContext(request))
elif content_type.lower() in ['text/json', 'text/javascript']:
response = HttpResponse()
response['Content-Type'] = content_type
tmpl = get_template(template)
response.write(tmpl.render(Context(dt)))
return response
else:
return render_to_response(
template,
dt, context_instance=RequestContext(request))
return wrapper
return decorator
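# NOTE: this decorator redefines (and shadows) the ajax_response helper defined earlier in this module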
def ajax_response(func):
def wrapper(request, *args, **kwargs):
dt = func(request, *args, **kwargs)
response = make_http_response(content_type='text/javascript')
response.write(json.dumps(dt, default=model_json_encoder))
return response
return wrapper
| 32.282313
| 78
| 0.623011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,167
| 0.228322
|
a196d336d93a22ab16f1f21a1b3e7182f45daa9b
| 536
|
py
|
Python
|
Python/Numpy/Shape and Reshape/shape_and_reshape.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | 2
|
2020-05-28T07:15:00.000Z
|
2020-07-21T08:34:06.000Z
|
Python/Numpy/Shape and Reshape/shape_and_reshape.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
Python/Numpy/Shape and Reshape/shape_and_reshape.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import List
def reshape_to_square_matrix(seq: List) -> np.ndarray:
    square_matrix = np.array(seq)
    # reshape into a 3x3 square matrix
    square_matrix.shape = (3, 3)
    return square_matrix
if __name__ == '__main__':
int_sequence = list( map( int, input().split() ) )
# Method_#1
    #sq_matrix = reshape_to_square_matrix( int_sequence )
#print( sq_matrix )
# Method_#2
sq_matrix = np.array( int_sequence )
sq_matrix = np.reshape( sq_matrix, (3,3) )
print( sq_matrix )
| 20.615385
| 58
| 0.660448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.244403
|
a197169860a861a5d23aca5ba4544937a0ade0fe
| 2,440
|
py
|
Python
|
figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py
|
tstepien/moth-mating
|
eac5c735f40962f18d9d05b46bc3cc622ff5258d
|
[
"MIT"
] | null | null | null |
figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py
|
tstepien/moth-mating
|
eac5c735f40962f18d9d05b46bc3cc622ff5258d
|
[
"MIT"
] | null | null | null |
figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py
|
tstepien/moth-mating
|
eac5c735f40962f18d9d05b46bc3cc622ff5258d
|
[
"MIT"
] | 1
|
2021-08-08T14:45:17.000Z
|
2021-08-08T14:45:17.000Z
|
import numpy as np
import time
import csv
import multiprocessing
import os
from numba import njit
@njit()
def timestep(dist):
t1 = 1e-5
t2 = 1e-2
k = 1
timestep = (t2-t1)*np.tanh(k*dist)+t1
return timestep
@njit()
def random_walk(m,d,t,R,rt):
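    # simulate one Brownian particle started on a circle of radius R; the adaptive
    # timestep shrinks near the trap radius rt, and the walk ends once the particle
    # is absorbed or the total time t elapses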
time = 0
#set up random IC on circle of radius R away from origin
x0 = np.random.randn(m)
norm_x0 = np.sqrt(np.sum(x0**2))
x0 = R*x0/norm_x0 #x0/||x_0|| is random on the unit sphere
#####
# trajectory = []
# trajectory.append(x0)
trapped = False
#####
while time < t:
if trapped == False:
dist = np.sqrt(np.sum(x0**2)) - rt
if dist < 0:
dist = 0
dt = timestep(dist)
s = np.sqrt(2*m*d*dt)*np.random.randn(1)
dx = np.random.randn(m)
norm_dx = np.sqrt(np.sum(dx**2))
x = x0 + s*dx/norm_dx
# trajectory.append(x)
time = time + dt
if np.sum(x**2) < rt**2:
trapped = True
x0 = x
elif trapped == True:
break
return trapped
def FractionAbsorbed(d,rt):
m = 2 #spatial dimension, can be 2 or 3 but not set up for 1d
t = 100.0 #total time
R = 1 #circle radius
num_particles = 5000
trappeds = []
for k in range(num_particles):
trapped = random_walk(m,d,t,R,rt)
trappeds.append(trapped)
return sum(trappeds)/num_particles
def parallel_fun(fn,input_args,num_threads):
#need to make list of pairs of d rt to pass to function...
with multiprocessing.Pool(num_threads) as pool:
out = pool.starmap(fn,input_args)
return np.array(out)
def get_cpus_per_task():
""" Returns the SLURM environment variable if set else throws
KeyError """
try:
return os.environ["SLURM_CPUS_PER_TASK"]
except KeyError:
print("SLURM environment variable unset: \
use salloc or sbatch to launch job")
raise
CPUS_PER_TASK = int(get_cpus_per_task())
# CPUS_PER_TASK = 4
begin = time.time()
D = [10**-3]
rt = np.linspace(1e-4,0.99,20)
input_args = [(x,y) for x in D for y in rt]
prop = parallel_fun(FractionAbsorbed,input_args,CPUS_PER_TASK)
data = []
for i in range(len(prop)):
data.append([input_args[i][0],input_args[i][1],prop[i]])
csvfile = csv.writer(open('C(100)_10-3.csv','w'))
for row in data:
csvfile.writerow(row)
end = time.time()
print(end-begin)
| 24.158416
| 65
| 0.59877
| 0
| 0
| 0
| 0
| 1,019
| 0.417623
| 0
| 0
| 522
| 0.213934
|
a19804bd039dd872f53c4d69a22088d534d74c39
| 8,153
|
py
|
Python
|
tests/core/test_factory.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
tests/core/test_factory.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
tests/core/test_factory.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
import inspect
import pytest
import numpy as np
from datar.core.backends.pandas import Categorical, DataFrame, Series
from datar.core.backends.pandas.testing import assert_frame_equal
from datar.core.backends.pandas.core.groupby import SeriesGroupBy
from datar.core.factory import func_factory
from datar.core.tibble import (
SeriesCategorical,
SeriesRowwise,
TibbleGrouped,
TibbleRowwise,
)
from datar.tibble import tibble
from ..conftest import assert_iterable_equal
def test_transform_default():
@func_factory("transform", "x")
def double(x):
return x * 2
# scalar
out = double(3)
assert out[0] == 6
out = double(np.array([1, 2], dtype=int))
assert_iterable_equal(out, [2, 4])
@func_factory("transform", "x")
def double(x):
return x * 2
out = double([1, 2])
assert_iterable_equal(out, [2, 4])
# default on series
x = Series([2, 3], index=["a", "b"])
out = double(x)
assert isinstance(out, Series)
assert_iterable_equal(out.index, ["a", "b"])
assert_iterable_equal(out, [4, 6])
# default on dataframe
x = DataFrame({"a": [3, 4]})
out = double(x)
assert isinstance(out, DataFrame)
assert_iterable_equal(out.a, [6, 8])
# default on seriesgroupby
x = Series([1, 2, 1, 2]).groupby([1, 1, 2, 2])
out = double(x)
assert isinstance(out, SeriesGroupBy)
assert_iterable_equal(out.obj, [2, 4, 2, 4])
assert out.grouper.ngroups == 2
# on tibble grouped
x = tibble(x=[1, 2, 1, 2], g=[1, 1, 2, 2]).group_by("g")
out = double(x)
# grouping variables not included
assert_iterable_equal(out.x.obj, [2, 4, 2, 4])
x = tibble(x=[1, 2, 1, 2], g=[1, 1, 2, 2]).rowwise("g")
out = double(x)
assert isinstance(out, TibbleRowwise)
assert_frame_equal(out, out._datar["grouped"].obj)
assert_iterable_equal(out.x.obj, [2, 4, 2, 4])
assert_iterable_equal(out.group_vars, ["g"])
def test_transform_register():
@func_factory(kind="transform", data_args="x")
def double(x):
return x * 2
@double.register(DataFrame)
def _(x):
return x * 3
x = Series([2, 3])
out = double(x)
assert_iterable_equal(out, [4, 6])
double.register(Series, lambda x: x * 4)
out = double(x)
assert_iterable_equal(out, [8, 12])
x = tibble(a=[1, 3])
out = double(x)
assert_iterable_equal(out.a, [3, 9])
out = double([1, 4])
assert_iterable_equal(out, [4, 16])
# register an available string func for tranform
double.register(SeriesGroupBy, "sum")
x = Series([1, -2]).groupby([1, 2])
out = double(x)
assert_iterable_equal(out.obj, [1, -2])
# seriesrowwise
double.register(SeriesRowwise, lambda x: x + 1)
x.is_rowwise = True
out = double(x)
assert_iterable_equal(out.obj, [2, -1])
assert out.is_rowwise
def test_transform_hooks():
@func_factory(kind="transform", data_args="x")
def times(x, t):
return x * t
with pytest.raises(ValueError):
times.register(Series, meta=False, pre=1, func=None)
times.register(
Series,
func=None,
pre=lambda x, t: (x, (-t,), {}),
post=lambda out, x, t: out + t,
)
x = Series([1, 2])
out = times(x, -1)
assert_iterable_equal(out, [2, 3])
@times.register(Series, meta=False)
def _(x, t):
return x + t
out = times(x, 10)
assert_iterable_equal(out, [11, 12])
@times.register(SeriesGroupBy, meta=True)
def _(x, t):
return x + 10
x = Series([1, 2, 1, 2]).groupby([1, 1, 2, 2])
out = times(x, 1)
assert_iterable_equal(out.obj, [11, 12, 11, 12])
times.register(
SeriesGroupBy,
func=None,
pre=lambda x, t: (x, (t + 1,), {}),
post=lambda out, x, *args, **kwargs: out,
)
out = times(x, 1)
assert_iterable_equal(out, [2, 4, 2, 4])
times.register(
Series,
func=None,
pre=lambda *args, **kwargs: None,
post=lambda out, x, t: out + t,
)
x = Series([1, 2])
out = times(x, 3)
assert_iterable_equal(out, [4, 5])
@times.register(DataFrame, meta=True)
def _(x, t):
return x ** t
x = tibble(a=[1, 2], b=[2, 3])
out = times(x, 3)
assert_iterable_equal(out.a, [1, 8])
assert_iterable_equal(out.b, [8, 27])
# TibbleGrouped
times.register(
TibbleGrouped,
func=None,
pre=lambda x, t: (x, (t - 1,), {}),
post=lambda out, x, t: out.reindex([1, 0]),
)
x = x.group_by("a")
out = times(x, 3)
assert_iterable_equal(out.b, [6, 4])
@times.register(
TibbleGrouped,
meta=False,
)
def _(x, t):
out = x.transform(lambda d, t: d * t, 0, t - 1)
out.iloc[0, 1] = 10
return out
# x = tibble(a=[1, 2], b=[2, 3]) # grouped by a
out = times(x, 3)
assert isinstance(out, TibbleGrouped)
assert_iterable_equal(out.group_vars, ["a"])
assert_iterable_equal(out.b.obj, [10, 6])
def test_agg():
men = func_factory(
"agg",
"a",
name="men",
func=np.mean,
signature=inspect.signature(lambda a: None),
)
x = [1, 2, 3]
out = men(x)
assert out == 2.0
x = Series([1, 2, 3])
out = men(x)
assert out == 2.0
# SeriesGroupBy
men.register(SeriesGroupBy, func="mean")
x = Series([1, 2, 4]).groupby([1, 2, 2])
out = men(x)
assert_iterable_equal(out.index, [1, 2])
assert_iterable_equal(out, [1.0, 3.0])
# SeriesRowwise
df = tibble(x=[1, 2, 4]).rowwise()
out = men(df.x)
assert_iterable_equal(out, df.x.obj)
men.register(SeriesRowwise, func="sum")
out = men(df.x)
assert_iterable_equal(out.index, [0, 1, 2])
assert_iterable_equal(out, [1.0, 2.0, 4.0])
# TibbleRowwise
x = tibble(a=[1, 2, 3], b=[4, 5, 6]).rowwise()
out = men(x)
assert_iterable_equal(out, [2.5, 3.5, 4.5])
# TibbleGrouped
x = tibble(a=[1, 2, 3], b=[4, 5, 5]).group_by("b")
out = men(x)
assert_iterable_equal(out.a, [1.0, 2.5])
def test_varargs_data_args():
@func_factory("agg", {"x", "args[0]"})
def mulsum(x, *args):
return (x + args[0]) * args[1]
out = mulsum([1, 2], 2, 3)
assert_iterable_equal(out, [9, 12])
@func_factory("agg", {"x", "args"})
def mulsum(x, *args):
return x + args[0] + args[1]
out = mulsum([1, 2], [1, 2], [2, 3])
assert_iterable_equal(out, [4, 7])
def test_dataargs_not_exist():
fun = func_factory("agg", "y")(lambda x: None)
with pytest.raises(ValueError):
fun(1)
def test_args_frame():
@func_factory("agg", {"x", "y"})
def frame(x, y, __args_frame=None):
return __args_frame
out = frame(1, 2)
assert_iterable_equal(sorted(out.columns), ["x", "y"])
def test_args_raw():
@func_factory("agg", {"x"})
def raw(x, __args_raw=None):
return x, __args_raw["x"]
outx, rawx = raw(1)
assert isinstance(outx, Series)
assert rawx == 1
def test_apply():
@func_factory("apply", "x")
def rn(x):
return tibble(x=[1, 2, 3])
x = tibble(a=[1, 2], b=[2, 3]).rowwise()
out = rn(x)
assert out.shape == (2,)
assert out.iloc[0].shape == (3, 1)
def test_no_func_registered():
fun = func_factory("agg", "x", func=lambda x: None)
with pytest.raises(ValueError):
fun.register(SeriesGroupBy, func=None, meta=False)
def test_run_error():
@func_factory("agg", "x")
def error(x):
raise RuntimeError
with pytest.raises(ValueError, match="registered function"):
error(1)
def test_series_cat():
@func_factory("agg", "x")
def sum1(x):
return x.sum()
@sum1.register(SeriesCategorical)
def _(x):
return x[0]
out = sum1([1, 2])
assert out == 3
out = sum1(Categorical([1, 2]))
assert out == 1
def test_str_fun():
sum2 = func_factory(
"agg",
"x",
name="sum2",
qualname="sum2",
func="sum",
signature=inspect.signature(lambda x: None),
)
assert sum2([1, 2, 3]) == 6
| 24.050147
| 69
| 0.577333
| 0
| 0
| 0
| 0
| 1,466
| 0.179811
| 0
| 0
| 587
| 0.071998
|
a198bfc5af6a0e4572de99e815bf83c6452a7e36
| 2,234
|
py
|
Python
|
worker/resources/Twitch.py
|
fga-eps-mds/2018.2-GamesBI_Importadores
|
72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae
|
[
"MIT"
] | 1
|
2018-10-25T20:39:16.000Z
|
2018-10-25T20:39:16.000Z
|
worker/resources/Twitch.py
|
fga-eps-mds/2018.2-GamesBI_Importadores
|
72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae
|
[
"MIT"
] | null | null | null |
worker/resources/Twitch.py
|
fga-eps-mds/2018.2-GamesBI_Importadores
|
72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae
|
[
"MIT"
] | 2
|
2018-11-10T16:08:46.000Z
|
2018-11-26T14:06:12.000Z
|
import requests
from functools import reduce
import operator
from urllib.parse import quote
import time
TWITCH_HEADER = {'Client-ID': 'nhnlqt9mgdmkf9ls184tt1nd753472',
'Accept': 'application/json'}
class Twitch(object):
def get_twitch_data(self, game_name):
url = 'https://api.twitch.tv/helix/games?name={}'.format(
quote(game_name))
time.sleep(3)
game_data = requests.get(url, headers=TWITCH_HEADER)
status = game_data.status_code
if status == 200:
ndata = game_data.json()
return self.filter_game_data(ndata)
else:
return {
'total_views': None,
'streams': []
}
def filter_game_data(self, ndata):
total_views = 0
streams = []
if 'data' in ndata:
data = ndata['data']
game_id = None
for info in data:
if 'id' in info:
game_id = info['id']
streams = self.get_streams(game_id)
total_views = 0
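            # sum viewer counts over the returned streams, treating missing counts as 0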
if len(streams) != 0:
                total_views = reduce(operator.add, [
                    x['viewer_count'] if x['viewer_count'] is not None else 0 for x in streams])
return {
'total_views': total_views,
'streams': streams
}
else:
return {
'total_views': None,
'streams': []
}
def get_streams(self, game_id):
url = 'https://api.twitch.tv/helix/streams?game_id={}'.format(game_id)
time.sleep(3)
stream_data = requests.get(url, headers=TWITCH_HEADER)
status = stream_data.status_code
if status == 200:
ndata = stream_data.json()
return self.filter_stream_data(ndata)
else:
return []
def filter_stream_data(self, ndata):
filtered_data = []
for data in ndata['data']:
keys = ['language', 'game_id',
'started_at', 'type', 'viewer_count']
filtered_data.append(
{key: data[key] if key in data else None for key in keys})
return filtered_data[:2]
| 30.60274
| 105
| 0.527305
| 2,015
| 0.90197
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.148165
|
a198ce3c9c299466d4689e0f835f493506d51e28
| 2,407
|
py
|
Python
|
maas/plugins/neutron_service_check.py
|
claco/rpc-openstack
|
fc5328fd174344d5445132ec8d8973a572aa4a0f
|
[
"Apache-2.0"
] | null | null | null |
maas/plugins/neutron_service_check.py
|
claco/rpc-openstack
|
fc5328fd174344d5445132ec8d8973a572aa4a0f
|
[
"Apache-2.0"
] | null | null | null |
maas/plugins/neutron_service_check.py
|
claco/rpc-openstack
|
fc5328fd174344d5445132ec8d8973a572aa4a0f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from maas_common import get_neutron_client
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
def check(args):
NETWORK_ENDPOINT = 'http://{hostname}:9696'.format(hostname=args.hostname)
try:
neutron = get_neutron_client(endpoint_url=NETWORK_ENDPOINT)
# not gathering api status metric here so catch any exception
except Exception as e:
status_err(str(e))
    # gather neutron agent states
if args.host:
agents = neutron.list_agents(host=args.host)['agents']
else:
agents = neutron.list_agents()['agents']
if len(agents) == 0:
status_err("No host(s) found in the agents list")
# return all the things
status_ok()
for agent in agents:
agent_is_up = True
if agent['admin_state_up'] and not agent['alive']:
agent_is_up = False
if args.host:
name = '%s_status' % agent['binary']
else:
name = '%s_%s_on_host_%s' % (agent['binary'],
agent['id'],
agent['host'])
metric_bool(name, agent_is_up)
def main(args):
check(args)
if __name__ == "__main__":
with print_output():
parser = argparse.ArgumentParser(description='Check neutron agents')
parser.add_argument('hostname',
type=str,
help='Neutron API hostname or IP address')
parser.add_argument('--host',
type=str,
help='Only return metrics for specified host',
default=None)
args = parser.parse_args()
main(args)
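A self-contained sketch of the per-agent health and metric-name logic used in check() above, with made-up agent records standing in for the live neutron client and the maas_common helpers:
# Sketch of the per-agent metric logic; the agent dicts are hypothetical.
agents = [
    {"binary": "neutron-l3-agent", "id": "a1", "host": "net1",
     "admin_state_up": True, "alive": True},
    {"binary": "neutron-dhcp-agent", "id": "a2", "host": "net1",
     "admin_state_up": True, "alive": False},
]
for agent in agents:
    # an agent is considered down only when it is administratively up but not alive
    agent_is_up = not (agent["admin_state_up"] and not agent["alive"])
    name = "%s_%s_on_host_%s" % (agent["binary"], agent["id"], agent["host"])
    print(name, agent_is_up)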
| 30.858974
| 78
| 0.624429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 984
| 0.408808
|
a199ff1b2e5c00d402dfeaa1e9dbf8a6d4be69df
| 946
|
py
|
Python
|
integration-test/797-add-missing-boundaries.py
|
nextzen/vector-datasource
|
f11700f232a3a6251915579106ff07b2bee25d12
|
[
"MIT"
] | 1
|
2018-01-03T21:26:27.000Z
|
2018-01-03T21:26:27.000Z
|
integration-test/797-add-missing-boundaries.py
|
nextzen/vector-datasource
|
f11700f232a3a6251915579106ff07b2bee25d12
|
[
"MIT"
] | null | null | null |
integration-test/797-add-missing-boundaries.py
|
nextzen/vector-datasource
|
f11700f232a3a6251915579106ff07b2bee25d12
|
[
"MIT"
] | 1
|
2019-06-19T19:14:42.000Z
|
2019-06-19T19:14:42.000Z
|
from . import FixtureTest
class AddMissingBoundaries(FixtureTest):
def test_statistical(self):
# NE data - no OSM elements
# boundary between NV and CA is _also_ a "statistical" boundary
self.load_fixtures([
'file://integration-test/fixtures/'
'ne_10m_admin_1_states_provinces_lines/'
'797-ne_10m_admin_1_states_provinces_lines-nv-ca.shp',
])
self.assert_has_feature(
7, 21, 49, 'boundaries',
{'kind': 'region'})
def test_statistical_meta(self):
# boundary between MT and ND is _also_ a "statistical meta" boundary
self.load_fixtures([
'file://integration-test/fixtures/'
'ne_10m_admin_1_states_provinces_lines/'
'797-ne_10m_admin_1_states_provinces_lines-mt-nd.shp',
])
self.assert_has_feature(
7, 27, 44, 'boundaries',
{'kind': 'region'})
| 32.62069
| 76
| 0.614165
| 917
| 0.969345
| 0
| 0
| 0
| 0
| 0
| 0
| 466
| 0.4926
|
a19b0023958a3698889f955479e01ea3cfa60e20
| 836
|
py
|
Python
|
flask/app/views.py
|
Ivche1337/Dodgerino-Game
|
17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95
|
[
"MIT"
] | 1
|
2018-01-21T16:24:51.000Z
|
2018-01-21T16:24:51.000Z
|
flask/app/views.py
|
Ivche1337/Dodgerino-Game
|
17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95
|
[
"MIT"
] | 1
|
2018-01-18T04:42:07.000Z
|
2018-01-19T03:52:13.000Z
|
flask/app/views.py
|
Ivche1337/Dodgerino-Game
|
17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95
|
[
"MIT"
] | null | null | null |
import os
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from app import app
FILE_PATH = "/home/ivche/dev/Dodgerino-Game/highscores.db"
print(FILE_PATH)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+FILE_PATH
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB = SQLAlchemy(app)
class Scores(DB.Model):
name = DB.Column(DB.String)
score = DB.Column(DB.Integer,primary_key=True)
@app.route('/')
@app.route('/index')
def index():
user = {'nickname': 'Ivche'}
return render_template('index.html',
user=user)
@app.route('/highscores')
def highscores():
result = Scores.query.all()
return render_template('highscores.html',
result = result)
@app.route('/about')
def about():
return render_template('about.html')
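A self-contained sketch of the same Scores pattern against an in-memory SQLite database instead of the hard-coded highscores.db path (model name and values are made up; assumes Flask and Flask-SQLAlchemy are installed):
# Hypothetical, standalone variant of the Scores model above.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
demo = Flask(__name__)
demo.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
demo.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(demo)
class Score(db.Model):
    name = db.Column(db.String)
    score = db.Column(db.Integer, primary_key=True)
with demo.app_context():
    db.create_all()
    db.session.add(Score(name='Ivche', score=1337))
    db.session.commit()
    print(Score.query.all())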
| 23.885714
| 62
| 0.674641
| 106
| 0.126794
| 0
| 0
| 401
| 0.479665
| 0
| 0
| 205
| 0.245215
|
a19b10b3dbefe70b02dea663c226b3a10d170161
| 24,076
|
py
|
Python
|
mds/files.py
|
VilledeMontreal/mds-provider
|
f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05
|
[
"MIT"
] | null | null | null |
mds/files.py
|
VilledeMontreal/mds-provider
|
f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05
|
[
"MIT"
] | null | null | null |
mds/files.py
|
VilledeMontreal/mds-provider
|
f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05
|
[
"MIT"
] | null | null | null |
"""
Work with MDS Provider data in JSON files.
"""
import csv
import datetime
import hashlib
import json
import os
import pathlib
import urllib
import requests
import pandas as pd
from .encoding import JsonEncoder
from .providers import Provider
from .schemas import SCHEMA_TYPES, STATUS_CHANGES, TRIPS
from .versions import UnexpectedVersionError, Version
class BaseFile():
"""
Base class for working with Provider files.
"""
def __init__(self, *sources, **kwargs):
"""
Parameters:
sources: str, Path, list, optional
Zero or more paths to track.
"""
self._sources = []
for source in sources:
if isinstance(source, list):
self._sources.extend([self._parse(s) for s in source])
else:
self._sources.append(self._parse(source))
self._sources = list(filter(None, self._sources))
@property
def file_sources(self):
"""
True if this instance references one or more valid file sources.
"""
return all([self._isfile(s) or self._isurl(s) for s in self._sources])
@classmethod
def _isdir(cls, source):
"""
Return True if source is a valid directory that exists.
"""
path = pathlib.Path(source.path)
return not cls._isfile(source) and path.is_dir() and path.exists()
@classmethod
def _isfile(cls, source):
"""
Return True if path is a valid file that exists.
"""
path = pathlib.Path(source.path)
return not cls._isurl(source) and path.is_file() and path.exists()
@classmethod
def _isurl(cls, source):
"""
Return True if source is a valid URL.
"""
return source.scheme in ("http", "https") and source.netloc
@classmethod
def _parse(cls, source):
"""
Parse a data file source argument into an urllib.parse.ParseResult instance.
"""
return urllib.parse.urlparse(str(source)) if source else None
class ConfigFile(BaseFile):
"""
Work with Provider configuration data in JSON files.
"""
def __init__(self, path=None, provider=None, **kwargs):
"""
Parameters:
path: str, Path, optional
A path to a configuration file.
provider: str, UUID, Provider, optional
An identifier (name, id) for a provider; or a Provider instance. Used to key
configuration data in a dict.
"""
super().__init__(path, **kwargs)
self._config_path = None
# did we get a single file path or a provider?
if len(self._sources) == 1 and self._isfile(self._sources[0]):
self._config_path = pathlib.Path(self._sources[0].path)
# read from the config file
if self._config_path:
config = json.load(self._config_path.open())
search = []
# case-insensitive search in config
if isinstance(provider, Provider):
search.extend([
str(provider.provider_id),
provider.provider_name, provider.provider_name.capitalize(),
provider.provider_name.lower(), provider.provider_name.upper()
])
elif provider:
search.extend([
provider, provider.lower(), provider.capitalize(), provider.upper()
])
for s in set(search):
if s in config:
config = config.pop(s)
break
for k,v in config.items():
setattr(self, k, v)
# set default attributes
else:
defaults = [("auth_type", "Bearer"), ("headers", {}), ("version", Version.mds_lower()), ("mds_api_suffix", None)]
for _field, _default in defaults:
setattr(self, _field, _default)
# finally, set from keyword args
for k,v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
return f"<mds.files.ConfigFile ('{self._config_path}')>"
def dump(self, path=None, provider=None, **kwargs):
"""
Convert this instance back into a configuration dict.
Parameters:
path: str, Path, optional
The path to write the configuration data.
provider: str, UUID, Provider, optional
An identifier (name, id) for a provider; or a Provider instance. Used to key
configuration data in a dict.
Additional keyword arguments are passed-through to json.dump().
Return:
dict
With no path information, return a dict of configuration.
ConfigFile
With path information, dump configuration to file path and return this instance.
"""
dump = vars(self)
if provider:
if isinstance(provider, Provider):
provider = provider.provider_name
dump = dict([(provider, dump)])
if path:
json.dump(dump, pathlib.Path(path).open("w"), cls=JsonEncoder, **kwargs)
return self
return dump
class DataFile(BaseFile):
"""
Work with Provider payload data in JSON files.
"""
def __init__(self, record_type=None, *sources, **kwargs):
"""
Parameters:
record_type: str, optional
The type of MDS Provider record ("status_changes" or "trips") to use by default.
sources: str, Path, list, optional
One or more paths to (directories containing) MDS payload (JSON) files to read by default.
Directories are expanded such that all corresponding files within are read.
file_name: str, callable(record_type=str, payloads=list, extension=str, [payload=dict]): str, optional
A str name for the file; or a function receiving record_type, list of payloads,
file extension, and optionally a single payload being written, and returns the str
name for the file.
ls: callable(sources=list): list, optional
A function that receives a list of urllib.parse.ParseResult, and returns the
complete list of file Path objects and URL str to be read.
"""
super().__init__(*sources, **kwargs)
self.record_type = None
if record_type:
if record_type in SCHEMA_TYPES:
self.record_type = record_type
else:
self._sources.append(self._parse(record_type))
file_name = kwargs.get("file_name", self._filename)
if isinstance(file_name, str):
self.file_name = lambda **kwargs: file_name
else:
self.file_name = file_name
self.ls = kwargs.get("ls", self._ls)
def __repr__(self):
return "".join((
f"<mds.files.DataFile (",
", ".join([f"'{s}'" for s in [self.record_type]]),
")>"
))
def _default_dir(self):
"""
Get a default Path object for dumping data files.
"""
dirs = [s.path for s in self._sources if self._isdir(s)]
return pathlib.Path(dirs[0]) if len(dirs) == 1 else pathlib.Path(".")
def _record_type_or_raise(self, record_type):
"""
Get a valid record_type or raise an exception.
"""
record_type = record_type or self.record_type
if record_type in SCHEMA_TYPES:
return record_type
raise ValueError(f"A valid record type must be specified. Got {record_type}")
def dump_payloads(self, record_type=None, *payloads, **kwargs):
"""
Write MDS Provider payloads to JSON files.
Parameters:
record_type: str, optional
The type of MDS Provider record ("status_changes" or "trips").
payloads: dict, iterable
One or more MDS Provider payload dicts to write.
output_dir: str, Path, optional
The directory to write the files.
If this instance was initialized with a single directory source, use that by default.
Otherwise, use the current directory by default.
file_name: str, callable(record_type=str, payloads=list, extension=str, [payload=dict]): str, optional
A str name for the file; or a function receiving record_type, list of payloads,
file extension, and optionally a single payload being written, and returns the str
name for the file.
single_file: bool, optional
True (default) to write the payloads to a single file using the appropriate data structure.
False to write each payload as a dict to its own file.
Additional keyword arguments are passed through to json.dump().
Return:
Path
With single_file=True, the Path object pointing to the file that was written.
With single_file=False, the Path object pointing to the directory where files were written.
None if no files were written.
"""
sources = []
# marker indicates if the original incoming source was just a single dict
dict_source = False
# not a true record_type, but a data source
if record_type and record_type not in SCHEMA_TYPES:
if isinstance(record_type, dict):
sources.append(record_type)
dict_source = True
elif isinstance(record_type, list):
sources.extend(record_type)
elif isinstance(record_type, tuple):
sources.extend(list(record_type))
record_type = None
record_type = record_type or self.record_type
# convert payloads to a flat list of dicts
if isinstance(payloads, tuple) and len(payloads) == 1:
payloads = payloads[0]
if isinstance(payloads, dict):
payloads = [payloads]
dict_source = True
if not isinstance(payloads, list):
payloads = list(payloads)
sources.extend(payloads)
# filter payloads with non-matching record_type
if record_type in SCHEMA_TYPES:
sources = [p for p in sources if record_type in p["data"]]
if len(sources) == 0:
return None
output_dir = pathlib.Path(kwargs.pop("output_dir", self._default_dir()))
single_file = kwargs.pop("single_file", True)
file_name = kwargs.pop("file_name", self.file_name)
if isinstance(file_name, str):
orig_file_name = file_name
file_name = lambda **kwargs: orig_file_name
output_dir.mkdir(parents=True, exist_ok=True)
if single_file:
version = sources[0]["version"]
encoder = JsonEncoder(date_format="unix", version=version, **kwargs)
# generate a file name for the list of payloads
fname = file_name(record_type=record_type, payloads=sources, extension=".json")
print(fname)
path = pathlib.Path(output_dir, fname)
# dump the single payload or a list of payloads
if dict_source and len(sources) == 1:
path.write_text(encoder.encode(sources[0]))
else:
path.write_text(encoder.encode(sources))
return path
# multi-file
for payload in sources:
version = payload["version"]
encoder = JsonEncoder(date_format="unix", version=version, **kwargs)
# generate a file name for this payload
fname = file_name(record_type=record_type, payloads=sources, extension=".json", payload=payload)
path = pathlib.Path(output_dir, fname)
if sources.index(payload) > 0 and path.exists():
# increment the file number
n = str(sources.index(payload))
# pad with number of zeros based on how many items in the list
nz = len(str(len(sources)))
path = pathlib.Path(str(path).replace(".json", f"_{n.zfill(nz)}.json"))
# dump the payload dict
path.write_text(encoder.encode(payload))
return output_dir
def load_dataframe(self, record_type=None, *sources, **kwargs):
"""
Reads the contents of MDS payload files into tuples of (Version, DataFrame).
Parameters:
record_type: str, optional
The type of MDS Provider record ("status_changes" or "trips").
sources: str, list, optional
One or more paths to (directories containing) MDS payload (JSON) files.
Directories are expanded such that all corresponding files within are read.
flatten: bool, optional
True (default) to flatten the final result from all sources into a single tuple.
False to keep each result separate.
headers: dict, optional
A dict of headers to send with requests made to URL paths.
Could also be a dict mapping an URL path to headers for that path.
ls: callable(sources=list): list, optional
A function that receives a list of urllib.parse.ParseResult, and returns the
complete list of file Path objects and URL str to be read.
Raise:
UnexpectedVersionError
When flatten=True and a version mismatch is found amongst the data.
ValueError
When neither record_type or instance.record_type is specified.
Return:
tuple (Version, DataFrame)
With flatten=True, a (Version, DataFrame) tuple.
list
With flatten=False, a list of (Version, DataFrame) tuples with length equal to the
total number of payloads across all sources.
"""
record_type = self._record_type_or_raise(record_type)
flatten = kwargs.pop("flatten", True)
# obtain unmodified records
kwargs["flatten"] = False
records = self.load_records(record_type, *sources, **kwargs)
if len(records) == 0:
return records
version = Version(records[0][0])
if flatten:
if not all([Version(v) == version for v,_ in records]):
unexpected = [Version(v) for v,_ in records if Version(v) != version][0]
raise UnexpectedVersionError(unexpected, version)
# combine each record list
records = [item for _,data in records for item in data]
return version, pd.DataFrame.from_records(records)
else:
# list of version, DataFrame tuples
return [(Version(r[0]), pd.DataFrame.from_records(r[1])) for r in records]
def load_payloads(self, record_type=None, *sources, **kwargs):
"""
Reads the contents of MDS payload files.
Parameters:
record_type: str, optional
The type of MDS Provider record ("status_changes" or "trips").
By default get payloads of each type.
sources: str, Path, list, optional
One or more paths to (directories containing) MDS payload (JSON) files.
Directories are expanded such that all corresponding files within are read.
URLs pointing to JSON files are also supported.
flatten: bool, optional
True (default) to flatten the final result from all sources into a list of dicts.
False to keep each result as-is from the source.
headers: dict, optional
A dict of headers to send with requests made to URL paths.
Could also be a dict mapping an URL path to headers for that path.
ls: callable(sources=list): tuple (files: list, urls: list), optional
A function that receives a list of urllib.parse.ParseResult, and returns
a tuple of a list of valid files, and a list of valid URLs to be read from.
Additional keyword arguments are passed through to json.load().
Raise:
IndexError
When no sources have been specified.
Return:
list
With a single file source, or multiple sources and flatten=True, a list of Provider payload dicts.
With multiple sources and flatten=False, a list of the raw contents of each file.
"""
sources = [self._parse(s) for s in sources]
# record_type is not a schema type, but a data source
if record_type and record_type not in SCHEMA_TYPES:
sources.append(self._parse(record_type))
record_type = None
if len(sources) == 0:
sources.extend(self._sources)
if len(sources) == 0:
raise IndexError("There are no sources to read from.")
record_type = record_type or self.record_type
flatten = kwargs.pop("flatten", True)
headers = kwargs.pop("headers", {})
# obtain a list of file Paths and URL str to read
ls = kwargs.pop("ls", self.ls)
files, urls = ls(sources)
# load from each file/URL pointer into a composite list
data = []
data.extend([json.loads(f.read_text(), **kwargs) for f in files])
data.extend([requests.get(u, headers=headers.get(u, headers)).json() for u in urls])
# filter out payloads with non-matching record_type
if record_type:
filtered = []
for payload in data:
if isinstance(payload, list):
filtered.extend(filter(lambda p: record_type in p["data"], payload))
elif "data" in payload and record_type in payload["data"]:
filtered.append(payload)
data = filtered
# flatten any sublists
if flatten:
flattened = []
for payload in data:
if isinstance(payload, list):
flattened.extend(payload)
else:
flattened.append(payload)
data = flattened
return data
def load_records(self, record_type=None, *sources, **kwargs):
"""
Reads the contents of MDS payload files into tuples of (Version, list).
Parameters:
record_type: str, optional
The type of MDS Provider record ("status_changes" or "trips").
sources: str, optional
One or more paths to (directories containing) MDS payload (JSON) files.
flatten: bool, optional
True (default) to flatten the final result from all sources into a single list.
False to keep each result separate.
headers: dict, optional
A dict of headers to send with requests made to URL paths.
Could also be a dict mapping an URL path to headers for that path.
ls: callable(sources=list): list, optional
A function that receives a list of urllib.parse.ParseResult, and returns the
complete list of file Path objects and URL str to be read.
Raise:
UnexpectedVersionError
When flatten=True and a version mismatch is found amongst the data.
ValueError
When neither record_type or instance.record_type is provided.
Return:
tuple (Version, list)
With flatten=True, a (Version, list) tuple.
list
With flatten=False, a list of (Version, list) tuples with length equal to the
total number of payloads across all sources.
"""
record_type = self._record_type_or_raise(record_type)
flatten = kwargs.pop("flatten", True)
# obtain unmodified payloads
kwargs["flatten"] = False
payloads = self.load_payloads(record_type, *sources, **kwargs)
if len(payloads) < 1:
return payloads
# get the version from the initial payload
if isinstance(payloads[0], list):
version = Version(payloads[0][0]["version"])
else:
version = Version(payloads[0]["version"])
# collect versions and data from each payload
_payloads = []
for payload in payloads:
if not isinstance(payload, list):
payload = [payload]
for page in payload:
_payloads.append((page["version"], page["data"][record_type]))
if flatten:
if not all([Version(v) == version for v,_ in _payloads]):
# find the first non-matching version and raise
unexpected = [Version(v) for v,_ in _payloads if Version(v) != version][0]
raise UnexpectedVersionError(unexpected, version)
# return the version, records tuple
return version, [item for _,data in _payloads for item in data]
else:
# list of version, records tuples
return [(Version(r[0]), r[1]) for r in _payloads]
@classmethod
def _filename(cls, **kwargs):
"""
Generate a filename from the given parameters.
"""
record_type = kwargs.get("record_type", None)
payloads = kwargs.get("payloads", [])
extension = kwargs.get("extension", ".json")
payload = kwargs.get("payload", None)
# is there a single record_type in these payloads that we should use?
record_types = set([list(p["data"].keys())[0] for p in payloads])
if record_type is None and len(record_types) == 1:
record_type = record_types.pop()
# no record_type specified, generate filename from payload hash
if record_type is None:
data = json.dumps(payload or payloads).encode()
shadigest = hashlib.sha256(data).hexdigest()
return f"{shadigest[0:7]}{extension}"
# find time boundaries from the data
time_key = "event_time" if record_type == STATUS_CHANGES else "start_time"
times = [d[time_key] for p in payloads for d in p["data"][record_type]]
if all([isinstance(t, datetime.datetime) for t in times]):
start = min(times)
end = max(times)
else:
try:
start = datetime.datetime.utcfromtimestamp(int(min(times)))
end = datetime.datetime.utcfromtimestamp(int(max(times)))
except:
start = datetime.datetime.utcfromtimestamp(int(min(times)) / 1000.0)
end = datetime.datetime.utcfromtimestamp(int(max(times)) / 1000.0)
# clip to hour of day, offset if they are the same
start = datetime.datetime(start.year, start.month, start.day, start.hour)
end = datetime.datetime(end.year, end.month, end.day, end.hour)
if start == end:
end = end + datetime.timedelta(hours=1)
fmt = "%Y%m%dT%H0000Z"
providers = set([d["provider_name"] for p in payloads for d in p["data"][record_type]])
return f"{'_'.join(providers)}_{record_type}_{start.strftime(fmt)}_{end.strftime(fmt)}{extension}"
@classmethod
def _ls(cls, sources):
"""
Create a tuple of lists of valid file Paths and URLs from a list of urllib.parse.ParseResult.
"""
# separate into files and directories and urls
files = [pathlib.Path(f.path) for f in sources if cls._isfile(f)]
dirs = [pathlib.Path(d.path) for d in sources if cls._isdir(d)]
urls = [urllib.parse.urlunparse(u) for u in sources if cls._isurl(u)]
# expand into directories
files.extend([f for ls in [d.glob("*.json") for d in dirs] for f in ls])
return files, urls
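A usage sketch for DataFile, assuming the mds package that provides this module is installed and that the "trips" literal matches the TRIPS schema type; the payload below is a made-up, minimal trips page:
# Hypothetical round-trip through dump_payloads/load_records.
import pathlib
from mds.files import DataFile
payload = {
    "version": "0.4.0",
    "data": {"trips": [{"provider_name": "demo", "start_time": 1600000000}]},
}
pathlib.Path("out").mkdir(exist_ok=True)       # directory source for the DataFile
files = DataFile("trips", "out")
path = files.dump_payloads(payload)            # writes a single JSON file into out/
version, records = files.load_records()        # reads it back as (Version, list)
print(path, version, len(records))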
| 37.560062
| 125
| 0.590713
| 23,707
| 0.984674
| 0
| 0
| 3,770
| 0.156587
| 0
| 0
| 11,508
| 0.477986
|
a19dcdf3a1a9976de17738ed277080bb753f9bd2
| 7,600
|
py
|
Python
|
App/neon_ann_stitch.py
|
weecology/NEON_crown_maps
|
2da84d36ae5af44631a6d0489ccb29b212f83fd8
|
[
"MIT"
] | null | null | null |
App/neon_ann_stitch.py
|
weecology/NEON_crown_maps
|
2da84d36ae5af44631a6d0489ccb29b212f83fd8
|
[
"MIT"
] | 34
|
2020-01-30T05:44:47.000Z
|
2021-02-08T22:51:57.000Z
|
App/neon_ann_stitch.py
|
weecology/NEON_crown_maps
|
2da84d36ae5af44631a6d0489ccb29b212f83fd8
|
[
"MIT"
] | null | null | null |
import os
import rasterio
import argparse
from PIL import Image
import subprocess
import pathlib
import shutil
import glob
from numba import njit, prange
from OpenVisus import *
### Configuration
ext_name = ".tif"
dtype = "uint8[3]"
limit = 1000
###--------------
@njit(parallel=True)
def blend_rgb_ann(a, b):
#a[b[b>0]] = [255,0,0]
for i in prange(a[0].shape[0]):
for j in prange(a[0].shape[1]):
if(b[i][j] > 0):
a[0][i][j]=255
a[1][i][j]=0
a[2][i][j]=0
class tile():
def __init__(self,path,name):
self.path = path
self.name = name
self.frame = [0,0,0,0]
self.size = [0,0]
parser = argparse.ArgumentParser(description='Parse set of geotiff')
parser.add_argument('-rgb', type=str, nargs = 1, help ='rbg image path', required = True)
parser.add_argument('-ann', type=str, nargs = 1, help ='ann image path', required = False)
parser.add_argument('-out', type=str, nargs = 1, help ='output name', required = True)
args = parser.parse_args()
rgb_dir = args.rgb[0]
outdir = args.out[0]
pathlib.Path(outdir+"/temp").mkdir(parents=True, exist_ok=True)
outname = outdir.split("/")[-1]
if(outname==""):
outname = outdir.split("/")[-2]
if(args.ann):
ann_dir = args.ann[0]
# Blend rgb and annotations
for f in os.listdir(rgb_dir):
if f.endswith(ext_name):
rgb_path=rgb_dir+"/"+f
ann_path=ann_dir+"/"+f.replace("image.tif", "image_rasterized.tif")
ageo = rasterio.open(rgb_path)
a = ageo.read()
bgeo = rasterio.open(ann_path)
b = bgeo.read()
print("Blending ", rgb_path, "and", ann_path, "...")
blend_rgb_ann(a, b[0])
#tiff.imsave(outdir+"/"+f,a)
with rasterio.open(
outdir+"/"+f,
'w',
driver='GTiff',
height=ageo.height,
width=ageo.width,
count=3,
dtype=a.dtype,
crs='+proj=latlong',
transform=ageo.transform,
) as dst:
dst.write(a)
idir = outdir
else:
idir = rgb_dir
# Convert and stitch
images = []
for f in os.listdir(idir):
if f.endswith(ext_name):
filepath=idir+"/"+f
s = os.path.basename(f)
# filepath = filepath.replace('(','\(')
# filepath = filepath.replace(')','\)')
images.append(tile(filepath,s))
bbox = [99999999, 0, 99999999, 0]
count = 0
for img in images:
if count > limit:
break
count += 1
try:
ds = rasterio.open(img.path)
width = ds.width
height = ds.height
bounds = ds.bounds
except:
print("ERROR: metadata failure, skipping "+idir)
minx = bounds.left
miny = bounds.top
maxx = bounds.right
maxy = bounds.bottom
img.frame = [minx, maxx, miny, maxy]
img.size = [width, height]
#print("found gdal data", gt, "size", [height, width], "frame", [minx, maxx, miny, maxy], "psize", [maxx-minx, maxy-miny])
print("frame", img.frame)#, "psize", [(maxx-minx)/width, (maxy-miny)/height])
if(minx < bbox[0]):
bbox[0] = minx
if(miny < bbox[2]):
bbox[2] = miny
if(maxx > bbox[1]):
bbox[1] = maxx
if(maxy > bbox[3]):
bbox[3] = maxy
ratio=[(maxx-minx)/width,(maxy-miny)/height]
out_size = [bbox[1]-bbox[0], bbox[3]-bbox[2]]
img_size = [int(out_size[0]/ratio[0]), int(out_size[1]/ratio[1])]
gbox = "0 "+str(img_size[0]-1)+" 0 "+str(img_size[1]-1)
midx_name=outdir+"/global.midx"
midx_out = open(midx_name,"wt")
midx_out.write("<dataset typename='IdxMultipleDataset'>\n")
midx_out.write('<field name="voronoi">\n <code>output=voronoi()</code>\n</field>')
cwd = os.getcwd()
count = 0
for img in images:
if count > limit:
break
count += 1
lbox = "0 "+str(img.size[0]-1)+" 0 "+str(img.size[1]-1)
ancp = [int((img.frame[0]-bbox[0])/ratio[0]), int((img.frame[2]-bbox[2])/ratio[1])]
#print(ancp)
dbox = str(ancp[0])+ " " +str(ancp[0]+img.size[0]-1)+ " "+str(ancp[1])+ " "+str(ancp[1]+img.size[1]-1)
#midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'"> <M><translate x="'+str(ancp[0])+'" y="'+str(ancp[1])+'"/></M> </dataset>\n')
midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'" offset="'+str(ancp[0])+' '+str(ancp[1])+'"/>\n')
exp_idx = outdir+"/"+img.name+"exp.idx"
field=Field("data",dtype,"row_major")
CreateIdx(url=exp_idx,dims=img.size,fields=[field])
db=PyDataset(exp_idx)
#convertCommand(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
#convert.runFromArgs(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
print("Converting "+str(count)+"/"+str(min(limit, len(images)))+"...")
data=numpy.asarray(Image.open(img.path))
db.write(data)
#convertCommand(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
#convert.runFromArgs(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
midx_out.write('</dataset>')
midx_out.close();
print("Done conversion of tiles, now generating final mosaic")
def midxToIdx(filename, filename_idx):
field="output=voronoi()"
# in case it's an expression
tile_size=int(eval("4*1024"))
DATASET = LoadIdxDataset(filename)
FIELD=DATASET.getFieldByName(field)
TIME=DATASET.getDefaultTime()
Assert(FIELD.valid())
# save the new idx file
idxfile=DATASET.idxfile
idxfile.filename_template = "" # //force guess
idxfile.time_template = "" #force guess
idxfile.fields.clear()
idxfile.fields.push_back(Field("DATA", dtype, "rowmajor")) # note that compression will is empty in writing (at the end I will compress)
idxfile.save(filename_idx)
dataset = LoadIdxDataset(filename_idx)
Assert(dataset)
field=dataset.getDefaultField()
time=dataset.getDefaultTime()
Assert(field.valid())
ACCESS = DATASET.createAccess()
access = dataset.createAccess()
print("Generating tiles...",tile_size)
TILES = DATASET.generateTiles(tile_size)
TOT_TILES=TILES.size()
T1 = Time.now()
for TILE_ID in range(TOT_TILES):
TILE = TILES[TILE_ID]
t1 = Time.now()
buffer = DATASET.readFullResolutionData(ACCESS, FIELD, TIME, TILE)
msec_read = t1.elapsedMsec()
if not buffer:
continue
t1 = Time.now()
dataset.writeFullResolutionData(access, field, time, buffer, TILE)
msec_write = t1.elapsedMsec()
print("done", TILE_ID, "/", TOT_TILES, "msec_read", msec_read, "msec_write", msec_write)
#dataset.compressDataset("jpg-JPEG_QUALITYGOOD-JPEG_SUBSAMPLING_420-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYSUPERB-JPEG_SUBSAMPLING_420-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYSUPERB-JPEG_SUBSAMPLING_444-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYGOOD-JPEG_SUBSAMPLING_444-JPEG_OPTIMIZE")
# Make one big photomosaic
midxToIdx(os.path.abspath(midx_name), os.path.abspath(outdir+"/"+outname+".idx"))
# moving clutter to "outdir/temp" folder
for f in glob.glob(outdir+"/*tifexp*"):
subprocess.run(["mv",f,outdir+"/temp/"])
for f in glob.glob(outdir+"/*.tif"):
subprocess.run(["mv",f,outdir+"/temp/"])
subprocess.run(["mv",outdir+"/global.midx",outdir+"/temp/"])
# delete temp folder at the end
#subprocess.run(["rm","-R", outdir+"/temp"])
print("DONE")
| 30.522088
| 172
| 0.619342
| 142
| 0.018684
| 0
| 0
| 244
| 0.032105
| 0
| 0
| 2,505
| 0.329605
|
a19de4fc6f1c20cd12d2dfef53eca7293ca3f561
| 38
|
py
|
Python
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 9
|
2018-09-20T16:45:40.000Z
|
2021-08-08T07:04:55.000Z
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 7
|
2018-09-14T10:34:37.000Z
|
2019-04-20T06:54:29.000Z
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 3
|
2018-09-14T10:39:51.000Z
|
2019-06-26T09:32:13.000Z
|
from .plugin import ProcessTimePlugin
| 19
| 37
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a19e03506530c3d0c99934eb6006220cb01ea229
| 3,972
|
py
|
Python
|
data_creation/generate_cosmology_data.py
|
kstoreyf/emu-fight
|
2b2c538619f0e5ff7192d83f31346bb25b7ca41e
|
[
"MIT"
] | 3
|
2020-09-11T01:55:40.000Z
|
2020-11-24T00:49:17.000Z
|
data_creation/generate_cosmology_data.py
|
kstoreyf/emu-fight
|
2b2c538619f0e5ff7192d83f31346bb25b7ca41e
|
[
"MIT"
] | 9
|
2020-09-02T09:21:49.000Z
|
2020-09-09T19:15:44.000Z
|
data_creation/generate_cosmology_data.py
|
kstoreyf/emu-fight
|
2b2c538619f0e5ff7192d83f31346bb25b7ca41e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Created on Tue Sep 1 2020
@author: kstoreyf
"""
import numpy as np
import nbodykit
import pandas as pd
import pickle
from nbodykit import cosmology
def main():
save_fn = '../data/cosmology_train.pickle'
x = generate_training_parameters(n_train=1000)
y, extra_input = generate_data(x)
input_data, output_data = format_data(x, y,
objs_id=None)
data_to_save = make_data_to_save(input_data, output_data,
extra_input)
save_data(data_to_save, save_fn)
save_fn = '../data/cosmology_test.pickle'
x = generate_testing_parameters(n_test=100)
y, extra_input = generate_data(x)
input_data, output_data = format_data(x, y,
objs_id=None)
data_to_save = make_data_to_save(input_data, output_data,
extra_input)
save_data(data_to_save, save_fn)
# Generate the parameters that govern the output training set data
def generate_training_parameters(n_train=1000):
n_params = 3
n_points = n_train**(1./float(n_params))
assert abs(round(n_points) - n_points) < 1e-12, f"n_train must be a power of {n_params} because we're making a high-dimensional grid."
n_points = round(n_points)
omega_m = np.linspace(0.26, 0.34, n_points)
sigma_8 = np.linspace(0.7, 0.95, n_points)
omega_b = np.linspace(0.038, 0.058, n_points)
grid = np.meshgrid(omega_m, sigma_8, omega_b)
# x has shape (n_params, n_train), where n_train = n_points**n_params
x = np.array([grid[p].flatten() for p in range(n_params)])
return x
# Generate the parameters that govern the output testing set data
def generate_testing_parameters(n_test=100):
omega_m = random_between(0.26, 0.34, n_test)
sigma_8 = random_between(0.7, 0.95, n_test)
omega_b = random_between(0.038, 0.058, n_test)
# x has shape (n_params, n_test)
x = np.array([omega_m, sigma_8, omega_b])
return x
def random_between(xmin, xmax, n):
return np.random.rand(n)*(xmax-xmin)+xmin
# Generate the output data that we're interested in emulating
def generate_data(x):
redshift = 0.0
r_vals = np.linspace(50, 140, 10)
extra_input = {'redshift': redshift, 'r_vals': r_vals}
n_data = x.shape[1]
y = np.empty((len(r_vals), n_data))
for i in range(n_data):
print(i)
om, s8, ob = x[:,i]
ocdm = om - ob
m_ncdm = [] #no massive neutrinos
cosmo = cosmology.Cosmology(Omega0_b=ob, Omega0_cdm=ocdm, m_ncdm=m_ncdm)
cosmo = cosmo.match(sigma8=s8)
plin = cosmology.LinearPower(cosmo, redshift, transfer='EisensteinHu')
cf = cosmology.correlation.CorrelationFunction(plin)
y[:,i] = cf(r_vals)
return y, extra_input
# Format data into pandas data frames
def format_data(x_input, y_output, objs_id=None):
number_objs = len(x_input[0,:])
number_outputs = len(y_output[:,0])
if objs_id is None:
        objs_id = [f'obj_{i}' for i in np.arange(number_objs)]
input_data = pd.DataFrame()
input_data['object_id'] = objs_id
input_data[r'$\Omega_m$'] = x_input[0,:]
input_data[r'$\sigma_8$'] = x_input[1,:]
input_data[r'$\Omega_b$'] = x_input[2,:]
output_data = pd.DataFrame()
output_data['object_id'] = objs_id
for i in np.arange(number_outputs):
output_data[r'$\xi(r_{})$'.format(i)] = y_output[i, :]
return input_data, output_data
# Format the data to save it
def make_data_to_save(input_data, output_data, extra_input=None):
data_to_save = {'input_data': input_data,
'output_data': output_data}
if extra_input is not None:
data_to_save['extra_input'] = extra_input
return data_to_save
# Save the data to a file
def save_data(data, save_fn):
with open(save_fn, 'wb') as f:
pickle.dump(data, f, protocol=3)
if __name__=='__main__':
main()
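A sketch of reading back the pickles written above, assuming the script has already been run from data_creation/ and that pandas is available to unpickle the DataFrames:
# Inspect the saved training set (path as written by main() above).
import pickle
with open('../data/cosmology_train.pickle', 'rb') as f:
    saved = pickle.load(f)
input_data = saved['input_data']        # omega_m, sigma_8, omega_b per object
output_data = saved['output_data']      # xi(r_0) ... xi(r_9) per object
r_vals = saved['extra_input']['r_vals']
print(input_data.shape, output_data.shape, r_vals)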
| 32.557377
| 139
| 0.651561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 797
| 0.200655
|
a19e65a3cf25b4afaeb7f38c8c02fdf3601144bc
| 1,256
|
py
|
Python
|
handlers/checkers/highway/track.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | 4
|
2016-04-03T21:12:57.000Z
|
2016-05-04T09:14:43.000Z
|
handlers/checkers/highway/track.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | null | null | null |
handlers/checkers/highway/track.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | null | null | null |
from handlers.simplehandler import SimpleHandler
_NO_SURFACE = {
'title': 'Не указано покрытие дороги',
'help_text': """Для highway=track не задано покрытие (surface).
Объективные параметры:
- surface - тип покрытия
Субьективные параметры:
- surface:grade - оценка качества относительно типа покрытия (0..3)
- smoothness - абсолютное качество покрытия
- maxspeed:practical - скорость легкового автомобиля, с которой комфортно ехать
- tracktype
Ссылки по теме:
- http://wiki.openstreetmap.org/wiki/RU:Tag:highway%3Dtrack
- http://wiki.openstreetmap.org/wiki/RU:Key:surface
- http://wiki.openstreetmap.org/wiki/RU:Proposed_features/Surface_Quality
- http://wiki.openstreetmap.org/wiki/User:Danidin9/Variants_of_smooth_surfaces
""",
}
class HighwayTrackChecker(SimpleHandler):
def __init__(self):
self._no_surface = []
def process(self, obj):
if obj.get('highway') == 'track' and obj['@type'] == 'way':
if not 'surface' in obj:
self._no_surface.append(obj['@id'])
def finish(self, issues):
issues.add_issue_type('todo/highway/track/no_surface', _NO_SURFACE)
for way_id in self._no_surface:
issues.add_issue_obj('todo/highway/track/no_surface', 'way', way_id)
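A sketch of driving the checker above, assuming the handlers package is importable; the OSM object and the issue collector are minimal stand-ins:
# Hypothetical driver with a stub issue collector.
from handlers.checkers.highway.track import HighwayTrackChecker
class DummyIssues:
    def add_issue_type(self, key, meta):
        print("issue type:", key)
    def add_issue_obj(self, key, obj_type, obj_id):
        print("issue:", key, obj_type, obj_id)
checker = HighwayTrackChecker()
checker.process({'@type': 'way', '@id': 42, 'highway': 'track'})  # way with no surface tag
checker.finish(DummyIssues())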
| 33.052632
| 80
| 0.713376
| 509
| 0.345085
| 0
| 0
| 0
| 0
| 0
| 0
| 988
| 0.669831
|
a19fbb8c0d58c560088872b36cde005f0cdcc5c0
| 9,636
|
py
|
Python
|
job_title_processing/ressources_txt/FR/cleaner/job.py
|
OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR
|
d5cf340e1a57d84562705a92b213333875be21f7
|
[
"MIT"
] | 3
|
2020-10-25T17:44:50.000Z
|
2021-12-11T22:28:18.000Z
|
job_title_processing/ressources_txt/FR/cleaner/job.py
|
OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR
|
d5cf340e1a57d84562705a92b213333875be21f7
|
[
"MIT"
] | null | null | null |
job_title_processing/ressources_txt/FR/cleaner/job.py
|
OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR
|
d5cf340e1a57d84562705a92b213333875be21f7
|
[
"MIT"
] | 1
|
2020-11-19T12:44:25.000Z
|
2020-11-19T12:44:25.000Z
|
# -*- coding: utf-8 -*-
jobwords = [
'nan',
'temps plein', 'temps complet', 'mi temps', 'temps partiel', # Part / Full time
'cherche', # look for
'urgent','rapidement', 'futur',
'job', 'offre', # Job offer
'trice', 'ère', 'eur', 'euse', 're', 'se', 'ème', 'trices', # Female endings
'ères', 'eurs', 'euses', 'res', 'fe', 'fes',# Female endings
've', 'ne', 'iere', 'rice', 'te', 'er', 'ice',
'ves', 'nes', 'ieres', 'rices', "tes", 'ices', # Female endings
'hf', 'fh', # Male/Female, Female/Male
'semaine', 'semaines', 'sem',
'h', 'heure', 'heures', 'hebdo', 'hebdomadaire', # Time (week, hour)
'année', 'mois', 'an', # Year
'jour', 'jours', # Day
'été', 'automne', 'hiver', 'printemps', # summer, winter ...
'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche', # Week day
'janvier', 'février', 'mars', 'avril', 'mai', 'juin', # Month
'juillet', 'aout', 'septembre', 'octobre', 'novembre', 'décembre',
"deux", "trois", "quatre", "cinq", "six", "sept", # Number
"huit", "neuf", "dix", "onze", # Number
"euros", "euro", "dollars", "dollar", # Money
"super", # Pour éviter "super poids lourd"
# To clean
'caces', 'cap', 'bts', 'dea', 'diplôme', 'bac',
"taf", "ref", "poste", "pourvoir", "sein", "profil",
"possible",
'indépendant',
'saisonnier', 'alternance', 'alternant', 'apprenti',
'apprentissage', 'stagiaire', 'étudiant', 'fonctionnaire',
'intermittent', 'élève', 'freelance', "professionnalisation",
'partiel', 'cdd', 'cdi', 'contrat', 'pro',
"fpe", # Fonction publique d'état
'débutant', 'expérimenté', 'junior', 'senior',
'confirmé', 'catégorie',
'trilingue', 'bilingue',
'bi','international', 'france', 'national', 'régional',
'européen', 'emploi', 'non',
'exclusif', 'uniquement',
'permis', 'ssiap', 'bnssa',
]
job_replace_infirst = {
'3 d' : 'troisd',
'3d':'troisd',
'2 d': 'deuxd',
'2d':'deuxd',
'b to b': 'btob'
}
job_lemmas_expr = {
'cours particulier' : 'professeur',
'call center' : 'centre appels',
'vl pl vu' : 'poids lourd',
'front end' : 'informatique',
'back end' : 'informatique',
'homme femme' : '',
'femme homme' : ''
}
job_normalize_map = [
("indu", "industriel"),
("pl","poids lourd"),
("spl","poids lourd"),
("sav","service après vente"),
("unix","informatique"),
("windows","informatique"),
("php","informatique"),
("java","informatique"),
("python","informatique"),
("jee","informatique"),
("sap","informatique"),
("abap","informatique"),
("ntic","informatique"),
# ("c","informatique"),
("rh","ressources humaines"),
("vrd","voirie réseaux divers"),
("super poids lourd","poids lourd"),
("adv","administration des ventes"),
("cvv","chauffage climatisation"),
("agt","agent"),
("ash","agent des services hospitaliers"),
("ibode","infirmier de bloc opératoire"),
("aes","accompagnant éducatif et social"),
("ads","agent de sécurité"),
("amp","aide médico psychologique"),
("asvp","agent de surveillance des voies publiques"),
("cesf","conseiller en économie sociale et familiale"),
("babysitter","baby sitter"),
("babysitting","baby sitter"),
("sitting","sitter"),
("nounou", "nourrice"),
("coaching","coach"),
("webdesigner","web designer"),
("webmarketer","web marketer"),
("helpdesk","help desk"),
("prof","professeur"),
("maths", "mathématiques"),
("géo", "géographie"),
("philo", "philosophie"),
("epr","employe polyvalent de restauration"),
("NTIC","Informatique"),
("SIG","Systèmes d Information Géographique "),
("EPSCP","établissement public à caractère scientifique, culturel et professionnel "),
("NRBC","Nucléaire, Radiologique, Bactériologique, Chimique "),
("SAV","Service après vente"),
("ACIM ","Agent des Cabinets en Imagerie Médicale "),
("ASC","Agent des Services Commerciaux"),
("AEC","Agent d Escale Commerciale"),
("ASEM","Agent spécialisé des écoles maternelles "),
("TIC","Informatique"),
("HSE","Hygiène Sécurité Environnement "),
("ATER","Attaché temporaire d enseignement et de recherche "),
("AVS","Auxiliaire de Vie Sociale "),
("AIS","Auxiliaire d Intégration Scolaire"),
("ASV","Auxiliaire Spécialisé Vétérinaire "),
("AVQ","Auxiliaire Vétérinaire Qualifié"),
("IARD","Incendie, Accidents, Risques Divers "),
("NBC","Nucléaire, Bactériologique et Chimique"),
("PGC","Produits de Grande Consommation "),
("PNT","Personnel Navigant Technique "),
("PAO","Publication Assistée par Ordinateur"),
("TTA","toute arme"),
("VRD","Voiries et Réseaux Divers"),
("CMS","Composants Montés en Surface "),
("VSL","Véhicule Sanitaire Léger"),
("CIP","Conseiller d Insertion et de Probation "),
("CND","Contrôle Non Destructif "),
("MOA","Maîtrise d Ouvrage"),
("OPC","Ordonnancement, Pilotage et Coordination de chantier"),
("SPS","Sécurité, Protection de la Santé "),
("DAF","Directeur administratif et financier"),
("CHU","Centre Hospitalier Universitaire "),
("GSB","Grande Surface de Bricolage "),
("GSS","Grande Surface Spécialisée "),
("DOSI","Directeur de l Organisation et des Systèmes d Information "),
("ESAT","entreprise ou de Service d Aide par le Travail "),
("DRH","Directeur des Ressources Humaines "),
("DSI","Directeur des services informatiques "),
("DSPIP","Directeur des services pénitentiaires d insertion et de probation "),
("EPA","Etablissement Public à caractère Administratif "),
("EPST","Etablissement Public à caractère Scientifique et Technologique "),
("EPCC","Etablissement Public de Coopération Culturelle "),
("EPIC","Etablissement Public et Commercial "),
("IFSI","Institut de formation en soins infirmiers"),
("MAS","Machines à Sous "),
("SCOP","Société Coopérative Ouvrière de Production"),
(" EVS","Employée du Service Après Vente "),
("EVAT","Engagée Volontaire de l Armée de Terre "),
("EV","Engagé Volontaire "),
("GIR","Groupement d Individuels Regroupés "),
("CN","Commande Numérique "),
("SICAV","Société d Investissement à Capital Variable "),
("OPCMV","Organisme de Placement Collectif en Valeurs Mobilières "),
("OPCVM","Organisme de Placement Collectif en Valeurs Mobilières "),
("IADE","Infirmier Anesthésiste Diplômé d Etat "),
("IBODE","Infirmier de bloc opératoire Diplômé d Etat "),
("CTC","contrôle technique de construction "),
("IGREF","Ingénieur du génie rural des eaux et forêts "),
("IAA","Inspecteur d académie adjoint"),
("DSDEN","directeur des services départementaux de l Education nationale "),
("IEN","Inspecteur de l Education Nationale "),
("IET","Inspecteur de l enseignement technique "),
("ISPV","Inspecteur de Santé Publique Vétérinaire "),
("IDEN","Inspecteur départemental de l Education nationale "),
("IIO","Inspecteur d information et d orientation "),
("IGEN","Inspecteur général de l Education nationale "),
("IPR","Inspecteur pédagogique régional"),
("IPET","Inspecteur principal de l enseignement technique "),
("PNC","Personnel Navigant Commercial "),
("MPR","Magasin de Pièces de Rechange "),
("CME","Cellule, Moteur, Electricité "),
("BTP","Bâtiments et Travaux Publics "),
("EIR","Electricité, Instrument de bord, Radio "),
("MAR","Médecin Anesthésiste Réanimateur "),
("PMI","Protection Maternelle et Infantile "),
("MISP","Médecin Inspecteur de Santé Publique "),
("MIRTMO","Médecin Inspecteur Régional du Travail et de la Main d oeuvre "),
("DIM","Documentation et de l Information Médicale"),
("OPL","Officier pilote de ligne "),
("CN","commande numérique "),
("PPM","Patron Plaisance Moteur "),
("PPV","Patron Plaisance Moteur "),
("PhISP","Pharmacien Inspecteur de Santé Publique "),
("PDG","Président Directeur Général "),
("FLE","Français Langue Etrangère "),
("PLP","Professeur de lycée professionnel "),
("EPS","éducation physique et sportive "),
("PEGL","Professeur d enseignement général de lycée "),
("PEGC","Professeur d enseignement général des collèges "),
("INJS","instituts nationaux de jeunes sourds "),
("INJA","instituts nationaux de jeunes aveugles "),
("TZR","titulaire en zone de remplacement "),
("CFAO","Conception de Fabrication Assistée par Ordinateur "),
("SPIP","service pénitentiaire d insertion et de probation "),
("PME","Petite ou Moyenne Entreprise "),
("RRH","Responsable des Ressources Humaines "),
("QSE","Qualité Sécurité Environnement "),
("SASU","Secrétaire d administration scolaire et universitaire "),
("MAG","Metal Active Gas "),
("MIG","Metal Inert Gas "),
("TIG","Tungsten Inert Gas "),
("GED","Gestion électronique de documents"),
("CVM","Circulations Verticales Mécanisées "),
("TISF","Technicien Intervention Sociale et Familiale"),
("MAO","Musique Assistée par Ordinateur"),
# ("Paie","paye"),
# ("paies","paye"),
("ml","mission locale"),
("AS","aide soignant"),
("IDE","infirmier de soins généraux"),
("ERD","études recherche et développement")
]
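An illustrative sketch of how tables like the ones above could be applied to a raw job title; only a small excerpt of job_normalize_map and of the stopword list is inlined here, and the actual cleaning pipeline elsewhere in the repository may differ:
# Hypothetical application of the acronym map and stopwords to a title.
normalize = {k.lower().strip(): v.strip() for k, v in [
    ("pl", "poids lourd"),
    ("rh", "ressources humaines"),
    ("prof", "professeur"),
]}
stopwords = {"hf", "cdi", "urgent"}
def clean_title(title):
    tokens = [t.lower() for t in title.split()]
    tokens = [normalize.get(t, t) for t in tokens if t not in stopwords]
    return " ".join(tokens)
print(clean_title("Chauffeur PL HF CDI"))   # -> "chauffeur poids lourd"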
| 42.263158
| 91
| 0.603881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,506
| 0.766544
|
a19ffbe9ac756d60be5cdc280b27e2d8d949602c
| 6,262
|
py
|
Python
|
appimagebuilder/app_dir/runtime/app_run.py
|
srevinsaju/appimage-builder
|
105e253ccc43a345841b7d4037c1974938132a1d
|
[
"MIT"
] | null | null | null |
appimagebuilder/app_dir/runtime/app_run.py
|
srevinsaju/appimage-builder
|
105e253ccc43a345841b7d4037c1974938132a1d
|
[
"MIT"
] | null | null | null |
appimagebuilder/app_dir/runtime/app_run.py
|
srevinsaju/appimage-builder
|
105e253ccc43a345841b7d4037c1974938132a1d
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import fnmatch
import logging
import os
import shutil
import stat
import subprocess
import uuid
from pathlib import Path
from urllib import request
class AppRunSetupError(RuntimeError):
pass
class AppRun:
env = {
"APPIMAGE_UUID": None,
"SYSTEM_INTERP": None,
"XDG_DATA_DIRS": "$APPDIR/usr/local/share:$APPDIR/usr/share:$XDG_CONFIG_DIRS",
"XDG_CONFIG_DIRS": "$APPDIR/etc/xdg:$XDG_CONFIG_DIRS",
"LD_PRELOAD": "libapprun_hooks.so",
}
# arch mappings from the file command output to the debian format
archs_mapping = {
"ARM aarch64": "aarch64",
"ARM": "gnueabihf",
"Intel 80386": "i386",
"x86-64": "x86_64",
}
sections = {}
def __init__(
self,
version,
debug,
app_dir,
exec_path,
exec_args="$@",
cache_dir="appimage-builder-cache/runtime",
):
self.app_dir = Path(app_dir).absolute()
self.apprun_version = version
self.apprun_build_type = "Debug" if debug else "Release"
self.env["APPIMAGE_UUID"] = str(uuid.uuid4())
self.env["EXEC_PATH"] = "$APPDIR/%s" % exec_path
self.env["EXEC_ARGS"] = exec_args
self.cache_dir = Path(cache_dir).absolute()
def deploy(self):
embed_archs = self._get_embed_libc_archs()
# deploy AppRun
apprun_path = self._get_apprun_binary(embed_archs[0])
apprun_deploy_path = self.app_dir / "AppRun"
logging.info("Deploying: %s => %s" % (apprun_path, self.app_dir / "AppRun"))
shutil.copy(apprun_path, apprun_deploy_path)
apprun_deploy_path.chmod(
stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
)
for arch in embed_archs:
hooks_lib = self._get_apprun_hooks_library(arch)
target_lib_dir = self._find_hooks_lib_target_lib_dir(arch)
logging.info("Deploying: %s => %s" % (hooks_lib, target_lib_dir))
shutil.copy(hooks_lib, os.path.join(target_lib_dir, "libapprun_hooks.so"))
self._generate_env_file()
def _get_embed_libc_archs(self):
libc_paths = self._find_libc_paths()
if not libc_paths:
raise AppRunSetupError("Unable to locate libc at: %s" % self.app_dir)
archs = set()
for path in libc_paths:
arch = self._get_elf_arch(path)
if arch:
archs.add(arch)
return list(archs)
def _generate_env_file(self):
with open(os.path.join(self.app_dir, ".env"), "w") as f:
for k, v in self.env.items():
f.write("%s=%s\n" % (k, v))
def _get_elf_arch(self, file):
proc_env = os.environ.copy()
proc_env["LC_ALL"] = "C"
proc = subprocess.run(
["file", "-b", file], stdout=subprocess.PIPE, env=proc_env
)
output = proc.stdout.decode("utf-8")
parts = output.split(",")
signature = ",".join(parts[1:2])
signature = signature.replace("shared object", "")
signature = signature.replace("executable", "")
return signature.strip(" ")
def _find_libc_paths(self):
paths = []
for base_path, dirs, files in os.walk(self.app_dir):
for file in files:
abs_path = os.path.join(base_path, file)
if fnmatch.fnmatch(abs_path, "*/libc.so*"):
paths.append(abs_path)
if fnmatch.fnmatch(abs_path, "*/libc-*.so*"):
paths.append(abs_path)
return paths
def _find_hooks_lib_target_lib_dir(self, arch):
lib_dirs = self.env["APPDIR_LIBRARY_PATH"]
        lib_dirs = lib_dirs.replace("$APPDIR", str(self.app_dir))
lib_dirs = lib_dirs.split(":")
for lib_dir in lib_dirs:
for file in os.listdir(lib_dir):
file_path = os.path.join(lib_dir, file)
if os.path.isfile(file_path):
file_arch = self._get_elf_arch(file_path)
if file_arch == arch:
return lib_dir
def _get_apprun_binary(self, arch):
if arch not in self.archs_mapping:
raise AppRunSetupError("Non-supported architecture: '%s'" % arch)
self.cache_dir.mkdir(parents=True, exist_ok=True)
apprun_asset = "AppRun-%s-%s" % (
self.apprun_build_type,
self.archs_mapping[arch],
)
apprun_file = self.cache_dir / apprun_asset
if not apprun_file.exists():
url = (
"https://github.com/AppImageCrafters/AppRun/releases/download/%s/%s"
% (self.apprun_version, apprun_asset)
)
logging.info("Downloading: %s" % url)
request.urlretrieve(url, apprun_file)
return apprun_file
def _get_apprun_hooks_library(self, arch):
if arch not in self.archs_mapping:
raise AppRunSetupError("Non-supported architecture: '%s'" % arch)
self.cache_dir.mkdir(parents=True, exist_ok=True)
asset = "libapprun_hooks-%s-%s.so" % (
self.apprun_build_type,
self.archs_mapping[arch],
)
file = self.cache_dir / asset
if not file.exists():
url = (
"https://github.com/AppImageCrafters/AppRun/releases/download/%s/%s"
% (self.apprun_version, asset)
)
logging.info("Downloading: %s" % url)
request.urlretrieve(url, file)
return file
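A usage sketch, assuming the appimagebuilder package is importable; the release tag, paths and exec settings are hypothetical, deploy() downloads AppRun binaries from GitHub, and APPDIR_LIBRARY_PATH is set here because deploy() reads it from env without defining it:
# Hypothetical AppRun deployment into an existing AppDir.
from appimagebuilder.app_dir.runtime.app_run import AppRun
apprun = AppRun(
    version="v1.2.5",          # hypothetical AppRun release tag
    debug=False,
    app_dir="AppDir",
    exec_path="usr/bin/myapp",
    exec_args="$@",
)
apprun.env["APPDIR_LIBRARY_PATH"] = "$APPDIR/usr/lib"   # assumed, read by deploy()
apprun.deploy()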
| 34.98324
| 86
| 0.603641
| 5,480
| 0.87512
| 0
| 0
| 0
| 0
| 0
| 0
| 1,599
| 0.25535
|
a1a133f4a1f010df28c349cd5d84226826c23e63
| 1,631
|
py
|
Python
|
setup.py
|
cardosan/tempo_test
|
ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
cardosan/tempo_test
|
ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
cardosan/tempo_test
|
ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
import io
setup(
name='bw2temporalis',
version="0.9.2",
packages=[
"bw2temporalis",
"bw2temporalis.tests",
"bw2temporalis.examples",
"bw2temporalis.cofire"
],
author="Chris Mutel",
author_email="cmutel@gmail.com",
license=io.open('LICENSE.txt', encoding='utf-8').read(),
url="https://bitbucket.org/cmutel/brightway2-temporalis",
install_requires=[
"arrow",
"eight",
"brightway2",
"bw2analyzer",
"bw2calc>=0.11",
"bw2data>=0.12",
"bw2speedups>=2.0",
"numexpr",
"numpy",
"scipy",
"stats_arrays",
],
description='Provide a dynamic LCA calculations for the Brightway2 life cycle assessment framework',
long_description=io.open('README.rst', encoding='utf-8').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
],
)
| 32.62
| 104
| 0.591048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,030
| 0.631514
|
a1a1aaea4e69c1175a5a073ed210e340c1ccb2d1
| 8,444
|
py
|
Python
|
applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 778
|
2017-01-27T16:29:17.000Z
|
2022-03-30T03:01:51.000Z
|
applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 6,634
|
2017-01-15T22:56:13.000Z
|
2022-03-31T15:03:36.000Z
|
applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 224
|
2017-02-07T14:12:49.000Z
|
2022-03-06T23:09:34.000Z
|
import KratosMultiphysics
import KratosMultiphysics.FemToDemApplication.MainFemDem as MainFemDem
import KratosMultiphysics.FemToDemApplication as KratosFemDem
import KratosMultiphysics.DEMApplication as DEM
import KratosMultiphysics.DemStructuresCouplingApplication as DEM_Structures
# Python script created to modify the existing one due to the coupling of the DEM app in 2D
class FEM_for_coupling_Solution(MainFemDem.FEM_Solution):
def Info(self):
print("FEM part of the FEMDEM application")
def Initialize(self):
#### INITIALIZE ####
# Add variables (always before importing the model part)
self.solver.AddVariables()
# For remeshing purposes
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_STRESS_VECTOR)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)
self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_NODAL_AREA)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS_GRADIENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_DAMAGE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_STRESS_VM)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.DISPLACEMENT_INCREMENT)
# For the DE-FE contact model
self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.DELTA_DISPLACEMENT)
self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.ELASTIC_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.TANGENTIAL_ELASTIC_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.SHEAR_STRESS)
# For the Substepping
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_DISPLACEMENT)
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.SMOOTHED_STRUCTURAL_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_IMPULSE)
# Read model_part (note: the buffer_size is set here) (restart is read here)
self.solver.ImportModelPart()
# Add dofs (always after importing the model part)
if((self.main_model_part.ProcessInfo).Has(KratosMultiphysics.IS_RESTARTED)):
if(self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] == False):
self.solver.AddDofs()
else:
self.solver.AddDofs()
# Add materials (assign material to model_parts if Materials.json exists)
self.AddMaterials()
# Add processes
self.model_processes = self.AddProcesses()
self.model_processes.ExecuteInitialize()
# Print model_part and properties
if(self.echo_level > 1):
print("")
print(self.main_model_part)
for properties in self.main_model_part.Properties:
print(properties)
#### START SOLUTION ####
self.computing_model_part = self.solver.GetComputingModelPart()
if (self.ProjectParameters["solver_settings"]["strategy_type"].GetString() == "arc_length"):
neighbour_elemental_finder = KratosMultiphysics.FindElementalNeighboursProcess(self.main_model_part, 2, 5)
neighbour_elemental_finder.Execute()
self.InitializeIntegrationPointsVariables()
self.model_processes.ExecuteBeforeSolutionLoop()
self.model_processes.ExecuteInitializeSolutionStep()
self.using_arc_length = True
else:
self.using_arc_length = False
## Sets strategies, builders, linear solvers, schemes and solving info, and fills the buffer
self.solver.Initialize()
#self.solver.InitializeStrategy()
self.solver.SetEchoLevel(self.echo_level)
# Initialize GiD I/O (gid outputs, file_lists)
self.SetGraphicalOutput()
self.GraphicalOutputExecuteInitialize()
print(" ")
print("=================================================")
print(" - Kratos FemDem Application Calculation Start - ")
print("=================================================")
self.model_processes.ExecuteBeforeSolutionLoop()
self.GraphicalOutputExecuteBeforeSolutionLoop()
# Set time settings
self.step = self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
self.time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
self.end_time = self.ProjectParameters["problem_data"]["end_time"].GetDouble()
self.delta_time = self.ComputeDeltaTime()
#============================================================================================================================
def ComputeDeltaTime(self):
if self.ProjectParameters["problem_data"].Has("time_step"):
return self.ProjectParameters["problem_data"]["time_step"].GetDouble()
elif self.ProjectParameters["problem_data"].Has("variable_time_steps"):
current_time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
for key in self.ProjectParameters["problem_data"]["variable_time_steps"].keys():
interval_settings = self.ProjectParameters["problem_data"]["variable_time_steps"][key]
interval = KratosMultiphysics.IntervalUtility(interval_settings)
# Getting the time step of the interval
if interval.IsInInterval(current_time):
return interval_settings["time_step"].GetDouble()
# If we arrive here we raise an error because the intervals are not well defined
raise Exception("::[MechanicalSolver]:: Time stepping not well defined!")
else:
raise Exception("::[MechanicalSolver]:: Time stepping not defined!")
#============================================================================================================================
def InitializeIntegrationPointsVariables(self):
utils = KratosMultiphysics.VariableUtils()
elements = self.main_model_part.Elements
self.domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]
nodes = self.main_model_part.Nodes
utils.SetNonHistoricalVariable(KratosFemDem.GENERATE_DEM, False, elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_THRESHOLD, 0.0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.DAMAGE_ELEMENT, 0.0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_EXPANDED, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.IS_SKIN, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.SMOOTHING, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.RECOMPUTE_NEIGHBOURS, True, elements)
if self.domain_size == 3:
utils.SetNonHistoricalVariable(KratosFemDem.VOLUME_COUNTED, False, elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
else: # 2D
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR, [0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR, [0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED, [0.0, 0.0, 0.0], elements)
# if self.PressureLoad:
# utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_ID, 0, nodes)
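# Hypothetical sketch of the ProjectParameters fragment consumed by ComputeDeltaTime
# above (the "interval" key follows KratosMultiphysics.IntervalUtility conventions;
# the block names and values are only illustrative and not part of the original file):
# "problem_data": {
#     "variable_time_steps": {
#         "initial_ramp": {"interval": [0.0, 1.0],   "time_step": 1.0e-3},
#         "steady_state": {"interval": [1.0, "End"], "time_step": 1.0e-2}
#     }
# }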
| 51.487805
| 126
| 0.682852
| 8,056
| 0.95405
| 0
| 0
| 0
| 0
| 0
| 0
| 1,728
| 0.204642
|
a1a27befca81b9961c7c90b5224fd531c6279e19
| 5,284
|
py
|
Python
|
utils/arg_parser.py
|
dataflowr/Project-Neural-Bootstrapper
|
36278a7f6884438553d90d9cdc12eaf0da1bc7bf
|
[
"MIT"
] | 17
|
2020-10-17T08:46:56.000Z
|
2022-02-27T17:32:43.000Z
|
utils/arg_parser.py
|
dataflowr/Project-Neural-Bootstrapper
|
36278a7f6884438553d90d9cdc12eaf0da1bc7bf
|
[
"MIT"
] | 1
|
2022-03-12T15:44:38.000Z
|
2022-03-13T00:47:41.000Z
|
utils/arg_parser.py
|
dataflowr/Project-Neural-Bootstrapper
|
36278a7f6884438553d90d9cdc12eaf0da1bc7bf
|
[
"MIT"
] | 5
|
2021-01-30T05:04:29.000Z
|
2022-02-14T23:49:42.000Z
|
import os
import yaml
import copy
import logging
from pathlib import Path
import torch
from torch.nn import *
from torch.optim import *
import torch.distributed as dist
from torch.optim.lr_scheduler import *
from torch.nn.parallel import DistributedDataParallel
from utils.metrics import *
from models import _get_model
torch.backends.cudnn.benchmark = True
class Argments(object):
@staticmethod
def _file_load(yaml_file):
with open(fr'{yaml_file}') as f:
y = yaml.safe_load(f)
return y
@staticmethod
def _module_load(d, part, **kargs):
module_obj = eval(d[part]['name'])
module_args = copy.deepcopy(d[part])
module_args.update(kargs)
del module_args['name']
part = module_obj(**module_args)
return part
def _modules_load(self):
for k, v in self._y.items():
if 'module' in k:
setattr(self, k, dict())
module = self.__dict__[k]
module['model'] = _get_model(**v['model'], model_type=self['setup/model_type']).cuda()
if self['setup/phase'] != 'infer':
module['optim'] = self._module_load(v, part='optim',
params=module['model'].parameters())
module['model'] = DistributedDataParallel(module['model'],
[self['setup/rank']])
module['lr_scheduler'] = self._module_load(v, part='lr_scheduler',
optimizer=module['optim'])
loss = [eval(l)(**v['loss_args'][l]) for l in v['loss']]
module['loss_with_weight'] = list(zip(loss, v['loss_weight']))
module['val_metric'] = eval(v['val_metric'])(**v['metric_args'])
module['test_metric'] = eval(v['test_metric'])(**v['metric_args'])
else:
module['model'] = DistributedDataParallel(module['model'],
[self['setup/rank']])
def __init__(self, yaml_file, cmd_args):
self.file_name = yaml_file
self._y = self._file_load(yaml_file)
if cmd_args.gpus != "-1":
self['setup/gpus'] = cmd_args.gpus
os.environ["CUDA_VISIBLE_DEVICES"] = self["setup/gpus"]
self['setup/index'] = cmd_args.index
self['setup/phase'] = cmd_args.phase
self['setup/local_rank'] = cmd_args.local_rank
world_size = len(self["setup/gpus"].replace(',', "").replace("'", ""))
model_path = f"outs/{self['setup/model_type']}/{self['module/model/name']}"
model_path += f"/{self['path/dataset']}"
if self['setup/index'] != -1:
model_path += f"_{self['setup/index']}"
if self['path/postfix'] != 'none':
model_path += f"_{self['path/postfix']}"
self['path/model_path'] = model_path
Path(model_path).mkdir(parents=True, exist_ok=True)
torch.cuda.set_device(cmd_args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method=f'file://{Path(model_path).resolve()}/sharedfile',
world_size=world_size,
rank=self['setup/local_rank'])
self['setup/rank'] = dist.get_rank()
self['setup/dist_size'] = dist.get_world_size()
self._modules_load()
def reset(self):
for k, v in list(self.__dict__.items()):
if 'module' in k:
del self.__dict__[k]
torch.cuda.empty_cache()
self._modules_load()
def _get(self, *keys):
v = self._y
for k in keys:
v = v[k]
return v
def _update(self, *keys, value):
k = self._y
for i in range(len(keys) - 1):
k.setdefault(keys[i], {})
k = k[keys[i]]
k[keys[-1]] = value
def __str__(self):
return f'{self.file_name}\n{self._y}'
def __contains__(self, item):
def search_recursively(d, t):
for k, v in d.items():
if k == t:
return True
                elif isinstance(v, dict):
                    # propagate a match found deeper in the nested dict
                    if search_recursively(v, t):
                        return True
            return False
return search_recursively(self._y, item)
def __getitem__(self, key):
return self._get(*key.split('/'))
def __setitem__(self, key, value):
self._update(*key.split('/'), value=value)
if __name__ == '__main__':
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
file_handler = logging.FileHandler('log.log')
file_handler.setLevel(logging.INFO)
log.addHandler(stream_handler)
log.addHandler(file_handler)
Args = Argments('test.yaml')
Args._update('path', 'abcd', 'efgh', value='zzzz')
Args['path/cccc/dddd'] = 'ffff'
log.debug(Args)
log.debug(Args['path/cccc/dddd'])
# print(Args)
# print('path' in Args)
# print(Args['path/abcd/efgh'])
# print(Args['path/cccc/dddd'])
# print(Args.module['lr_scheduler'])
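# Hypothetical sketch of the YAML layout this parser expects (key names inferred from
# the lookups above, e.g. setup/gpus, path/dataset, module/model/name; the values are
# only illustrative and not part of the original file):
# setup:
#   model_type: classifier
#   gpus: "0,1"
# path:
#   dataset: cifar10
#   postfix: none
# module:
#   model: {name: resnet18}
#   optim: {name: SGD, lr: 0.1}
#   lr_scheduler: {name: StepLR, step_size: 30}
#   loss: [CrossEntropyLoss]
#   loss_args: {CrossEntropyLoss: {}}
#   loss_weight: [1.0]
#   val_metric: Accuracy
#   test_metric: Accuracy
#   metric_args: {}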
| 35.702703
| 107
| 0.543906
| 4,275
| 0.809046
| 0
| 0
| 404
| 0.076457
| 0
| 0
| 996
| 0.188494
|
a1a36361a953bc1ab0c48721b0d1db387eabef20
| 6,139
|
py
|
Python
|
MDP/MDP.py
|
ADP-Benchmarks/ADP-Benchmark
|
aea3d1be7c28c7290a23e731b9e7b460ee6976f7
|
[
"MIT"
] | 1
|
2020-01-17T17:09:46.000Z
|
2020-01-17T17:09:46.000Z
|
MDP/MDP.py
|
ADP-Benchmarks/ADP-Benchmark
|
aea3d1be7c28c7290a23e731b9e7b460ee6976f7
|
[
"MIT"
] | null | null | null |
MDP/MDP.py
|
ADP-Benchmarks/ADP-Benchmark
|
aea3d1be7c28c7290a23e731b9e7b460ee6976f7
|
[
"MIT"
] | 2
|
2020-10-26T04:51:42.000Z
|
2020-11-22T20:20:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GitHub Homepage
----------------
https://github.com/ADP-Benchmarks
Contact information
-------------------
ADP.Benchmarks@gmail.com.
License
-------
The MIT License
"""
from MDP.spaces.space import Space
from MDP.transition import Transition
from MDP.objective import Objective
import copy
class MDP:
"""
Description
-----------
    This class provides a generic implementation for continuous- and
    discrete-state MDPs. Finite- and infinite-time horizon MDPs as
    well as average- and discounted-cost MDPs can be handled.
"""
def __init__(self, initState = None,
sSpace = None,
aSpace = None,
nSpace = None,
transition = None,
objective = None,
isFiniteHorizon = False,
isAveCost = False,
terminalStates=None,):
"""
Inputs
------
initState [list]: initial state vector, that is the list of
components of the starting state.
sSpace [Space]: MDP state space.
aSpace [Space]: MDP action space.
nSpace [Space]: MDP exogenous noise space.
transition [Transition]: MDP stochastic kernel (e.g., MDP
transition matrix for discrete MDPs).
objective [Objective]: the MDP cost/reward function.
isFiniteHorizon [int]: if int, MDP is finite-time horizon of length
isFiniteHorizon, else if False,
it is infinite-time horizon.
isAveCost [bool]: if True, MDP is average-cost, else it is
discounted-cost.
terminalStates [list]: list of absorbing state for episodic MDPs
Raises/Returns
--------------
Explanations
------------
The constructor of MDP class.
"""
# assert(isinstance(sSpace,Space))
# assert(isinstance(aSpace,Space))
# assert(isinstance(nSpace,Space))
assert(isinstance(transition,Transition))
assert(isinstance(objective,Objective))
        assert sSpace.isStateFeasble(initState), 'Initial state should belong to\
            the state space'
#TODO initState -> initDist
self.initState = initState
self.terminalStates = terminalStates
self.sSpace = sSpace
self.aSpace = aSpace
self.nSpace = nSpace
self.sDim = self.sSpace.dim
self.aDim = self.aSpace.dim
self.nDim = self.nSpace.dim
self.transition = transition
self.objective = objective
self.isFiniteHorizon = isFiniteHorizon
self.isAveCost = isAveCost
self.reset()
def step(self, action, force_noise=None):
'''
Takes one step in the MDP.
--------------------------
Inputs
------
action [list]: current action vector, that is the list of
components of the current action
force_noise [list]: optional, an exogenous noise vector used to
evaluate next state and reward. If not provided,
the noise vector will be sampled randomly
Returns
-------
nextState [list]: next state at t+1
reward [float]: Scalar reward/cost
done [boolean]: True if an absorbing state is reached,
for the case of absorbing MDPs
info [dict]: Provides info about the noise outcome and current period
in the finite horizon case
'''
#TODO This function should support generating a list of next states
if not force_noise:
noise = self.nSpace.sample()[0]
else:
noise = force_noise
nextState = self.transition.getNextStateWithExoSamples(self.currState,
action,
noise)
reward = self.objective.getObjectiveWithExoSamples(self.currState,
action,
noise)
self.currState = nextState
if self.isFiniteHorizon:
# Increment the period
self.t += 1
if self.t >= self.isFiniteHorizon:
self.reset()
return nextState, reward, {'t': self.t, 'noise': noise}
# Infinite horizon MDP
elif self.terminalStates:
done = nextState in self.terminalStates
return nextState, reward, done, {'noise': noise}
else:
return nextState, reward, {'noise': noise}
def reset(self,):
'''
Resets the state back to the initial state
------------------------------------------
Returns
-------
initState [list]: initial state vector, that is the list of
components of the starting state.
        t [int]: starting period t for finite horizon MDPs
'''
self.currState = copy.deepcopy(self.initState)
if self.isFiniteHorizon:
self.t = 0
return (self.currState,self.t)
else:
return self.currState
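# A minimal usage sketch (state_space, action_space, noise_space, transition and
# objective are hypothetical objects implementing the Space/Transition/Objective
# interfaces; they are not defined in this file):
# mdp = MDP(initState=[0], sSpace=state_space, aSpace=action_space, nSpace=noise_space,
#           transition=transition, objective=objective, isFiniteHorizon=10)
# state, t = mdp.reset()                    # finite horizon: returns (initial state, 0)
# next_state, reward, info = mdp.step([1])  # info carries the sampled noise and period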
| 32.654255
| 80
| 0.468154
| 5,684
| 0.925884
| 0
| 0
| 0
| 0
| 0
| 0
| 3,455
| 0.562795
|
a1a59271f18a59c5e8650b4f274444162d49578d
| 7,186
|
py
|
Python
|
tests/test_multiplegraphscallpeaks.py
|
uio-bmi/graph_peak_caller
|
89deeabf3cd0b23fba49b1304f1c81222fb534d7
|
[
"BSD-3-Clause"
] | 10
|
2018-04-19T21:54:31.000Z
|
2021-07-22T12:46:33.000Z
|
tests/test_multiplegraphscallpeaks.py
|
uio-bmi/graph_peak_caller
|
89deeabf3cd0b23fba49b1304f1c81222fb534d7
|
[
"BSD-3-Clause"
] | 9
|
2018-01-30T20:41:36.000Z
|
2021-01-28T23:00:18.000Z
|
tests/test_multiplegraphscallpeaks.py
|
uio-bmi/graph_peak_caller
|
89deeabf3cd0b23fba49b1304f1c81222fb534d7
|
[
"BSD-3-Clause"
] | 3
|
2019-08-20T21:43:53.000Z
|
2022-01-20T14:39:34.000Z
|
from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks
from graph_peak_caller.intervals import Intervals
from graph_peak_caller import Configuration
from graph_peak_caller.reporter import Reporter
from offsetbasedgraph import GraphWithReversals as Graph, \
DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval
import unittest
from graph_peak_caller.control.linearmap import LinearMap
from pyvg.sequences import SequenceRetriever
import logging
from graph_peak_caller.logging_config import set_logging_config
#set_logging_config(1)
import os
from graph_peak_caller.command_line_interface import run_argument_parser
class TestMultipleGraphsCallPeaks(unittest.TestCase):
def setUp(self):
self.chromosomes = ["1", "2", "3", "X", "Y"]
self.fragment_length = 5
self.read_length = 2
self.sample_reads = []
self.control_reads = []
self.linear_maps = []
self.sequence_retrievers = []
self.peaks = []
for chrom in self.chromosomes:
# Delete old files if existing
if os.path.isfile("multigraphs_%s_pvalues_indexes.npy" % chrom):
os.remove("multigraphs_%s_pvalues_indexes.npy" % chrom)
os.remove("multigraphs_%s_pvalues_values.npy" % chrom)
# Delete old files if existing
if os.path.isfile("multigraphs_%s_max_paths.intervalcollection" % chrom):
os.remove("multigraphs_%s_max_paths.intervalcollection" % chrom)
self._create_data()
self.config = Configuration()
self.config.fragment_length = self.fragment_length
self.config.read_length = self.read_length
self.config.has_control = False
self.config.min_background = 0.33
self.reporter = Reporter("multigraphs_")
def _create_data(self):
node_offset = 1
for chrom_number, chromosome in enumerate(self.chromosomes):
graph = Graph(
{i + node_offset: Block(10) for i in range(0, 3)},
{i+node_offset: [i+1+node_offset] for i in range(0, 2)})
linear_map = LinearMap.from_graph(graph)
linear_map_file_name = "linear_map_%s.npz" % chromosome
linear_map.to_file(linear_map_file_name)
self.linear_maps.append(linear_map_file_name)
self.sequence_retrievers.append(
SequenceRetriever({i+node_offset: "A" * 10
for i in range(0, 3)})
)
self._create_reads(chrom_number, chromosome, graph)
node_offset += 3
graph.convert_to_numpy_backend()
SequenceGraph.create_empty_from_ob_graph(graph).to_file(chromosome + ".nobg.sequences")
graph.to_file(chromosome + ".nobg")
def _create_reads(self, chrom_number, chrom, graph):
i = chrom_number
sample_reads = []
control_reads = []
peaks = [DirectedInterval(7, 2, [1 + 3*i, 2 + 3*i], graph)]
self.peaks.append(peaks)
for peak in peaks:
for i in range(0, 10):
left_sub = peak.get_subinterval(0, self.read_length)
sample_reads.append(left_sub)
control_reads.append(left_sub)
right_sub = peak.get_subinterval(
self.fragment_length - self.read_length,
self.fragment_length)
right_sub_reverse = right_sub.get_reverse()
sample_reads.append(right_sub_reverse)
control_reads.append(right_sub_reverse)
self.sample_reads.append(Intervals(sample_reads))
self.control_reads.append(Intervals(control_reads))
def test_run_from_init(self):
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
self.sample_reads,
self.control_reads,
self.linear_maps,
self.config,
self.reporter
)
caller.run()
self.do_asserts()
def test_run_from_init_in_two_steps(self):
set_logging_config(2)
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
self.sample_reads,
self.control_reads,
self.linear_maps,
self.config,
self.reporter,
stop_after_p_values=True
)
caller.run()
for i, chromosome in enumerate(self.chromosomes):
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
None,
None,
None,
self.config,
self.reporter
)
caller.create_joined_q_value_mapping()
caller.run_from_p_values(only_chromosome=chromosome)
self.do_asserts()
def do_asserts(self):
for i, chromosome in enumerate(self.chromosomes):
final_peaks = IntervalCollection.create_list_from_file(
"multigraphs_" + chromosome + "_max_paths.intervalcollection")
for peak in self.peaks[i]:
assert peak in final_peaks
class TestMultipleGraphsCallPeaksCommandLine(TestMultipleGraphsCallPeaks):
    # Same test, but using the command line interface
def _create_reads(self, *args):
super(TestMultipleGraphsCallPeaksCommandLine, self)._create_reads(*args)
for intervals, chrom in zip(self.sample_reads, self.chromosomes):
IntervalCollection(intervals._intervals).to_file("test_sample_" + chrom + ".intervalcollection", text_file=True)
def test_typical_run(self):
print(" ========= Running start ====")
run_argument_parser(["callpeaks",
"-g", "*.nobg",
"-s", "test_sample_*.intervalcollection",
"-f", "%s" % self.fragment_length,
"-r", "%s" % self.read_length,
"-u", "100",
"-G", "150",
"-n", "multigraphs_",
"-p", "True",
"-D", "True"])
for i, chromosome in enumerate(self.chromosomes):
run_argument_parser(["callpeaks_whole_genome_from_p_values", chromosome,
"-d", "./",
"-f", "%s" % self.fragment_length,
"-r", "%s" % self.read_length,
"-n", "multigraphs_"])
self.do_asserts()
def test_count_unique_reads(self):
reads = [
IntervalCollection([
Interval(4, 10, [1, 2, 3]),
Interval(4, 5, [1]),
Interval(5, 5, [1]),
Interval(6, 2, [-3, -2, -1])
])
]
unique = MultipleGraphsCallpeaks.count_number_of_unique_reads(reads)
self.assertEqual(unique, 3)
if __name__ == "__main__":
unittest.main()
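# The module can be run directly from the repository root (assuming offsetbasedgraph,
# pyvg and graph_peak_caller are installed), since unittest.main() is invoked above:
#     python tests/test_multiplegraphscallpeaks.py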
| 38.427807
| 124
| 0.585305
| 6,470
| 0.900362
| 0
| 0
| 0
| 0
| 0
| 0
| 755
| 0.105065
|
a1a925ea7d8dee1ab5cd0e823a74e840575eb035
| 7,141
|
py
|
Python
|
brainite/models/mcvae.py
|
neurospin-deepinsight/brainite
|
18aafe5d1522f1a4a4081d43f120464afe6cd0a7
|
[
"CECILL-B"
] | null | null | null |
brainite/models/mcvae.py
|
neurospin-deepinsight/brainite
|
18aafe5d1522f1a4a4081d43f120464afe6cd0a7
|
[
"CECILL-B"
] | null | null | null |
brainite/models/mcvae.py
|
neurospin-deepinsight/brainite
|
18aafe5d1522f1a4a4081d43f120464afe6cd0a7
|
[
"CECILL-B"
] | 1
|
2021-09-16T08:29:19.000Z
|
2021-09-16T08:29:19.000Z
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Sparse Multi-Channel Variational Autoencoder for the Joint Analysis of
Heterogeneous Data.
[1] Sparse Multi-Channel Variational Autoencoder for the Joint Analysis of
Heterogeneous Data, Antelmi, Luigi, PMLR 2019,
https://github.com/ggbioing/mcvae.
"""
# Imports
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions import Bernoulli, Normal, kl_divergence
from .vae import VAE
class MCVAE(nn.Module):
""" Sparse Multi-Channel Variational Autoencoder (sMCVAE).
"""
def __init__(self, latent_dim, n_channels, n_feats,
noise_init_logvar=-3, noise_fixed=False, sparse=False,
vae_model="dense", vae_kwargs=None, nodecoding=False):
""" Init class.
Parameters
----------
latent_dim: int
the number of latent dimensions.
n_channels: int
the number of channels.
n_feats: list of int
each channel input dimensions.
noise_init_logvar: float, default -3
default noise parameters values.
        noise_fixed: bool, default False
            if set, do not require gradients on the noise parameters.
        sparse: bool, default False
            use a sparsity constraint.
        vae_model: str, default "dense"
            the VAE network used to encode each channel.
        vae_kwargs: dict, default None
            extra parameters passed at initialization of the VAE model.
        nodecoding: bool, default False
            if set, do not apply the decoding.
"""
super(MCVAE, self).__init__()
assert(n_channels == len(n_feats))
self.latent_dim = latent_dim
self.n_channels = n_channels
self.n_feats = n_feats
self.sparse = sparse
self.noise_init_logvar = noise_init_logvar
self.noise_fixed = noise_fixed
if vae_model == "dense":
self.vae_class = VAE
else:
raise ValueError("Unknown VAE model.")
self.vae_kwargs = vae_kwargs or {}
self.nodecoding = nodecoding
self.init_vae()
def init_vae(self):
""" Create one VAE model per channel.
"""
if self.sparse:
self.log_alpha = nn.Parameter(
torch.FloatTensor(1, self.latent_dim).normal_(0, 0.01))
else:
self.log_alpha = None
vae = []
for c_idx in range(self.n_channels):
if "conv_flts" not in self.vae_kwargs:
self.vae_kwargs["conv_flts"] = None
if "dense_hidden_dims" not in self.vae_kwargs:
self.vae_kwargs["dense_hidden_dims"] = None
vae.append(
self.vae_class(
input_channels=1,
input_dim=self.n_feats[c_idx],
latent_dim=self.latent_dim,
noise_out_logvar=self.noise_init_logvar,
noise_fixed=self.noise_fixed,
sparse=self.sparse,
act_func=torch.nn.Tanh,
final_activation=False,
log_alpha=self.log_alpha,
**self.vae_kwargs))
self.vae = torch.nn.ModuleList(vae)
def encode(self, x):
""" Encodes the input by passing through the encoder network
and returns the latent distribution for each channel.
Parameters
----------
x: list of Tensor, (C,) -> (N, Fc)
input tensors to encode.
Returns
-------
out: list of 2-uplet (C,) -> (N, D)
each channel distribution parameters mu (mean of the latent
            Gaussian) and logvar (log-variance of the latent Gaussian).
"""
return [self.vae[c_idx].encode(x[c_idx])
for c_idx in range(self.n_channels)]
def decode(self, z):
""" Maps the given latent codes onto the image space.
Parameters
----------
z: list of Tensor (N, D)
sample from the distribution having latent parameters mu, var.
Returns
-------
p: list of Tensor, (N, C, F)
the prediction p(x|z).
"""
p = []
for c_idx1 in range(self.n_channels):
pi = [self.vae[c_idx1].decode(z[c_idx2])
for c_idx2 in range(self.n_channels)]
p.append(pi)
del pi
return p
def reconstruct(self, p):
x_hat = []
for c_idx1 in range(self.n_channels):
x_tmp = torch.stack([
p[c_idx1][c_idx2].loc.detach()
for c_idx2 in range(self.n_channels)]).mean(dim=0)
x_hat.append(x_tmp.cpu().numpy())
del x_tmp
return x_hat
def forward(self, x):
qs = self.encode(x)
z = [q.rsample() for q in qs]
if self.nodecoding:
return z, {"q": qs, "x": x}
else:
p = self.decode(z)
return p, {"q": qs, "x": x}
def p_to_prediction(self, p):
""" Get the prediction from various types of distributions.
"""
if isinstance(p, list):
return [self.p_to_prediction(_p) for _p in p]
elif isinstance(p, Normal):
pred = p.loc.cpu().detach().numpy()
elif isinstance(p, Bernoulli):
pred = p.probs.cpu().detach().numpy()
else:
raise NotImplementedError
return pred
def apply_threshold(self, z, threshold, keep_dims=True, reorder=False):
""" Apply dropout threshold.
Parameters
----------
z: Tensor
distribution samples.
threshold: float
dropout threshold.
keep_dims: bool default True
dropout lower than threshold is set to 0.
reorder: bool default False
reorder dropout rates.
Returns
-------
z_keep: list
dropout rates.
"""
assert(threshold <= 1.0)
order = torch.argsort(self.dropout).squeeze()
keep = (self.dropout < threshold).squeeze()
z_keep = []
for drop in z:
if keep_dims:
drop[:, ~keep] = 0
else:
drop = drop[:, keep]
order = torch.argsort(
self.dropout[self.dropout < threshold]).squeeze()
if reorder:
drop = drop[:, order]
z_keep.append(drop)
del drop
return z_keep
@property
def dropout(self):
if self.sparse:
alpha = torch.exp(self.log_alpha.detach().cpu())
return alpha / (alpha + 1)
else:
raise NotImplementedError
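# A minimal usage sketch (batch size, channel count and feature dimensions are
# assumptions; the exact input shape is dictated by the dense VAE defined in .vae):
# model = MCVAE(latent_dim=5, n_channels=2, n_feats=[10, 20], sparse=True)
# x = [torch.randn(8, 10), torch.randn(8, 20)]
# p, extra = model(x)           # p[i][j]: channel i reconstructed from channel j's code
# x_hat = model.reconstruct(p)  # per-channel reconstructions averaged over latent codes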
| 33.213953
| 77
| 0.545022
| 6,297
| 0.881809
| 0
| 0
| 208
| 0.029128
| 0
| 0
| 3,019
| 0.42277
|
a1a93df58c13961d6720cb2c8092c988d4421933
| 5,312
|
py
|
Python
|
3.algorithmic_expert/Tries/1.Suffix Trie Construction.py
|
jimmymalhan/Coding_Interview_Questions_Python_algoexpert
|
94e8b4c63e8db92793b99741120a09f22806234f
|
[
"MIT"
] | 1
|
2020-10-05T04:55:26.000Z
|
2020-10-05T04:55:26.000Z
|
3.algorithmic_expert/Tries/1.Suffix Trie Construction.py
|
jimmymalhan/Coding_Interview_Questions_Python_algoexpert
|
94e8b4c63e8db92793b99741120a09f22806234f
|
[
"MIT"
] | null | null | null |
3.algorithmic_expert/Tries/1.Suffix Trie Construction.py
|
jimmymalhan/Coding_Interview_Questions_Python_algoexpert
|
94e8b4c63e8db92793b99741120a09f22806234f
|
[
"MIT"
] | null | null | null |
# Problem Name: Suffix Trie Construction
# Problem Description:
# Write a SuffixTrie class for Suffix-Trie-like data structures. The class should have a root property set to be the root node of the trie and should support:
# - Creating the trie from a string; this will be done by calling populateSuffixTrieFrom method upon class instantiation(creation), which should populate the root of the class.
# - Searching for strings in the trie.
# Note that every string added to the trie should end with special endSymbol character: "*".
####################################
# Sample Input (for creation):
# string = "babc"
# Sample Output (for creation):
# The structure below is the root of the trie:
# {
# "c": {"*": true},
# "b": {
# "c": {"*": true},
# "a": {"b": {"c": {"*": true}}},
# },
# "a": {"b": {"c": {"*": true}}},
# }
# Sample Input (for searching in the suffix trie above):
# string = "abc"
# Sample Output (for searching in the suffix trie above):
# True
####################################
"""
Explain the solution:
- Building a suffix-trie-like data structure consists of essentially storing every suffix of a given string in a trie. To do so, iterate through the input string one character at a time, and insert every substring starting at each character and ending at the end of string into the trie.
- To insert a string into the trie, start by adding the first character of the string into the root node of the trie, mapping it to an empty hash table if it isn't already there. Then, iterate through the rest of the string, inserting each of the remaining characters into the previous character's corresponding node (or hash table) in the trie, making sure to add an endSymbol "*" at the end.
- Searching the trie for a specific string should follow a nearly identical logic to the one used to add a string in the trie.
# Creation: O(n^2) time | O(n^2) space - where n is the length of the input string
# Searching: O(m) time | O(1) space - where m is the length of the input string
##################
Detailed explanation of the Solution:
create a class called SuffixTrie:
initialize function takes in a string:
initialize the class with root as an empty hash table
        initialize the class with an endSymbol variable that is set to "*"
create a method called populateSuffixTrieFrom with a parameter of string
# Creation:
initialize function populateSuffixTrieFrom takes in a string:
iterate as i through the string one character at a time:
            use the helper function insertSubstringStartingAt with the parameter of the string and the current character (i)
    initialize function insertSubstringStartingAt takes in a string and a character (i):
create a variable called node that is set to the root of the trie
iterate as j through the string starting at the character(i) and ending at the end of the string:
create a variable called letter that is set to the current string[j]
if the letter is not in the node:
create a new hash table and set it to the node[letter] # this is the first time we've seen this letter
create a variable called node that is set to the node[letter] # this is the node we're currently at
node[self.endSymbol] = True # insert the endSymbol "*" at the end of the string
# Searching:
initialize function contains takes in a string:
create a variable called node that is set to the root of the trie
iterate as letter through the string:
if the letter is not in the node:
return False
create a variable called node that is set to the node[letter]
return self.endSymbol in node # return True if the endSymbol "*" is in the node
"""
####################################
class SuffixTrie:
def __init__(self, string):
self.root = {}
self.endSymbol = "*"
self.populateSuffixTrieFrom(string) #call the populateSuffixTrieFrom function with the string as a parameter
# Creation
def populateSuffixTrieFrom(self, string):
for i in range(len(string)):
self.insertSubstringStartingAt(string, i) #insert the substring starting at each character and ending at the end of string into the trie
def insertSubstringStartingAt(self, string, i):
node = self.root
for j in range(i, len(string)):#iterate through the string starting at the index i
letter = string[j] #get the letter at the index j
if letter not in node:
node[letter] = {} #if the letter is not in the node, add it to the node and map it to an empty hash table
node = node[letter] # this is the node that we are currently at
node[self.endSymbol] = True
# Searching
def contains(self, string):
node = self.root #start at the root node
for letter in string:
if letter not in node: #if the current letter is not in the node, return false
return False
node = node[letter] #move to the next node
return self.endSymbol in node #return True if the endSymbol "*" is in the node
def main():
string = "babc"
trie = SuffixTrie(string)
print(trie.root)
if __name__ == '__main__':
main()
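# A brief usage sketch beyond main() above (not part of the original solution file):
# trie = SuffixTrie("babc")
# trie.contains("abc")   # True  -- "abc" is a suffix of "babc"
# trie.contains("ba")    # False -- "ba" is a prefix of "babc", not a suffix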
| 47.855856
| 392
| 0.672063
| 1,348
| 0.253765
| 0
| 0
| 0
| 0
| 0
| 0
| 4,369
| 0.822477
|
a1a9b8f9731ec54e8ec54f439eb2503d5d2d061e
| 961
|
py
|
Python
|
test/test_host.py
|
waylonwang/pure-python-adb
|
417539119611f93cf079a7d0e05e575df3e3446e
|
[
"MIT"
] | null | null | null |
test/test_host.py
|
waylonwang/pure-python-adb
|
417539119611f93cf079a7d0e05e575df3e3446e
|
[
"MIT"
] | null | null | null |
test/test_host.py
|
waylonwang/pure-python-adb
|
417539119611f93cf079a7d0e05e575df3e3446e
|
[
"MIT"
] | 1
|
2020-10-08T10:18:13.000Z
|
2020-10-08T10:18:13.000Z
|
def test_list_devices(client):
devices = client.devices()
assert len(devices) > 0
assert any(map(lambda device: device.serial == "emulator-5554", devices))
def test_list_devices_by_state(client):
devices = client.devices(client.BOOTLOADER)
assert len(devices) == 0
devices = client.devices(client.OFFLINE)
assert len(devices) == 0
devices = client.devices(client.DEVICE)
assert len(devices) == 1
def test_version(client):
version = client.version()
assert type(version) == int
assert version != 0
def test_list_forward(client, device):
client.killforward_all()
result = client.list_forward()
assert not result
device.forward("tcp:6000", "tcp:6000")
result = client.list_forward()
assert result["emulator-5554"]["tcp:6000"] == "tcp:6000"
client.killforward_all()
result = client.list_forward()
assert not result
def test_features(client):
assert client.features()
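# The `client` and `device` arguments above are pytest fixtures assumed to live in a
# conftest.py; a rough sketch (the import path is an assumption and may differ between
# versions of the library; it is not part of this file):
# import pytest
# from ppadb.client import Client
#
# @pytest.fixture
# def client():
#     return Client(host="127.0.0.1", port=5037)
#
# @pytest.fixture
# def device(client):
#     return client.device("emulator-5554")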
| 25.289474
| 77
| 0.689906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.072841
|
a1a9ddb3b1fe60f0adead9941a1fa52ce26179fe
| 2,026
|
py
|
Python
|
Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py
|
Joker-L0912/Tms-GCN-Py
|
daed1c704e797cbb86d219d24b878284f3d5c426
|
[
"Apache-2.0"
] | null | null | null |
Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py
|
Joker-L0912/Tms-GCN-Py
|
daed1c704e797cbb86d219d24b878284f3d5c426
|
[
"Apache-2.0"
] | null | null | null |
Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py
|
Joker-L0912/Tms-GCN-Py
|
daed1c704e797cbb86d219d24b878284f3d5c426
|
[
"Apache-2.0"
] | null | null | null |
import copy
import numpy as np
import torch
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.callbacks import Callback
class BestEpochCallback(Callback):
TORCH_INF = torch_inf = torch.tensor(np.Inf)
MODE_DICT = {
"min": (torch_inf, "min"),
"max": (-torch_inf, "max"),
# "max": (100, "max"),
}
MONITOR_OP_DICT = {"min": torch.lt, "max": torch.gt}
def __init__(self, monitor="", mode="min"):
super(BestEpochCallback, self).__init__()
self.monitor = monitor
self.__init_monitor_mode(monitor, mode)
self.best_epoch = 0
def __init_monitor_mode(self, monitor, mode):
if mode not in self.MODE_DICT and mode != "auto":
rank_zero_warn(
f"PrintBestEpochMetrics mode {mode} is unknown, fallback to auto mode",
RuntimeWarning,
)
mode = "auto"
if mode == "auto":
            rank_zero_warn(
                "mode='auto' is deprecated in v1.1 and will be removed in v1.3."
                " Default value for mode will be 'min' in v1.3.",
                DeprecationWarning,
            )
self.MODE_DICT["auto"] = (
(-self.TORCH_INF, "max")
if monitor is not None and ("acc" in monitor or monitor.startswith("fmeasure"))
else (self.TORCH_INF, "min")
)
self.best_value, self.mode = self.MODE_DICT[mode]
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if (trainer.current_epoch + 1) % trainer.check_val_every_n_epoch != 0:
return
monitor_op = self.MONITOR_OP_DICT[self.mode]
metrics_dict = copy.copy(trainer.callback_metrics)
monitor_value = metrics_dict.get(self.monitor, self.best_value)
if monitor_op(monitor_value.type_as(self.best_value), self.best_value):
self.best_value = monitor_value
self.best_epoch = trainer.current_epoch
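# A minimal usage sketch (the LightningModule `model` is hypothetical and not part
# of this file):
# import pytorch_lightning as pl
# callback = BestEpochCallback(monitor="val_loss", mode="min")
# trainer = pl.Trainer(max_epochs=20, callbacks=[callback])
# trainer.fit(model)
# print(callback.best_epoch, callback.best_value)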
| 38.961538
| 101
| 0.612043
| 1,875
| 0.925469
| 0
| 0
| 0
| 0
| 0
| 0
| 290
| 0.143139
|
a1a9fa4dcfc3f60c5f6176dc7d9d7778a0c79011
| 12,840
|
py
|
Python
|
playhouse/tests.py
|
mikiec84/peewee
|
2abc201d807bfed99048ca67a465ccd758ee7852
|
[
"MIT"
] | 1
|
2020-03-12T17:01:44.000Z
|
2020-03-12T17:01:44.000Z
|
playhouse/tests.py
|
mikiec84/peewee
|
2abc201d807bfed99048ca67a465ccd758ee7852
|
[
"MIT"
] | null | null | null |
playhouse/tests.py
|
mikiec84/peewee
|
2abc201d807bfed99048ca67a465ccd758ee7852
|
[
"MIT"
] | 1
|
2020-03-12T17:02:03.000Z
|
2020-03-12T17:02:03.000Z
|
from hashlib import sha1 as _sha1
import sqlite3
import unittest
from peewee import *
import signals
import sqlite_ext as sqe
import sweepea
db = SqliteDatabase(':memory:')
class BaseSignalModel(signals.Model):
class Meta:
database = db
class ModelA(BaseSignalModel):
a = CharField(default='')
class ModelB(BaseSignalModel):
b = CharField(default='')
class BaseSweepeaModel(sweepea.Model):
class Meta:
database = db
class SModelA(BaseSweepeaModel):
a1 = CharField()
a2 = IntegerField()
class SModelB(BaseSweepeaModel):
a = ForeignKeyField(SModelA)
b1 = CharField()
b2 = BooleanField()
class SModelC(BaseSweepeaModel):
b = ForeignKeyField(SModelB)
c1 = CharField()
# use a disk-backed db since memory dbs only exist for a single connection and
# we need to share the db w/2 for the locking tests. additionally, set the
# sqlite_busy_timeout to 100ms so when we test locking it doesn't take forever
ext_db = sqe.SqliteExtDatabase('tmp.db', timeout=.1)
ext_db.adapter.register_aggregate(sqe.WeightedAverage, 1, 'weighted_avg')
ext_db.adapter.register_aggregate(sqe.WeightedAverage, 2, 'weighted_avg2')
ext_db.adapter.register_collation(sqe.collate_reverse)
ext_db.adapter.register_function(sqe.sha1)
#ext_db.adapter.register_function(sqerank) # < auto register
class BaseExtModel(sqe.Model):
class Meta:
database = ext_db
class User(BaseExtModel):
username = CharField()
password = CharField(default='')
class Post(BaseExtModel):
user = ForeignKeyField(User)
message = TextField()
class FTSPost(Post, sqe.FTSModel):
pass
class Values(BaseExtModel):
klass = IntegerField()
value = FloatField()
weight = FloatField()
class SqliteExtTestCase(unittest.TestCase):
messages = [
'A faith is a necessity to a man. Woe to him who believes in nothing.',
'All who call on God in true faith, earnestly from the heart, will certainly be heard, and will receive what they have asked and desired.',
'Be faithful in small things because it is in them that your strength lies.',
'Faith consists in believing when it is beyond the power of reason to believe.',
'Faith has to do with things that are not seen and hope with things that are not at hand.',
]
def setUp(self):
FTSPost.drop_table(True)
Post.drop_table(True)
User.drop_table(True)
Values.drop_table(True)
Values.create_table()
User.create_table()
Post.create_table()
FTSPost.create_table(tokenize='porter', content_model=Post)
def test_fts(self):
u = User.create(username='u')
posts = []
for message in self.messages:
posts.append(Post.create(user=u, message=message))
pq = FTSPost.select().where(message__match='faith')
self.assertEqual(list(pq), [])
FTSPost.rebuild()
FTSPost.optimize()
# it will stem faithful -> faith b/c we use the porter tokenizer
pq = FTSPost.select().where(message__match='faith').order_by('id')
self.assertEqual([x.message for x in pq], self.messages)
pq = FTSPost.select().where(message__match='believe').order_by('id')
self.assertEqual([x.message for x in pq], [
self.messages[0], self.messages[3],
])
pq = FTSPost.select().where(message__match='thin*').order_by('id')
self.assertEqual([x.message for x in pq], [
self.messages[2], self.messages[4],
])
pq = FTSPost.select().where(message__match='"it is"').order_by('id')
self.assertEqual([x.message for x in pq], [
self.messages[2], self.messages[3],
])
pq = FTSPost.select(['*', sqe.Rank()]).where(message__match='things').order_by(('score', 'desc'))
self.assertEqual([(x.message, x.score) for x in pq], [
(self.messages[4], 2.0 / 3), (self.messages[2], 1.0 / 3),
])
pq = FTSPost.select([sqe.Rank()]).where(message__match='faithful')
self.assertEqual([x.score for x in pq], [.2] * 5)
def test_custom_agg(self):
data = (
(1, 3.4, 1.0),
(1, 6.4, 2.3),
(1, 4.3, 0.9),
(2, 3.4, 1.4),
(3, 2.7, 1.1),
(3, 2.5, 1.1),
)
for klass, value, wt in data:
Values.create(klass=klass, value=value, weight=wt)
vq = Values.select(['klass', ('weighted_avg', 'value', 'wtavg'), ('avg', 'value', 'avg')]).group_by('klass')
q_data = [(v.klass, v.wtavg, v.avg) for v in vq]
self.assertEqual(q_data, [
(1, 4.7, 4.7),
(2, 3.4, 3.4),
(3, 2.6, 2.6),
])
vq = Values.select(['klass', ('weighted_avg2', 'value, weight', 'wtavg'), ('avg', 'value', 'avg')]).group_by('klass')
q_data = [(v.klass, str(v.wtavg)[:4], v.avg) for v in vq]
self.assertEqual(q_data, [
(1, '5.23', 4.7),
(2, '3.4', 3.4),
(3, '2.6', 2.6),
])
def test_custom_collation(self):
data = (
('u1', 'u2', 'u3'),
(('p11', 'p12'), ('p21', 'p22', 'p23'), ()),
)
for user, posts in zip(data[0], data[1]):
u = User.create(username=user)
for p in posts:
Post.create(user=u, message=p)
uq = User.select().order_by('username collate collate_reverse')
self.assertEqual([u.username for u in uq], ['u3', 'u2', 'u1'])
def test_custom_function(self):
s = lambda s: _sha1(s).hexdigest()
u1 = User.create(username='u1', password=s('p1'))
u2 = User.create(username='u2', password=s('p2'))
uq = User.select().where(password=R('sha1(%s)', 'p2'))
self.assertEqual(uq.get(), u2)
uq = User.select().where(password=R('sha1(%s)', 'p1'))
self.assertEqual(uq.get(), u1)
uq = User.select().where(password=R('sha1(%s)', 'p3'))
self.assertEqual(uq.count(), 0)
def test_granular_transaction(self):
conn = ext_db.get_conn()
def test_locked_dbw(lt):
with ext_db.granular_transaction(lt):
User.create(username='u1', password='')
conn2 = ext_db.adapter.connect(ext_db.database, **ext_db.connect_kwargs)
conn2.execute('insert into user (username, password) values (?, ?);', ('x1', ''))
self.assertRaises(sqlite3.OperationalError, test_locked_dbw, 'exclusive')
self.assertRaises(sqlite3.OperationalError, test_locked_dbw, 'immediate')
self.assertRaises(sqlite3.OperationalError, test_locked_dbw, 'deferred')
def test_locked_dbr(lt):
with ext_db.granular_transaction(lt):
User.create(username='u1', password='')
conn2 = ext_db.adapter.connect(ext_db.database, **ext_db.connect_kwargs)
res = conn2.execute('select username from user')
return res.fetchall()
# no read-only stuff with exclusive locks
self.assertRaises(sqlite3.OperationalError, test_locked_dbr, 'exclusive')
# ok to do readonly w/immediate and deferred
self.assertEqual(test_locked_dbr('immediate'), [])
self.assertEqual(test_locked_dbr('deferred'), [('u1',)])
# test everything by hand, by setting the default connection to 'exclusive'
# and turning off autocommit behavior
ext_db.set_autocommit(False)
conn.isolation_level = 'exclusive'
User.create(username='u2', password='') # <-- uncommitted
# now, open a second connection w/exclusive and try to read, it will
# be locked
conn2 = ext_db.adapter.connect(ext_db.database, **ext_db.connect_kwargs)
conn2.isolation_level = 'exclusive'
self.assertRaises(sqlite3.OperationalError, conn2.execute, 'select * from user')
# rollback the first connection's transaction, releasing the exclusive lock
conn.rollback()
ext_db.set_autocommit(True)
with ext_db.granular_transaction('deferred'):
User.create(username='u3', password='')
res = conn2.execute('select username from user order by username;')
self.assertEqual(res.fetchall(), [('u1',), ('u1',), ('u3',)])
class SignalsTestCase(unittest.TestCase):
def setUp(self):
ModelA.create_table(True)
ModelB.create_table(True)
def tearDown(self):
ModelA.drop_table()
ModelB.drop_table()
signals.pre_save._flush()
signals.post_save._flush()
signals.pre_delete._flush()
signals.post_delete._flush()
signals.pre_init._flush()
signals.post_init._flush()
def test_pre_save(self):
state = []
@signals.connect(signals.pre_save)
def pre_save(sender, instance, created):
state.append((sender, instance, instance.get_pk(), created))
m = ModelA()
m.save()
self.assertEqual(state, [(ModelA, m, None, True)])
m.save()
self.assertTrue(m.id is not None)
self.assertEqual(state[-1], (ModelA, m, m.id, False))
def test_post_save(self):
state = []
@signals.connect(signals.post_save)
def post_save(sender, instance, created):
state.append((sender, instance, instance.get_pk(), created))
m = ModelA()
m.save()
self.assertTrue(m.id is not None)
self.assertEqual(state, [(ModelA, m, m.id, True)])
m.save()
self.assertEqual(state[-1], (ModelA, m, m.id, False))
def test_pre_delete(self):
state = []
m = ModelA()
m.save()
@signals.connect(signals.pre_delete)
def pre_delete(sender, instance):
state.append((sender, instance, ModelA.select().count()))
m.delete_instance()
self.assertEqual(state, [(ModelA, m, 1)])
def test_post_delete(self):
state = []
m = ModelA()
m.save()
@signals.connect(signals.post_delete)
def post_delete(sender, instance):
state.append((sender, instance, ModelA.select().count()))
m.delete_instance()
self.assertEqual(state, [(ModelA, m, 0)])
def test_pre_init(self):
state = []
m = ModelA(a='a')
m.save()
@signals.connect(signals.pre_init)
def pre_init(sender, instance):
state.append((sender, instance.a))
ModelA.get()
self.assertEqual(state, [(ModelA, '')])
def test_post_init(self):
state = []
m = ModelA(a='a')
m.save()
@signals.connect(signals.post_init)
def post_init(sender, instance):
state.append((sender, instance.a))
ModelA.get()
self.assertEqual(state, [(ModelA, 'a')])
def test_sender(self):
state = []
@signals.connect(signals.post_save, sender=ModelA)
def post_save(sender, instance, created):
state.append(instance)
m = ModelA.create()
self.assertEqual(state, [m])
m2 = ModelB.create()
self.assertEqual(state, [m])
def test_connect_disconnect(self):
state = []
@signals.connect(signals.post_save, sender=ModelA)
def post_save(sender, instance, created):
state.append(instance)
m = ModelA.create()
self.assertEqual(state, [m])
signals.post_save.disconnect(post_save)
m2 = ModelA.create()
self.assertEqual(state, [m])
class SweepeaTestCase(unittest.TestCase):
def setUp(self):
SModelC.drop_table(True)
SModelB.drop_table(True)
SModelA.drop_table(True)
SModelA.create_table()
SModelB.create_table()
SModelC.create_table()
a1 = SModelA.create(a1='foo', a2=1)
a2 = SModelA.create(a1='bar', a2=2)
a3 = SModelA.create(a1='baz', a2=3)
b1 = SModelB.create(a=a1, b1='herp', b2=True)
b2 = SModelB.create(a=a2, b1='derp', b2=False)
c1 = SModelC.create(b=b1, c1='hurr', c2=0)
c2 = SModelC.create(b=b2, c1='durr', c2=1)
def test_queries(self):
sq = sweepea.T(SModelA).q().order_by('id')
self.assertEqual([x.a1 for x in sq], ['foo', 'bar', 'baz'])
t = (SModelB * SModelA) ** (SModelA.a1 == 'foo')
self.assertEqual([x.b1 for x in t], ['herp'])
t = (SModelA) ** (SModelA.a2 > 1) % SModelA.a1
self.assertEqual([x.a1 for x in t], ['bar', 'baz'])
t = (SModelA) ** (SModelA.a2 > 1) % (SModelA.a1) << -SModelA.id
self.assertEqual([x.a1 for x in t], ['baz', 'bar'])
t = (SModelC * SModelB * SModelA) ** (SModelB.b2 == True) % (SModelC.c1, SModelB.b1)
self.assertEqual([(x.c1, x.b1) for x in t], [('hurr', 'herp')])
| 33.007712
| 147
| 0.597118
| 12,034
| 0.937227
| 0
| 0
| 1,126
| 0.087695
| 0
| 0
| 2,014
| 0.156854
|
a1aa7f5e730996934c8876a85b426f2a47d1eacc
| 799
|
py
|
Python
|
appengine/experimental/crbadge/testdata/upload.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/experimental/crbadge/testdata/upload.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 21
|
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
appengine/experimental/crbadge/testdata/upload.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import os, sys
import optparse
import json, urllib
import httplib2
import urlparse
def upload(filenames, url, password):
parsed = urlparse.urlparse(url)
http = httplib2.Http()
for filename in filenames:
with open(filename) as f:
# Load and validate JSON
o = json.load(f)
s = json.dumps(o)
resp, cont = http.request(url, method='POST', body=urllib.urlencode(
{'data': s, 'password': password}))
print 'sent data to %s' % url
print 'response (%s): %s: %s' % (resp['status'], resp, cont)
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--url')
parser.add_option('-p', '--password')
options, args = parser.parse_args()
upload(args, options.url, options.password)
if __name__ == '__main__':
main()
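# Example invocation (URL, password and file names are placeholders, not real values):
#   ./upload.py --url https://example-app.appspot.com/update --password secret \
#       badges1.json badges2.json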
| 22.828571
| 72
| 0.649562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.185232
|
a1ab946e745fb18496c5d63e37229b34b0071a28
| 112
|
py
|
Python
|
libs/test_utils.py
|
bongnv/sublime-go
|
9f5f4f9795357ec595f73c1f71e479eca694b61e
|
[
"MIT"
] | 6
|
2018-05-12T04:43:36.000Z
|
2018-09-21T17:44:53.000Z
|
libs/test_utils.py
|
bongnv/sublime-go
|
9f5f4f9795357ec595f73c1f71e479eca694b61e
|
[
"MIT"
] | null | null | null |
libs/test_utils.py
|
bongnv/sublime-go
|
9f5f4f9795357ec595f73c1f71e479eca694b61e
|
[
"MIT"
] | null | null | null |
import unittest
class TestIsGoView(unittest.TestCase):
def test_nil(self):
self.assertFalse(None)
| 16
| 38
| 0.723214
| 93
| 0.830357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a1ac73057ccc5855df2d0931ac3ee0a8a54ddd18
| 855
|
py
|
Python
|
python-algorithm/leetcode/problem_457.py
|
isudox/nerd-algorithm
|
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
|
[
"MIT"
] | 5
|
2017-06-11T09:19:34.000Z
|
2019-01-16T16:58:31.000Z
|
python-algorithm/leetcode/problem_457.py
|
isudox/leetcode-solution
|
60085e64deaf396a171367affc94b18114565c43
|
[
"MIT"
] | 5
|
2020-03-22T13:53:54.000Z
|
2020-03-23T08:49:35.000Z
|
python-algorithm/leetcode/problem_457.py
|
isudox/nerd-algorithm
|
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
|
[
"MIT"
] | 1
|
2019-03-02T15:50:43.000Z
|
2019-03-02T15:50:43.000Z
|
"""457. Circular Array Loop
https://leetcode.com/problems/circular-array-loop/
"""
from typing import List
class Solution:
def circular_array_loop(self, nums: List[int]) -> bool:
def helper(start: int, cur: int, count: int, visited) -> int:
if nums[cur] * nums[start] < 0:
return False
if cur == start and count > 0:
return count > 1
if cur in visited:
return False
visited.add(cur)
next_pos = cur + nums[cur]
count += 1
if 0 <= next_pos < len(nums):
return helper(start, next_pos, count, visited)
return helper(start, next_pos % len(nums), count, visited)
for i in range(len(nums)):
if helper(i, i, 0, set()):
return True
return False
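# Two worked examples, traced against the helper above (inputs are assumptions):
# Solution().circular_array_loop([2, -1, 1, 2, 2])  # True: indices 0 -> 2 -> 3 -> 0 cycle
# Solution().circular_array_loop([-1, 2])           # False: the only cycle has length 1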
| 31.666667
| 70
| 0.527485
| 745
| 0.871345
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.095906
|
a1ac757a73cea2cb4a80f87ddc034e4b6d7ef1b0
| 10,937
|
py
|
Python
|
task/task2.py
|
joseph9991/Milestone1
|
08f95e845a743539160e9a7330ca58ea20240229
|
[
"MIT"
] | null | null | null |
task/task2.py
|
joseph9991/Milestone1
|
08f95e845a743539160e9a7330ca58ea20240229
|
[
"MIT"
] | null | null | null |
task/task2.py
|
joseph9991/Milestone1
|
08f95e845a743539160e9a7330ca58ea20240229
|
[
"MIT"
] | null | null | null |
import pandas as pd
from pandas import read_csv
import os
import sys
import glob
import re
import soundfile as sf
import pyloudnorm as pyln
from .thdncalculator import execute_thdn
class Task2:
def __init__(self,data,file_name):
self.df = pd.DataFrame.from_dict(data, orient='columns')
self.file_name = file_name
self.speakers = []
self.speaker_set = ()
def merge_timestamp(self):
'''
        This function helps us to correct small errors in the speaker end
        time obtained from the response from Task 1.
        Basically, it uses the next speaker's start time as the end time
        of the current speaker.
'''
df_length = len(self.df.index)
cursor = 0
speaker_list = self.df['speaker'].values.tolist()
start_list = self.df['start_time'].values.tolist()
end_list = self.df['end_time'].values.tolist()
self.speaker_set = sorted(list(set(speaker_list)))
for i in range(0,len(speaker_list)):
current_row = []
current_speaker = speaker_list[i]
if cursor == 0:
current_row = [current_speaker,start_list[0],end_list[0]]
self.speakers.append(current_row)
cursor = cursor + 1
continue
if current_speaker == speaker_list[i] and current_speaker == speaker_list[i-1]:
self.speakers[-1][2] = end_list[i]
else:
current_row = [current_speaker,start_list[i],end_list[i]]
self.speakers.append(current_row)
cursor = cursor + 1
for i in range(len(self.speakers)):
if i == len(self.speakers)-1:
break
self.speakers[i][2] = self.speakers[i+1][1]
print("\nComputed merged Timestamps for every speaker!!")
def trim(self):
'''
        This function helps us to trim the audio file into per-speaker segments using FFmpeg.
But, there will be multiple files per speaker
OUTPUT: spk_0-1.wav,spk_0-2.wav,spk_0-3.wav
spk_1-1.wav, spk_1-2.wav
spk_2-1.wav,spk_2-2.wav
'''
cursor = 0
for speaker in self.speakers:
new_file = speaker[0]+str(cursor)+'.wav'
command = f"ffmpeg -loglevel quiet -y -i {self.file_name} -ss {speaker[1]} -to \
{speaker[2]} -c:v copy -c:a copy {new_file}"
try:
os.system(command)
content = "file '{}'".format(new_file)
except Exception as err:
print(f'Error occurred: {err}')
cursor = cursor + 1
print("Divided audio file into {} individual speaker files!!".format(len(self.speakers)))
def generate_files(self):
'''
        Merges the per-speaker segment files into a single file per speaker.
OUTPUT: spk_0.wav,spk_1.wav,spk_2.wav
'''
txt_files = []
for i in range(len(self.speaker_set)):
fileName = '{}.txt'.format(self.speaker_set[i])
with open(fileName,'a+') as f:
txt_files.append(fileName)
wavFiles = glob.glob('{}*.wav'.format(self.speaker_set[i]))
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
wavFiles = sorted(wavFiles,key=alphanum_key)
for wavFile in wavFiles:
f.write('file \'{}\'\n'.format(wavFile))
# speaker_set = wavFiles
# Deleting all the text files needed for merging
for txt_file in txt_files:
command = f"ffmpeg -loglevel quiet -y -f concat -i {txt_file} -c copy {txt_file[:-4]}.wav"
os.system(command)
os.remove(txt_file)
## Deleting the individual speaker audio clip [which were not merged]
# for wav_file in glob.glob('spk_[0-4][0-9]*.wav'):
# os.remove(wav_file)
print("Merged the individual speaker files into {} files!!\n".format(len(self.speaker_set)))
def calculate_rank(self):
'''
        Calculates the loudness of each speaker file and its THDN value
'''
speaker_loudness = {}
speaker_thdn = {}
speaker_frequency = {}
for speaker in self.speaker_set:
wav_file = speaker+'.wav'
data, rate = sf.read(wav_file)
print('Analyzing "' + wav_file + '"...')
meter = pyln.Meter(rate)
loudness = meter.integrated_loudness(data)
speaker_loudness[speaker] = loudness
response = execute_thdn(wav_file)
speaker_thdn[speaker] = response['thdn']
speaker_frequency[speaker] = response['frequency']
speaker_loudness = sorted( ((v,k) for k,v in speaker_loudness.items()), reverse=True)
print("\n\nThere is no \"better\" loudness. But the larger the value (closer to 0 dB), the louder. ")
print("--------------------------------------------------------------------------------------------")
print("Speaker\t\tLoudness\t\tTHDN\t\tFrequency\tRank")
print("--------------------------------------------------------------------------------------------")
for i in range(len(speaker_loudness)):
print('{}\t {} LUFS\t{}\t\t{}\t {}'.format(speaker_loudness[i][1], speaker_loudness[i][0],
speaker_thdn[speaker_loudness[i][1]], speaker_frequency[speaker_loudness[i][1]],i+1))
print("--------------------------------------------------------------------------------------------")
def execute_all_functions(self):
print("\n\nCommencing Task 2: Judge Sound Quality")
self.merge_timestamp()
self.trim()
self.generate_files()
self.calculate_rank()
return self.speaker_set
# # For Testing
# if __name__ == "__main__":
# file_name = sys.argv[1]
# # Temp Code
# data =[
# {
# "Unnamed: 0": 0,
# "start_time": "00:00:00",
# "end_time": "00:00:00",
# "speaker": "spk_1",
# "comment": "Well,",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 1,
# "start_time": "00:00:01",
# "end_time": "00:00:02",
# "speaker": "spk_1",
# "comment": "Hi, everyone.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 2,
# "start_time": "00:00:03",
# "end_time": "00:00:05",
# "speaker": "spk_0",
# "comment": "Everyone's money. Good",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 3,
# "start_time": "00:00:05",
# "end_time": "00:00:10",
# "speaker": "spk_2",
# "comment": "morning, everyone. Money. Thanks for joining. Uh, so let's quickly get started with the meeting.",
# "stopwords": 4,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 4,
# "start_time": "00:00:11",
# "end_time": "00:00:14",
# "speaker": "spk_2",
# "comment": "Today's agenda is to discuss how we plan to increase the reach off our website",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 5,
# "start_time": "00:00:15",
# "end_time": "00:00:20",
# "speaker": "spk_2",
# "comment": "and how to make it popular. Do you have any ideas, guys? Yes.",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 6,
# "start_time": "00:00:20",
# "end_time": "00:00:22",
# "speaker": "spk_0",
# "comment": "Oh, Whoa. Um,",
# "stopwords": 0,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 7,
# "start_time": "00:00:23",
# "end_time": "00:00:36",
# "speaker": "spk_1",
# "comment": "it's okay. Thank you so much. Yes. Asai was saying one off. The ideas could be to make it more such friendly, you know? And to that I think we can. We need to improve the issue off our website.",
# "stopwords": 21,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 8,
# "start_time": "00:00:37",
# "end_time": "00:00:41",
# "speaker": "spk_2",
# "comment": "Yeah, that's a great point. We certainly need to improve the SC off our site.",
# "stopwords": 6,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 9,
# "start_time": "00:00:42",
# "end_time": "00:00:43",
# "speaker": "spk_2",
# "comment": "Let me let me take a note of this.",
# "stopwords": 4,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 10,
# "start_time": "00:00:45",
# "end_time": "00:00:57",
# "speaker": "spk_0",
# "comment": "How about using social media channels to promote our website? Everyone is on social media these days on way. We just need to target the right audience and share outside with them. Were often Oh, what do you think?",
# "stopwords": 18,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 11,
# "start_time": "00:00:58",
# "end_time": "00:01:05",
# "speaker": "spk_2",
# "comment": "It's definitely a great idea on since we already have our social accounts, I think we can get started on this one immediately.",
# "stopwords": 11,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 12,
# "start_time": "00:01:06",
# "end_time": "00:01:11",
# "speaker": "spk_0",
# "comment": "Yes, I can work on creating a plan for this. I come up with the content calendar base.",
# "stopwords": 9,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 13,
# "start_time": "00:01:11",
# "end_time": "00:01:17",
# "speaker": "spk_1",
# "comment": "Yeah, and I can start with creating the CEO content for all the periods off our website.",
# "stopwords": 10,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 14,
# "start_time": "00:01:17",
# "end_time": "00:01:24",
# "speaker": "spk_2",
# "comment": "Awesome. I think we already have a plan in place. Let's get rolling Eyes. Yeah, definitely.",
# "stopwords": 5,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 15,
# "start_time": "00:01:24",
# "end_time": "00:01:25",
# "speaker": "spk_2",
# "comment": "Yeah, sure.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 16,
# "start_time": "00:01:26",
# "end_time": "00:01:33",
# "speaker": "spk_2",
# "comment": "Great. Thanks. Thanks, everyone, for your ideas. I'm ending the call now. Talk to you soon. Bye. Bye bye. Thanks.",
# "stopwords": 5,
# "fillerwords": 0
# }]
# obj = Task2(data,file_name)
# obj.execute_all_functions()
| 32.357988
| 241
| 0.526744
| 4,869
| 0.445186
| 0
| 0
| 0
| 0
| 0
| 0
| 7,592
| 0.694157
|
a1acd3aad52a9f207d22596dfa16d615ad5b5b36
| 6,253
|
py
|
Python
|
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_hub as hub
from tf_agents.networks import network
# tensorflow_text must be imported so the custom ops used by the TF Hub BERT
# preprocessing model are registered, even though the module is not referenced directly.
import tensorflow_text as text
embedding = "https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"
tfhub_handle_encoder = (
"https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
)
tfhub_handle_preprocess = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
class HubPolicyFC(network.Network):
"""Policy for DQN agent utilizing pre-trained NNLM embedding into FC layers."""
def __init__(
self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
):
super().__init__()
num_actions = action_spec.maximum - action_spec.minimum + 1
assert num_actions == num_verb * num_obj
self.num_verb = num_verb
self.num_obj = num_obj
self.hub_layer = hub.KerasLayer(
embedding,
input_shape=[],
dtype=tf.string,
trainable=True
)
self.fc1 = tf.keras.layers.Dense(128, activation="relu")
self.fc2 = tf.keras.layers.Dense(64, activation="relu")
self.bn1 = tf.keras.layers.BatchNormalization()
self.bn2 = tf.keras.layers.BatchNormalization()
self.do1 = tf.keras.layers.Dropout(0.1)
self.do2 = tf.keras.layers.Dropout(0.1)
self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
self.number_of_strings = input_tensor_spec.shape[0]
def call(self, observation, network_state=(), training=False):
"""A wrapper around `Network.call`.
Args:
            observation: The input to `self.call`, matching `self.input_tensor_spec`
network_state: A state to pass to the network used by the RNN layer
training: Optional argument to set to training mode
Returns:
A tuple `(outputs, new_network_state)`.
"""
if network_state is not None and len(network_state) == 0:
network_state = None
flattened_observation = tf.reshape(observation, (-1))
embedded_observations = self.hub_layer(flattened_observation, training=training)
embedded_observations = tf.reshape(
embedded_observations, (observation.shape[0], observation.shape[1], 128)
)
out = self.bn1(embedded_observations, training=training)
out = self.fc1(out, training=training)
        out = self.do1(out, training=training)
out = self.bn2(out, training=training)
out = self.fc2(out, training=training)
        out = self.do2(out, training=training)
verb_q_value = self.verb_layer(out, training=training)
obj_q_value = self.obj_layer(out, training=training)
# q_value_multiplied = tf.matmul(verb_q_value, obj_q_value, transpose_a=True)
# q_values = tf.reshape(q_value_multiplied, (observation.shape[0], -1))
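        # Factorised Q-values: broadcast-add the per-verb and per-object scores into a
        # (batch, strings, verbs, objects) grid, sum over the observation strings, and
        # flatten verbs x objects into a single action dimension of size num_actions.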
verb_q_value = tf.reshape(verb_q_value, (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1))
obj_q_value = tf.reshape(obj_q_value, (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]))
q_values_added = tf.add(verb_q_value, obj_q_value)
q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
return q_values, ()
class HubPolicyBert(network.Network):
"""Policy for DQN agent utilizing pre-trained smallBert into FC layers. """
def __init__(
self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
):
super().__init__()
num_actions = action_spec.maximum - action_spec.minimum + 1
assert num_actions == num_verb * num_obj
self.num_verb = num_verb
self.num_obj = num_obj
self.bert_preprocess_model = hub.KerasLayer(
tfhub_handle_preprocess,
input_shape=[],
dtype=tf.string,
)
self.bert_model = hub.KerasLayer(tfhub_handle_encoder, trainable=True)
self.fc1 = tf.keras.layers.Dense(128, activation="relu")
self.do1 = tf.keras.layers.Dropout(0.1)
self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
self.verbobj_layer = tf.keras.layers.Dense(num_actions, activation=None)
self.number_of_strings = input_tensor_spec.shape[0]
def call(self, observation, network_state=(), training=False):
"""A wrapper around `Network.call`.
Args:
observation: The input to `self.call`, matching `self.input_tensor_spec`
network_state: A state to pass to the network used by the RNN layer
training: Optional argument to set to training mode
Returns:
A tuple `(outputs, new_network_state)`.
"""
if network_state is not None and len(network_state) == 0:
network_state = None
flattened_observation = tf.reshape(observation, (-1))
encoder_inputs = self.bert_preprocess_model(flattened_observation)
outputs = self.bert_model(encoder_inputs, training=training)
out = outputs["pooled_output"]
out = tf.reshape(out, (observation.shape[0], observation.shape[1], 128))
# out = self.do1(out, training=training)
# out = self.fc1(out, training=training)
verb_q_value = self.verb_layer(out, training=training)
obj_q_value = self.obj_layer(out, training=training)
# q_value_multiplied = tf.matmul(verb_q_value, obj_q_value, transpose_a=True)
# q_values = tf.reshape(q_value_multiplied, (observation.shape[0], -1))
verb_q_value = tf.reshape(verb_q_value, (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1))
obj_q_value = tf.reshape(obj_q_value, (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]))
q_values_added = tf.add(verb_q_value, obj_q_value)
q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
return q_values, ()
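# --- Minimal usage sketch (assumption, not part of the original training code). ---
# Builds a HubPolicyFC for a hypothetical game with 10 verbs and 20 objects and scores
# a batch of two observations of 4 text strings each; the spec shapes and the example
# commands are illustrative only, and running this downloads the NNLM module from TF Hub.
if __name__ == "__main__":
    from tf_agents.specs import tensor_spec

    num_verb, num_obj = 10, 20
    obs_spec = tf.TensorSpec(shape=(4,), dtype=tf.string)
    act_spec = tensor_spec.BoundedTensorSpec(
        shape=(), dtype=tf.int32, minimum=0, maximum=num_verb * num_obj - 1
    )
    policy_net = HubPolicyFC(obs_spec, act_spec, num_verb, num_obj)

    observations = tf.constant([["look", "go north", "inventory", "open door"]] * 2)
    q_values, _ = policy_net(observations)
    print(q_values.shape)  # expected: (2, num_verb * num_obj)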
| 40.869281
| 120
| 0.669119
| 5,815
| 0.929954
| 0
| 0
| 0
| 0
| 0
| 0
| 1,532
| 0.245002
|
a1ad8c52da06d6abbbc870ab6152a1b0cfde52b7
| 475
|
py
|
Python
|
meiduo_mall/apps/orders/urls.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/orders/urls.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/orders/urls.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python
# _*_ coding:utf-8 _*_
from django.conf.urls import url
from . import views
urlpatterns = [
    # 1. orders/settlement/ -- order settlement page
url(r'^orders/settlement/$', views.OrdersSettlementView.as_view(), name='settlement'),
    # 2. orders/commit/ -- submit an order
url(r'^orders/commit/$', views.OrdersCommitView.as_view(), name='commit'),
    # 3. orders/success/ -- order success page
url(r'^orders/success/$', views.OrdersSuccessView.as_view(), name='sucess'),
]
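# Usage sketch (assumption, not part of this app): these routes are typically mounted
# by the project-level URLconf, e.g. in meiduo_mall/urls.py (dotted path illustrative):
#
#     from django.conf.urls import url, include
#     urlpatterns = [
#         url(r'^', include('apps.orders.urls')),
#     ]
#
# after which reverse('settlement') would resolve to '/orders/settlement/'.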
| 22.619048
| 90
| 0.661053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.48497
|
a1ade519e607956e6b09f57c472fa7d337099ebf
| 138
|
py
|
Python
|
goldmeister/__init__.py
|
USDA-ARS-NWRC/goldmeister
|
b4624a355e551c4610834a9dcb971524c45bb437
|
[
"CC0-1.0"
] | null | null | null |
goldmeister/__init__.py
|
USDA-ARS-NWRC/goldmeister
|
b4624a355e551c4610834a9dcb971524c45bb437
|
[
"CC0-1.0"
] | 1
|
2020-09-17T16:16:13.000Z
|
2020-09-17T16:21:00.000Z
|
goldmeister/__init__.py
|
USDA-ARS-NWRC/goldmeister
|
b4624a355e551c4610834a9dcb971524c45bb437
|
[
"CC0-1.0"
] | null | null | null |
"""Top-level package for Goldmeister."""
__author__ = """Micah Johnson"""
__email__ = 'micah.johnson150@gmail.com'
__version__ = '0.2.0'
| 23
| 40
| 0.702899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.681159
|
a1b0c44fad44484d33a19381232ed8782c4771bb
| 1,014
|
py
|
Python
|
db/update.py
|
msgangwar/Leaderboard
|
d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96
|
[
"MIT"
] | 2
|
2019-02-13T04:40:10.000Z
|
2019-02-14T17:56:09.000Z
|
db/update.py
|
msgangwar/Leaderboard
|
d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96
|
[
"MIT"
] | 3
|
2021-02-08T20:28:25.000Z
|
2021-06-01T23:21:51.000Z
|
db/update.py
|
msgangwar/Leaderboard
|
d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96
|
[
"MIT"
] | 6
|
2019-02-13T04:40:16.000Z
|
2020-10-02T05:26:25.000Z
|
from user import User
from Env import Env_Vars
from fetch_from_sheet import SheetData
from pymongo import MongoClient
from pprint import pprint
env_vars = Env_Vars()
MongoURI = env_vars.MongoURI
client = MongoClient(MongoURI, 27017)
db = client['users']
users = db['users']
def do_update():
sheet = SheetData()
data = sheet.get_sheet()
new_uses = []
user_scores = {}
for user in data:
user_scores[user['handle']] = int(user['score'])
#If a user with this handle does not exist
if users.find({'handle': user['handle']}).count() == 0:
new_uses.append(User([user['name'], user['UID'], user['handle'], 0, 0]).__repr__())
# Insert the new users into the DB
for user in new_uses:
users.insert_one(user)
# update the records
find = users.find()
for user in find:
user['contests'] += 1
x = 0
try:
x = user_scores[user['handle']]
except KeyError:
continue
user['score'] += x
users.save(user)
if __name__ == "__main__":
do_update()
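# --- Sketch (assumption, not part of the original script). ---
# The same update written against the PyMongo 4 API, where Cursor.count() and
# Collection.save() no longer exist; do_update_v4 is a hypothetical name used only
# for illustration and is never called here.
def do_update_v4():
    sheet = SheetData()
    data = sheet.get_sheet()
    user_scores = {row['handle']: int(row['score']) for row in data}

    # Insert users that do not exist yet.
    for row in data:
        if users.count_documents({'handle': row['handle']}) == 0:
            users.insert_one(User([row['name'], row['UID'], row['handle'], 0, 0]).__repr__())

    # Atomically bump contest count and score for users that played this round.
    for user in users.find():
        if user['handle'] in user_scores:
            users.update_one(
                {'_id': user['_id']},
                {'$inc': {'contests': 1, 'score': user_scores[user['handle']]}},
            )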
| 22.533333
| 89
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.196252
|
a1b1b372ea41556cd122b9d3a8b1aaadf901cbd1
| 1,956
|
py
|
Python
|
uvicore/http/OBSOLETE/routes-OLD.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 11
|
2021-03-22T22:07:49.000Z
|
2022-03-08T16:18:33.000Z
|
uvicore/http/OBSOLETE/routes-OLD.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 12
|
2021-03-04T05:51:24.000Z
|
2021-09-22T05:16:18.000Z
|
uvicore/http/OBSOLETE/routes-OLD.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 2
|
2021-03-25T14:49:56.000Z
|
2021-11-17T23:20:29.000Z
|
# @uvicore.service()
# class Routes(RoutesInterface, Generic[R]):
# endpoints: str = None
# @property
# def app(self) -> ApplicationInterface:
# return self._app
# @property
# def package(self) -> PackageInterface:
# return self._package
# @property
# def Router(self) -> R:
# return self._Router
# @property
# def prefix(self) -> str:
# return self._prefix
# def __init__(self,
# app: ApplicationInterface,
# package: PackageInterface,
# Router: R,
# prefix: str
# ):
# self._app = app
# self._package = package
# self._Router = Router
# self._prefix = prefix
# def new_router(self):
# router = self.Router()
# # Add route context into Router
# router.uvicore = Dict({
# 'prefix': self.prefix,
# 'endpoints': self.endpoints,
# })
# return router
# def include(self, module, *, prefix: str = '', tags: List[str] = None) -> None:
# #self.http.controller(controller.route, prefix=self.prefix)
# if type(module) == str:
# # Using a string to point to an endpoint class controller
# controller = load(self.endpoints + '.' + module + '.route')
# uvicore.app.http.include_router(
# controller.object,
# prefix=self.prefix + str(prefix),
# tags=tags,
# )
# else:
# # Passing in an actual router class
# uvicore.app.http.include_router(
# module,
# prefix=self.prefix + str(prefix),
# tags=tags,
# )
# # def Router(self) -> R:
# # return self._Router()
# # IoC Class Instance
# #Routes: RoutesInterface = uvicore.ioc.make('Routes', _Routes)
# # Public API for import * and doc gens
# #__all__ = ['Routes', '_Routes']
| 25.736842
| 85
| 0.528119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,881
| 0.961656
|
a1b3738a830ad504560b84aa6870219df1d05595
| 182
|
py
|
Python
|
tudo/ex052.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | 1
|
2021-07-08T00:35:57.000Z
|
2021-07-08T00:35:57.000Z
|
tudo/ex052.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
tudo/ex052.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um número: '))
# A prime is greater than 1 and has no divisor between 2 and its square root.
primo = n > 1 and all(n % d != 0 for d in range(2, int(n ** 0.5) + 1))
if primo:
    print('{} é um número primo!'.format(n))
else:
    print('{} não é um número primo!'.format(n))
| 30.333333
| 48
| 0.543956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.404255
|
a1b46b1cb092d1e3618170f67ba0443c89c2d63b
| 1,684
|
py
|
Python
|
Firmware/RaspberryPi/backend-pi/PWMController.py
|
librerespire/ventilator
|
c0cfa63f1eae23c20d5d72fe52f42785070bbb3d
|
[
"MIT"
] | 5
|
2020-04-08T12:33:31.000Z
|
2021-04-17T15:45:08.000Z
|
Firmware/RaspberryPi/backend-pi/PWMController.py
|
cmfsx/ventilator
|
996dd5ad5010c19799e03576acf068663276a5e8
|
[
"MIT"
] | 7
|
2020-03-27T13:16:09.000Z
|
2020-06-24T11:15:59.000Z
|
Firmware/RaspberryPi/backend-pi/PWMController.py
|
cmfsx/ventilator
|
996dd5ad5010c19799e03576acf068663276a5e8
|
[
"MIT"
] | 2
|
2020-09-03T16:29:22.000Z
|
2021-01-05T23:17:59.000Z
|
import threading
import time
import RPi.GPIO as GPIO
import logging
import logging.config
# declare logger parameters
logger = logging.getLogger(__name__)
class PWMController(threading.Thread):
""" Thread class with a stop() method.
Handy class to implement PWM on digital output pins """
def __init__(self, thread_id, pin, on_time, off_time):
threading.Thread.__init__(self)
self.__thread_id = thread_id
self.__pin = pin
self.__on_time = on_time
self.__off_time = off_time
self.__stop_event = threading.Event()
# TODO: Setting up the pins should be moved to the main script 'Controller.py'
# GPIO.setmode(GPIO.BCM)
# GPIO.setwarnings(False)
# GPIO.setup(pin, GPIO.OUT)
def stop(self):
self.__stop_event.set()
# print(str(self.__thread_id) + ": set the stop event")
def stopped(self):
return self.__stop_event.is_set()
def run(self):
while True:
if self.stopped():
# print(str(self.__thread_id) + ": thread has stopped. exiting")
                break
logger.debug(str(self.__pin) + ": ON--" + str(self.__on_time))
if self.__on_time > 0.02:
GPIO.output(self.__pin, GPIO.HIGH)
logger.debug("On wait time: %.3f" % self.__on_time)
time.sleep(self.__on_time)
logger.debug(str(self.__pin) + ": OFF--" + str(self.__off_time))
if self.__off_time > 0.02:
GPIO.output(self.__pin, GPIO.LOW)
logger.debug("Off wait time: %.3f" % self.__off_time)
time.sleep(self.__off_time)
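# --- Usage sketch (assumption, not part of the original controller). ---
# Drives GPIO pin 18 (an illustrative pin number) with roughly a 25% duty cycle for one
# second and then stops the thread; pin setup is otherwise handled by the main controller
# script mentioned in the TODO above.
if __name__ == "__main__":
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(18, GPIO.OUT)

    pwm = PWMController(thread_id=1, pin=18, on_time=0.05, off_time=0.15)
    pwm.start()
    time.sleep(1.0)
    pwm.stop()
    pwm.join()
    GPIO.cleanup()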
| 33.68
| 86
| 0.600356
| 1,525
| 0.905582
| 0
| 0
| 0
| 0
| 0
| 0
| 460
| 0.273159
|
a1b53725330b8354a3bae3c9ca65bdec5434db16
| 2,393
|
py
|
Python
|
netforce_account/netforce_account/models/account_balance.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 27
|
2015-09-30T23:53:30.000Z
|
2021-06-07T04:56:25.000Z
|
netforce_account/netforce_account/models/account_balance.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 191
|
2015-10-08T11:46:30.000Z
|
2019-11-14T02:24:36.000Z
|
netforce_account/netforce_account/models/account_balance.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 32
|
2015-10-01T03:59:43.000Z
|
2022-01-13T07:31:05.000Z
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
import time
from netforce.database import get_connection
class Balance(Model):
_name = "account.balance"
_string="Account Balance"
_fields = {
"account_id": fields.Many2One("account.account", "Account", required=True, on_delete="cascade"),
"track_id": fields.Many2One("account.track.categ","Track-1"),
"debit": fields.Decimal("Debit",required=True),
"credit": fields.Decimal("Credit",required=True),
"amount_cur": fields.Decimal("Currency Amt"),
}
def update_balances(self,context={}): # XXX: make faster
db=get_connection()
res=db.query("SELECT account_id,track_id,SUM(debit) AS total_debit,SUM(credit) AS total_credit,SUM(amount_cur*SIGN(debit-credit)) AS total_amount_cur FROM account_move_line GROUP BY account_id,track_id")
bals={}
for r in res:
bals[(r.account_id,r.track_id)]=(r.total_debit,r.total_credit,r.total_amount_cur)
db.execute("DELETE FROM account_balance")
for (acc_id,track_id),(debit,credit,amount_cur) in bals.items():
db.execute("INSERT INTO account_balance (account_id,track_id,debit,credit,amount_cur) VALUES (%s,%s,%s,%s,%s)",acc_id,track_id,debit,credit,amount_cur)
Balance.register()
| 49.854167
| 211
| 0.732553
| 1,168
| 0.48809
| 0
| 0
| 0
| 0
| 0
| 0
| 1,597
| 0.667363
|
a1b607b0cbf4748eb3756401b6e1bc4bdb961ebc
| 115
|
py
|
Python
|
ex016.py
|
Rhodytesla/PythonMundo01
|
bac3e1a7ca3934c712423bfc606d16a4ea9af53a
|
[
"MIT"
] | null | null | null |
ex016.py
|
Rhodytesla/PythonMundo01
|
bac3e1a7ca3934c712423bfc606d16a4ea9af53a
|
[
"MIT"
] | null | null | null |
ex016.py
|
Rhodytesla/PythonMundo01
|
bac3e1a7ca3934c712423bfc606d16a4ea9af53a
|
[
"MIT"
] | null | null | null |
import math
a = float(input('insira um valor'))
print('a porção inteira do valor {} é {}'.format(a,math.trunc(a)))
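# Illustrative note (not part of the original exercise): math.trunc() cuts toward zero,
# while math.floor() rounds down, so the two differ for negative values.
print(math.trunc(-3.7), math.floor(-3.7))  # prints: -3 -4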
| 28.75
| 66
| 0.678261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.466102
|
a1b6b1c77481492760b6401cbb654aaadb5145b0
| 5,144
|
py
|
Python
|
models/force_expand.py
|
DeerKK/Deformable-Modeling
|
97b14be152e78f44dd6e783059bc5380a3a74a68
|
[
"MIT"
] | 4
|
2020-11-16T16:06:08.000Z
|
2022-03-30T03:53:54.000Z
|
models/force_expand.py
|
DeerKK/Deformable-Modeling
|
97b14be152e78f44dd6e783059bc5380a3a74a68
|
[
"MIT"
] | null | null | null |
models/force_expand.py
|
DeerKK/Deformable-Modeling
|
97b14be152e78f44dd6e783059bc5380a3a74a68
|
[
"MIT"
] | null | null | null |
#from data_loader import *
from scipy import signal
import matplotlib.pyplot as plt
import copy
import os
import shutil
import numpy as np
def data_filter(exp_path, probe_type='point', Xtype='loc',ytype='f',num_point=0):
shutil.rmtree(exp_path+probe_type+'_expand', ignore_errors=True)
os.mkdir(exp_path+probe_type+'_expand')
for i in range(num_point):
#load force/torque data
force_path = exp_path+probe_type+'/force_'+str(i)+'.txt'
new_force_path = exp_path+probe_type+'_expand'+'/force_'+str(i)+'.txt'
force=[]
torque=[]
force_normal=[]
torque_normal=[]
displacement=[]
dataFile=open(force_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
force.append(l2[0:3])
force_normal.append(l2[3])
displacement.append(l2[4])
dataFile.close()
if probe_type == 'line':
torque_path = exp_path+probe_type+'/torque_'+str(i)+'.txt'
dataFile=open(torque_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
torque.append(l2[0:3])
torque_normal.append(l2[3])
dataFile.close()
elif probe_type == 'ellipse':
torque_path = exp_path+probe_type+'/torque_'+str(i)+'.txt'
dataFile=open(torque_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
torque.append(l2[0:3])
displacement.append(l2[3])
dataFile.close()
force_normal_1d =np.array(force_normal)
#to np
force=np.array(force,ndmin=2)
torque=np.array(torque,ndmin=2)
force_normal=np.array(force_normal,ndmin=2).T
torque_normal=np.array(torque_normal,ndmin=2).T
displacement=np.array(displacement)
#filter
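        # Wn is the cutoff expressed as a fraction of the Nyquist frequency, so this is
        # an aggressive 5th-order low-pass Butterworth filter used to smooth the traces.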
Wn=0.01
[b,a]=signal.butter(5,Wn,'low')
for i in range(3):
tmp_filteredForces=signal.filtfilt(b,a,force[:,i].T,padlen=150)
if i == 0:
filteredForces = np.array(tmp_filteredForces,ndmin=2).T
print(filteredForces.shape)
else:
filteredForces = np.hstack((filteredForces,np.array(tmp_filteredForces,ndmin=2).T))
if probe_type == 'line' or probe_type == 'ellipse':
for i in range(3):
tmp_filteredTorques=signal.filtfilt(b,a,torque[:,i].T,padlen=150)
if i == 0:
filteredTorques = tmp_filteredTorques.T
else:
filteredTorques = np.hstack((filteredTorques,tmp_filteredTorques.T))
filtered_force_normal=signal.filtfilt(b,a,force_normal.T,padlen=150)
if probe_type == 'line':
filtered_torque_normal=signal.filtfilt(b,a,torque_normal.T,padlen=150)
#filtered_force_normal = filtered_force_normal.T
print(filtered_force_normal.shape)
new_dataFile=open(new_force_path,'w+')
num_data = len(displacement)
#delta_d = (displacement[num_data-1]-displacement[num_data-101])/1
delta_d = 0.0002
d_expand_start = displacement[num_data-1] + delta_d
d_expand_end = 0.020
d_expand = np.arange(d_expand_start,d_expand_end,delta_d)
num_expand = d_expand.shape[0]
print('[*]',num_expand)
slope = (force_normal[num_data-1] - force_normal[num_data-301])/(displacement[num_data-1]-displacement[num_data-301])
sd = slope*delta_d
fn_expand_start = force_normal[num_data-1] + sd*1
fn_expand_end = force_normal[num_data-1] + sd*(num_expand+1)
force_normal_expand = np.arange(fn_expand_start,fn_expand_end,sd)
print('[*]',len(d_expand))
d_all = displacement.tolist()+d_expand.tolist()
fn_all = force_normal_1d.tolist()+force_normal_expand.tolist()
num_all = len(d_all) - 2
print(num_all)
d_all = d_all[0:num_all]
fn_all = fn_all[0:num_all]
for i in range(num_all):
new_dataFile.write(str(0)+' '+str(0)+' '+str(0)+' ')
new_dataFile.write(str(fn_all[i])+' '+str(d_all[i])+'\n')
new_dataFile.close()
'''
for i in range(displacement.shape[0]):
new_dataFile.write(str(filteredForces[i,0])+' '+str(filteredForces[i,1])+' '+str(filteredForces[i,2])+' ')
new_dataFile.write(str(filtered_force_normal[0,i])+' '+str(displacement[i])+'\n')
new_dataFile.close()
'''
return d_all, fn_all
d,fn = data_filter('./', probe_type='point', Xtype='loc',ytype='fn',num_point=94)
print(len(d),len(fn))
plt.plot(np.array(d),np.array(fn),color='b',marker='o',markersize=1)
plt.show()
| 37.547445
| 125
| 0.577372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 699
| 0.135886
|
a1b6ce12f6da82245af7a016f922874b6b94b4ef
| 616
|
py
|
Python
|
DataStructures Python/parenthesis_matching.py
|
Kaushik-Pal-2020/DataStructure
|
4594e2f6d057db13e45b307d2d42f77e1444bfc1
|
[
"MIT"
] | null | null | null |
DataStructures Python/parenthesis_matching.py
|
Kaushik-Pal-2020/DataStructure
|
4594e2f6d057db13e45b307d2d42f77e1444bfc1
|
[
"MIT"
] | null | null | null |
DataStructures Python/parenthesis_matching.py
|
Kaushik-Pal-2020/DataStructure
|
4594e2f6d057db13e45b307d2d42f77e1444bfc1
|
[
"MIT"
] | null | null | null |
from collections import deque
def parenthesis_matching(user_input):
    """Push opening brackets onto a stack, pop them when the matching closer appears,
    and print the stack after each character is processed."""
    my_stack = deque()
    my_dict = {'(': ')', '{': '}', '[': ']'}
try:
count = 0
for letter in user_input:
if letter in my_dict.keys():
my_stack.append(letter)
count += 1
elif letter in my_dict.values() and count > 0:
if my_dict[my_stack[count-1]] == letter:
my_stack.pop()
count -= 1
print(f"Now stack Becomes = {my_stack}")
except:
print("error")
parenthesis_matching("{[a+b]*[(c-d]/e}")
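# --- Sketch (assumption, not part of the original exercise). ---
# A variant built on the same stack idea that reports whether the whole string is
# balanced instead of only printing the intermediate stack states.
def is_balanced(text):
    pairs = {'(': ')', '{': '}', '[': ']'}
    stack = deque()
    for ch in text:
        if ch in pairs:
            stack.append(ch)
        elif ch in pairs.values():
            if not stack or pairs[stack.pop()] != ch:
                return False
    return not stack

print(is_balanced("{[a+b]*[(c-d)/e]}"))  # True
print(is_balanced("{[a+b]*[(c-d]/e}"))   # False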
| 26.782609
| 58
| 0.496753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.123377
|
a1b746b6ceeb8b3c1f65c79e0b5184f641adb774
| 58
|
py
|
Python
|
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | null | null | null |
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | 1
|
2021-10-18T09:59:45.000Z
|
2021-10-18T09:59:45.000Z
|
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | 4
|
2021-10-18T09:40:54.000Z
|
2021-10-19T14:14:28.000Z
|
def si(p,r,t):
n= (p+r+t)//3
return n
| 8.285714
| 17
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a1b77cdc1daef2b3d3ed0cc366bb55bdefa74e68
| 1,670
|
py
|
Python
|
hard-gists/7880c101557297beeccda05978aeb278/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/7880c101557297beeccda05978aeb278/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/7880c101557297beeccda05978aeb278/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
# Example of use of Afanasy's API to generate a summary of the state of the
# render farm.
# Copyright (c) 2016 rise|fx (Elie Michel) - Released under MIT License
import af
cmd = af.Cmd()
def isSysJob(job):
return job['st'] == 0
## Jobs ##
joblist = cmd.getJobList()
job_state_counters = {}
job_count = 0
for job in joblist:
if isSysJob(job):
continue
job_count += 1
for s in job['state'].split():
job_state_counters[s] = job_state_counters.get(s, 0) + 1
print("Out of %d jobs:" % job_count)
print(" * %d are running" % job_state_counters.get('RUN', 0))
print(" * %d have error" % job_state_counters.get('ERR', 0))
print(" * %d are skipped" % job_state_counters.get('SKP', 0))
print(" * %d are off" % job_state_counters.get('OFF', 0))
print(" * %d are ready" % job_state_counters.get('RDY', 0))
print(" * %d are done" % job_state_counters.get('DON', 0))
# Note that the sum may exceed the total number of jobs because a job can have
# several states
print("")
## Renders ##
renderlist = cmd.renderGetList()
render_state_counts = {}
for render in renderlist:
for s in render['state'].split():
render_state_counts[s] = render_state_counts.get(s, 0) + 1
print("Out of %d renders:" % len(renderlist))
print(" * %d are online" % render_state_counts.get('ONL', 0))
print(" * %d are offline" % render_state_counts.get('OFF', 0))
print(" * %d are nimby" % render_state_counts.get('NBY', 0))
print(" * %d are running" % render_state_counts.get('RUN', 0))
print(" * %d are dirty" % render_state_counts.get('DRT', 0))
# Note that the sum may exceed the total number of renders because a render can
# have several states
| 28.305085
| 79
| 0.669461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 683
| 0.408982
|
a1b85880b05d9e4a401f9fe16d8f89e466e71f55
| 4,931
|
py
|
Python
|
cblib/scripts/admin/pack.py
|
HFriberg/cblib-base
|
164a00eb73ef3ac61f5b54f30492209cc69b854b
|
[
"Zlib"
] | 3
|
2019-06-13T06:57:31.000Z
|
2020-06-18T09:58:11.000Z
|
cblib/scripts/admin/pack.py
|
HFriberg/cblib-base
|
164a00eb73ef3ac61f5b54f30492209cc69b854b
|
[
"Zlib"
] | 1
|
2019-04-27T18:28:57.000Z
|
2019-04-30T17:16:53.000Z
|
cblib/scripts/admin/pack.py
|
HFriberg/cblib-base
|
164a00eb73ef3ac61f5b54f30492209cc69b854b
|
[
"Zlib"
] | 3
|
2019-04-30T11:19:34.000Z
|
2019-05-31T13:12:17.000Z
|
# Copyright (c) 2012 by Zuse-Institute Berlin and the Technical University of Denmark.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# Direct execution requires top level directory on python path
if __name__ == "__main__":
import os, sys, inspect
scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
packagedir = os.path.realpath(os.path.abspath(os.path.join(scriptdir,'..')))
if packagedir not in sys.path:
sys.path.insert(0, packagedir)
import os, sys, inspect, tarfile, glob, stat, getopt
from data.CBFset import CBFset
from filter import filter
def addwritepermission(tarinfo):
tarinfo.mode = tarinfo.mode | stat.S_IWRITE
return tarinfo
def pack(packname, filtexpr, setexpr, packall):
# tarfile 'filter' requires v2.7
if sys.version_info < (2,7):
raise Exception('Python 2.7 or later required..')
# Get the root directory of cblib
scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
rootdir = os.path.join(scriptdir,'..','..')
if not packall and setexpr != None:
if os.path.isfile(setexpr):
rootdir = os.path.dirname(setexpr)
else:
rootdir = setexpr
# Find all instances
files = list()
cbfset = CBFset()
cbfset.read(setexpr)
filter(filtexpr, None, cbfset, lambda x: files.append(x))
if packall:
# Find all instance information
files = files + glob.glob(os.path.join(rootdir,'instances','*.csv'))
files = files + glob.glob(os.path.join(rootdir,'instances','*.bib'))
# Find all source files from 'tools'
files = files + glob.glob(os.path.join(rootdir,'tools','*.c'))
files = files + glob.glob(os.path.join(rootdir,'tools','*.h'))
files = files + glob.glob(os.path.join(rootdir,'tools','Makefile.*'))
# Find all documents from 'docs'
files = files + glob.glob(os.path.join(rootdir,'docs','*.pdf'))
# Find all python files from 'scripts'
files = files + glob.glob(os.path.join(rootdir,'scripts','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','admin','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','data','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','dist','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','filters','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','solvers','*.py'))
# Find all other important files
files.append(os.path.join(rootdir,'README'))
files.append(os.path.join(rootdir,'instances','cbf','README'))
# Create compressed tar file
print('Writing '+packname+'.tar.gz')
tar = tarfile.open(os.path.join(scriptdir,packname+'.tar.gz'), 'w:gz')
for f in files:
extractname = os.path.join(packname, os.path.relpath(f, rootdir))
print(extractname)
tar.add(f, arcname=extractname, filter=addwritepermission)
tar.close()
if __name__ == "__main__":
try:
# Verify command line arguments
opts, args = getopt.gnu_getopt(sys.argv[1:], "n:s:a", "filter=")
if len(args) >= 1:
raise Exception('Incorrect usage!')
    except Exception as e:
        print(str(e))
        print(''.join([
            'Incorrect usage, try all instances', '\n',
            ' python ', sys.argv[0], ' -n cblib', '\n',
            'or try all mixed-integer second order cone instances:', '\n',
            ' python ', sys.argv[0], ' -n cblib-misoco --filter="||int|| and ||cones|so|| and not ||psdcones||"']))
        sys.exit(2)
packname = None
filtexpr = ""
setexpr = None
packall = False
for opt, arg in opts:
if opt == '-n':
packname = arg
elif opt == "-s":
setexpr = arg
elif opt == "-a":
packall = True
elif opt == "--filter":
filtexpr = arg
try:
if not packname:
if setexpr and os.path.exists(setexpr) and not os.path.isfile(setexpr):
packname = os.path.basename(setexpr)
if not packname:
packname = os.path.basename(os.path.dirname(setexpr))
else:
raise Exception('No pack name specified!')
print(setexpr)
pack(packname, filtexpr, setexpr, packall)
except Exception as e:
print(str(e))
| 35.992701
| 114
| 0.666396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,938
| 0.393024
|
a1b91c2b6aa90638bdb1249031654f84dc1518e8
| 35,353
|
py
|
Python
|
FAEGUI/VisualizationConnection.py
|
Eggiverse/FAE
|
1b953ba6dfcced83e5929eeaa8f525ec4acde5ed
|
[
"MIT"
] | null | null | null |
FAEGUI/VisualizationConnection.py
|
Eggiverse/FAE
|
1b953ba6dfcced83e5929eeaa8f525ec4acde5ed
|
[
"MIT"
] | null | null | null |
FAEGUI/VisualizationConnection.py
|
Eggiverse/FAE
|
1b953ba6dfcced83e5929eeaa8f525ec4acde5ed
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import os
import re
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui
from GUI.Visualization import Ui_Visualization
from FAE.FeatureAnalysis.Classifier import *
from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline
from FAE.Description.Description import Description
from FAE.Visualization.DrawROCList import DrawROCList
from FAE.Visualization.PlotMetricVsFeatureNumber import DrawCurve, DrawBar
from FAE.Visualization.FeatureSort import GeneralFeatureSort, SortRadiomicsFeature
from Utility.EcLog import eclog
class VisualizationConnection(QWidget, Ui_Visualization):
def __init__(self, parent=None):
self._root_folder = ''
self._fae = FeatureAnalysisPipelines()
self.sheet_dict = dict()
self.logger = eclog(os.path.split(__file__)[-1]).GetLogger()
self.__is_ui_ready = False
super(VisualizationConnection, self).__init__(parent)
self.setupUi(self)
self.buttonLoadResult.clicked.connect(self.LoadAll)
self.buttonClearResult.clicked.connect(self.ClearAll)
self.buttonSave.clicked.connect(self.Save)
self.buttonGenerateDescription.clicked.connect(self.GenerateDescription)
self.__plt_roc = self.canvasROC.getFigure().add_subplot(111)
self.__plt_plot = self.canvasPlot.getFigure().add_subplot(111)
self.__contribution = self.canvasFeature.getFigure().add_subplot(111)
# Update Sheet
self.tableClinicalStatistic.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tableClinicalStatistic.setSelectionBehavior(QAbstractItemView.SelectRows)
self.comboSheet.currentIndexChanged.connect(self.UpdateSheet)
self.checkMaxFeatureNumber.stateChanged.connect(self.UpdateSheet)
# self.tableClinicalStatistic.doubleClicked.connect(self.ShowOneResult)
self.tableClinicalStatistic.itemSelectionChanged.connect(self.ShowOneResult)
# Update ROC canvas
self.comboNormalizer.currentIndexChanged.connect(self.UpdateROC)
self.comboDimensionReduction.currentIndexChanged.connect(self.UpdateROC)
self.comboFeatureSelector.currentIndexChanged.connect(self.UpdateROC)
self.comboClassifier.currentIndexChanged.connect(self.UpdateROC)
self.spinBoxFeatureNumber.valueChanged.connect(self.UpdateROC)
self.checkROCCVTrain.stateChanged.connect(self.UpdateROC)
self.checkROCCVValidation.stateChanged.connect(self.UpdateROC)
self.checkROCTrain.stateChanged.connect(self.UpdateROC)
self.checkROCTest.stateChanged.connect(self.UpdateROC)
# Update Plot canvas
self.comboPlotX.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotY.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotNormalizer.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotDimensionReduction.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotFeatureSelector.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotClassifier.currentIndexChanged.connect(self.UpdatePlot)
self.spinPlotFeatureNumber.valueChanged.connect(self.UpdatePlot)
self.checkPlotCVTrain.stateChanged.connect(self.UpdatePlot)
self.checkPlotCVValidation.stateChanged.connect(self.UpdatePlot)
self.checkPlotTrain.stateChanged.connect(self.UpdatePlot)
# self.checkPlotTest.stateChanged.connect(self.UpdatePlot)
# Update Contribution canvas
self.radioContributionFeatureSelector.toggled.connect(self.UpdateContribution)
self.radioContributionClassifier.toggled.connect(self.UpdateContribution)
self.comboContributionNormalizor.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionDimension.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionFeatureSelector.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionClassifier.currentIndexChanged.connect(self.UpdateContribution)
self.spinContributeFeatureNumber.valueChanged.connect(self.UpdateContribution)
def LoadAll(self):
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
self._root_folder = dlg.selectedFiles()[0]
if not os.path.exists(self._root_folder):
return
if not r'.FAEresult4129074093819729087' in os.listdir(self._root_folder):
QMessageBox.about(self, 'Load Error', 'This folder is not supported for import')
return
try:
self.lineEditResultPath.setText(self._root_folder)
self._fae.LoadAll(self._root_folder)
self.SetResultDescription()
self.SetResultTable()
self.InitialUi()
except Exception as ex:
QMessageBox.about(self, "Load Error", ex.__str__())
self.logger.log('Load Error, The reason is ' + str(ex))
self.ClearAll()
return
self.buttonClearResult.setEnabled(True)
self.buttonSave.setEnabled(True)
self.buttonLoadResult.setEnabled(False)
def ClearAll(self):
self.buttonLoadResult.setEnabled(True)
self.buttonSave.setEnabled(False)
self.buttonClearResult.setEnabled(False)
self.checkROCCVTrain.setChecked(False)
self.checkROCCVValidation.setChecked(False)
self.checkROCTrain.setChecked(False)
self.checkROCTest.setChecked(False)
self.checkPlotCVTrain.setChecked(False)
self.checkPlotCVValidation.setChecked(False)
self.checkPlotTrain.setChecked(False)
# self.checkPlotTest.setChecked(False)
self.radioContributionFeatureSelector.setChecked(True)
self.radioContributionFeatureSelector.setChecked(False)
self.checkMaxFeatureNumber.setChecked(False)
self.canvasROC.getFigure().clear()
self.canvasPlot.getFigure().clear()
self.canvasFeature.getFigure().clear()
self.__plt_roc = self.canvasROC.getFigure().add_subplot(111)
self.__plt_plot = self.canvasPlot.getFigure().add_subplot(111)
self.__contribution = self.canvasFeature.getFigure().add_subplot(111)
self.canvasROC.draw()
self.canvasPlot.draw()
self.canvasFeature.draw()
self.textEditDescription.clear()
self.lineEditResultPath.clear()
self.comboSheet.clear()
self.comboClassifier.clear()
self.comboDimensionReduction.clear()
self.comboNormalizer.clear()
self.comboFeatureSelector.clear()
self.comboPlotClassifier.clear()
self.comboPlotDimensionReduction.clear()
self.comboPlotFeatureSelector.clear()
self.comboPlotNormalizer.clear()
self.comboPlotX.clear()
self.comboPlotY.clear()
self.comboContributionNormalizor.clear()
self.comboContributionDimension.clear()
self.comboContributionClassifier.clear()
self.comboContributionFeatureSelector.clear()
self.spinBoxFeatureNumber.setValue(0)
self.spinPlotFeatureNumber.setValue(0)
self.spinPlotFeatureNumber.setEnabled(False)
self.spinContributeFeatureNumber.setValue(1)
self.tableClinicalStatistic.clear()
self.tableClinicalStatistic.setRowCount(0)
self.tableClinicalStatistic.setColumnCount(0)
self.tableClinicalStatistic.setHorizontalHeaderLabels(list([]))
self.tableClinicalStatistic.setVerticalHeaderLabels(list([]))
self._fae = FeatureAnalysisPipelines()
self._root_folder = ''
self.sheet_dict = dict()
self.__is_ui_ready = False
def Save(self):
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
store_folder = dlg.selectedFiles()[0]
try:
self.canvasROC.getFigure().savefig(os.path.join(store_folder, 'ROC.eps'), dpi=1200)
self.canvasROC.getFigure().savefig(os.path.join(store_folder, 'ROC.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no ROC figure.\n' + e.__str__())
try:
self.canvasPlot.getFigure().savefig(os.path.join(store_folder, 'Compare.eps'), dpi=1200)
self.canvasPlot.getFigure().savefig(os.path.join(store_folder, 'Compare.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no AUC comparison figure.\n' + e.__str__())
try:
self.canvasFeature.getFigure().savefig(os.path.join(store_folder, 'FeatureWeights.eps'), dpi=1200)
self.canvasFeature.getFigure().savefig(os.path.join(store_folder, 'FeatureWeights.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no Feature Contribution figure.\n' + e.__str__())
def InitialUi(self):
        # Update ROC canvas
for normalizer in self._fae.GetNormalizerList():
self.comboNormalizer.addItem(normalizer.GetName())
for dimension_reduction in self._fae.GetDimensionReductionList():
self.comboDimensionReduction.addItem(dimension_reduction.GetName())
for classifier in self._fae.GetClassifierList():
self.comboClassifier.addItem(classifier.GetName())
for feature_selector in self._fae.GetFeatureSelectorList():
self.comboFeatureSelector.addItem(feature_selector.GetName())
self.spinBoxFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinBoxFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
        # Update Plot canvas
if len(self._fae.GetNormalizerList()) > 1:
            self.comboPlotX.addItem('Normalization')
if len(self._fae.GetDimensionReductionList()) > 1:
self.comboPlotX.addItem('Dimension Reduction')
if len(self._fae.GetFeatureSelectorList()) > 1:
self.comboPlotX.addItem('Feature Selector')
if len(self._fae.GetClassifierList()) > 1:
self.comboPlotX.addItem('Classifier')
if len(self._fae.GetFeatureNumberList()) > 1:
self.comboPlotX.addItem('Feature Number')
self.comboPlotY.addItem('AUC')
for index in self._fae.GetNormalizerList():
self.comboPlotNormalizer.addItem(index.GetName())
for index in self._fae.GetDimensionReductionList():
self.comboPlotDimensionReduction.addItem(index.GetName())
for index in self._fae.GetFeatureSelectorList():
self.comboPlotFeatureSelector.addItem(index.GetName())
for index in self._fae.GetClassifierList():
self.comboPlotClassifier.addItem(index.GetName())
self.spinPlotFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinPlotFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
# Update Contribution canvas
for index in self._fae.GetNormalizerList():
self.comboContributionNormalizor.addItem(index.GetName())
for index in self._fae.GetDimensionReductionList():
self.comboContributionDimension.addItem(index.GetName())
for selector in self._fae.GetFeatureSelectorList():
self.comboContributionFeatureSelector.addItem(selector.GetName())
for classifier in self._fae.GetClassifierList():
specific_name = classifier.GetName() + '_coef.csv'
if self._SearchSpecificFile(int(self._fae.GetFeatureNumberList()[0]), specific_name):
self.comboContributionClassifier.addItem(classifier.GetName())
self.spinContributeFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinContributeFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
self.__is_ui_ready = True
def UpdateROC(self):
if not self.__is_ui_ready:
return
if (self.comboNormalizer.count() == 0) or \
(self.comboDimensionReduction.count() == 0) or \
(self.comboFeatureSelector.count() == 0) or \
(self.comboClassifier.count() == 0) or \
(self.spinBoxFeatureNumber.value() == 0):
return
case_name = self.comboNormalizer.currentText() + '_' + \
self.comboDimensionReduction.currentText() + '_' + \
self.comboFeatureSelector.currentText() + '_' + \
str(self.spinBoxFeatureNumber.value()) + '_' + \
self.comboClassifier.currentText()
case_folder = os.path.join(self._root_folder, case_name)
pred_list, label_list, name_list = [], [], []
if self.checkROCCVTrain.isChecked():
train_pred = np.load(os.path.join(case_folder, 'train_predict.npy'))
train_label = np.load(os.path.join(case_folder, 'train_label.npy'))
pred_list.append(train_pred)
label_list.append(train_label)
name_list.append('CV Train')
if self.checkROCCVValidation.isChecked():
val_pred = np.load(os.path.join(case_folder, 'val_predict.npy'))
val_label = np.load(os.path.join(case_folder, 'val_label.npy'))
pred_list.append(val_pred)
label_list.append(val_label)
name_list.append('CV Validation')
if self.checkROCTrain.isChecked():
all_train_pred = np.load(os.path.join(case_folder, 'all_train_predict.npy'))
all_train_label = np.load(os.path.join(case_folder, 'all_train_label.npy'))
pred_list.append(all_train_pred)
label_list.append(all_train_label)
name_list.append('Train')
if self.checkROCTest.isChecked():
if os.path.exists(os.path.join(case_folder, 'test_label.npy')):
test_pred = np.load(os.path.join(case_folder, 'test_predict.npy'))
test_label = np.load(os.path.join(case_folder, 'test_label.npy'))
pred_list.append(test_pred)
label_list.append(test_label)
name_list.append('Test')
if len(pred_list) > 0:
DrawROCList(pred_list, label_list, name_list=name_list, is_show=False, fig=self.canvasROC.getFigure())
self.canvasROC.draw()
def _UpdatePlotButtons(self, selected_index):
index = [0, 0, 0, 0, 0]
self.comboPlotNormalizer.setEnabled(True)
self.comboPlotDimensionReduction.setEnabled(True)
self.comboPlotFeatureSelector.setEnabled(True)
self.comboPlotClassifier.setEnabled(True)
self.spinPlotFeatureNumber.setEnabled(True)
index[0] = self.comboPlotNormalizer.currentIndex()
index[1] = self.comboPlotDimensionReduction.currentIndex()
index[2] = self.comboPlotFeatureSelector.currentIndex()
index[4] = self.comboPlotClassifier.currentIndex()
index[3] = self.spinPlotFeatureNumber.value() - int(self._fae.GetFeatureNumberList()[0])
if selected_index == 0:
self.comboPlotNormalizer.setEnabled(False)
index[0] = [temp for temp in range(len(self._fae.GetNormalizerList()))]
elif selected_index == 1:
self.comboPlotDimensionReduction.setEnabled(False)
index[1] = [temp for temp in range(len(self._fae.GetDimensionReductionList()))]
elif selected_index == 2:
self.comboPlotFeatureSelector.setEnabled(False)
index[2] = [temp for temp in range(len(self._fae.GetFeatureSelectorList()))]
elif selected_index == 4:
self.comboPlotClassifier.setEnabled(False)
index[4] = [temp for temp in range(len(self._fae.GetClassifierList()))]
elif selected_index == 3:
self.spinPlotFeatureNumber.setEnabled(False)
index[3] = [temp for temp in range(len(self._fae.GetFeatureNumberList()))]
return index
def UpdatePlot(self):
if not self.__is_ui_ready:
return
if self.comboPlotX.count() == 0:
return
x_ticks = []
x_label = ''
selected_index = -1
        if self.comboPlotX.currentText() == 'Normalization':
selected_index = 0
x_ticks = [instance.GetName() for instance in self._fae.GetNormalizerList()]
x_label = 'Normalization Method'
elif self.comboPlotX.currentText() == 'Dimension Reduction':
selected_index = 1
x_ticks = [instance.GetName() for instance in self._fae.GetDimensionReductionList()]
x_label = 'Dimension Reduction Method'
elif self.comboPlotX.currentText() == 'Feature Selector':
selected_index = 2
x_ticks = [instance.GetName() for instance in self._fae.GetFeatureSelectorList()]
            x_label = 'Feature Selector Method'
elif self.comboPlotX.currentText() == 'Classifier':
selected_index = 4
x_ticks = [instance.GetName() for instance in self._fae.GetClassifierList()]
x_label = 'Classifier Method'
elif self.comboPlotX.currentText() == 'Feature Number':
selected_index = 3
x_ticks = list(map(int, self._fae.GetFeatureNumberList()))
x_label = 'Feature Number'
max_axis_list = [0, 1, 2, 3, 4]
max_axis_list.remove(selected_index)
max_axis = tuple(max_axis_list)
index = self._UpdatePlotButtons(selected_index)
show_data = []
show_data_std =[]
name_list = []
if self.comboPlotY.currentText() == 'AUC':
if self.checkPlotCVTrain.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['train'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['train'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('CV Train')
if self.checkPlotCVValidation.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['val'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['val'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('CV Validation')
if self.checkPlotTrain.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['all_train'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['all_train'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('Train')
# if self.checkPlotTest.isChecked():
# temp = deepcopy(self._fae.GetAUCMetric()['test'])
# auc_std = deepcopy(self._fae.GetAUCstdMetric()['test'])
# if temp.size > 0:
# show_data.append(temp[tuple(index)].tolist())
# show_data_std.append(auc_std[tuple(index)].tolist())
# name_list.append('Test')
if len(show_data) > 0:
if selected_index == 3:
DrawCurve(x_ticks, show_data, show_data_std, xlabel=x_label, ylabel=self.comboPlotY.currentText(),
name_list=name_list, is_show=False, fig=self.canvasPlot.getFigure())
else:
DrawBar(x_ticks, show_data, ylabel=self.comboPlotY.currentText(),
name_list=name_list, is_show=False, fig=self.canvasPlot.getFigure())
self.canvasPlot.draw()
def UpdateContribution(self):
if not self.__is_ui_ready:
return
try:
one_result_folder_name = self.comboContributionNormalizor.currentText() + '_' + \
self.comboContributionDimension.currentText() + '_' + \
self.comboContributionFeatureSelector.currentText() + '_' + \
str(self.spinContributeFeatureNumber.value()) + '_' + \
self.comboContributionClassifier.currentText()
one_result_folder = os.path.join(self._root_folder, one_result_folder_name)
# This is compatible with the previous version
if not os.path.exists(one_result_folder):
one_result_folder_name = self.comboContributionNormalizor.currentText() + '_Cos_' + \
self.comboContributionFeatureSelector.currentText() + '_' + \
str(self.spinContributeFeatureNumber.value()) + '_' + \
self.comboContributionClassifier.currentText()
one_result_folder = os.path.join(self._root_folder, one_result_folder_name)
if self.radioContributionFeatureSelector.isChecked():
file_name = self.comboContributionFeatureSelector.currentText() + '_sort.csv'
file_path = os.path.join(one_result_folder, file_name)
if not os.path.exists(file_path):
file_name = self.comboContributionFeatureSelector.currentText().lower() + '_sort.csv'
file_path = os.path.join(one_result_folder, file_name)
if file_path:
df = pd.read_csv(file_path, index_col=0)
value = list(np.abs(df.iloc[:, 0]))
                    # add positive/negative sign info to the coefficient names
processed_feature_name = list(df.index)
original_value = list(df.iloc[:, 0])
for index in range(len(original_value)):
if original_value[index] > 0:
processed_feature_name[index] = processed_feature_name[index] + ' P'
else:
processed_feature_name[index] = processed_feature_name[index] + ' N'
GeneralFeatureSort(processed_feature_name, value, max_num=self.spinContributeFeatureNumber.value(),
is_show=False, fig=self.canvasFeature.getFigure())
elif self.radioContributionClassifier.isChecked():
specific_name = self.comboContributionClassifier.currentText() + '_coef.csv'
file_path = os.path.join(one_result_folder, specific_name)
if not os.path.exists(file_path):
specific_name = self.comboContributionClassifier.currentText().lower() + '_coef.csv'
file_path = os.path.join(one_result_folder, specific_name)
if file_path:
df = pd.read_csv(file_path, index_col=0)
feature_name = list(df.index)
value = list(np.abs(df.iloc[:, 0]))
                    # add positive/negative sign info to the coefficient names
processed_feature_name = list(df.index)
original_value = list(df.iloc[:, 0])
for index in range(len(original_value)):
if original_value[index] > 0:
processed_feature_name[index] = processed_feature_name[index] + ' P'
else:
processed_feature_name[index] = processed_feature_name[index] + ' N'
# try:
# SortRadiomicsFeature(processed_feature_name, value, is_show=False, fig=self.canvasFeature.getFigure())
# except:
GeneralFeatureSort(processed_feature_name, value,
is_show=False, fig=self.canvasFeature.getFigure())
self.canvasFeature.draw()
except Exception as e:
content = 'In Visualization, UpdateContribution failed'
self.logger.error('{}{}'.format(content, str(e)))
QMessageBox.about(self, content, e.__str__())
def SetResultDescription(self):
text = "Normalizer:\n"
for index in self._fae.GetNormalizerList():
text += (index.GetName() + '\n')
text += '\n'
text += "Dimension Reduction:\n"
for index in self._fae.GetDimensionReductionList():
text += (index.GetName() + '\n')
text += '\n'
text += "Feature Selector:\n"
for index in self._fae.GetFeatureSelectorList():
text += (index.GetName() + '\n')
text += '\n'
text += "Feature Number:\n"
text += "{:s} - {:s}\n".format(self._fae.GetFeatureNumberList()[0], self._fae.GetFeatureNumberList()[-1])
text += '\n'
text += "Classifier:\n"
for index in self._fae.GetClassifierList():
text += (index.GetName() + '\n')
text += '\n'
text += 'Cross Validation: ' + self._fae.GetCrossValidation().GetName()
self.textEditDescription.setPlainText(text)
def UpdateSheet(self):
if self.checkMaxFeatureNumber.isChecked():
self.comboSheet.setEnabled(False)
else:
self.comboSheet.setEnabled(True)
self.tableClinicalStatistic.clear()
self.tableClinicalStatistic.setSortingEnabled(False)
if self.comboSheet.currentText() == 'Train':
df = self.sheet_dict['train']
elif self.comboSheet.currentText() == 'Validation':
df = self.sheet_dict['val']
elif self.comboSheet.currentText() == 'Test':
df = self.sheet_dict['test']
else:
return
if self.checkMaxFeatureNumber.isChecked():
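            # One-standard-error model selection: for every pipeline combination, pick the
            # smallest feature number whose validation AUC is within one standard error of
            # that combination's best AUC, then keep the candidates whose validation AUC
            # falls inside the 95% CI of the best such model and display their test results.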
self.sheet_dict['test'] = pd.read_csv(os.path.join(self._root_folder, 'test_result.csv'), index_col=0)
data = self._fae.GetAUCMetric()['val']
std_data = self._fae.GetAUCstdMetric()['val']
df_val = self.sheet_dict['val']
df_test = self.sheet_dict['test']
name_list = []
for normalizer_index, normalizer in enumerate(self._fae.GetNormalizerList()):
for dimension_reducer_index, dimension_reducer in enumerate(self._fae.GetDimensionReductionList()):
for feature_selector_index, feature_selector in enumerate(self._fae.GetFeatureSelectorList()):
for classifier_index, classifier in enumerate(self._fae.GetClassifierList()):
sub_auc = data[normalizer_index, dimension_reducer_index, feature_selector_index, :,
classifier_index]
sub_auc_std = std_data[normalizer_index, dimension_reducer_index, feature_selector_index, :,
classifier_index]
one_se = max(sub_auc)-sub_auc_std[np.argmax(sub_auc)]
for feature_number_index in range(len(self._fae.GetFeatureNumberList())):
if data[normalizer_index, dimension_reducer_index,
feature_selector_index, feature_number_index, classifier_index] >= one_se:
name = normalizer.GetName() + '_' + dimension_reducer.GetName() + '_' + \
feature_selector.GetName() + '_' + str(self._fae.GetFeatureNumberList()[feature_number_index]) + '_' + \
classifier.GetName()
name_list.append(name)
break
            # choose the selected models from all test results
df_val = df_val.loc[name_list]
max_index = df_val['auc'].idxmax()
sub_serise = df_val.loc[max_index]
max_array = sub_serise.get_values().reshape(1, -1)
max_auc_df = pd.DataFrame(data=max_array, columns=sub_serise.index.tolist(), index=[max_index])
max_auc_95ci = max_auc_df.at[max_index, 'auc 95% CIs']
max_auc_95ci = re.findall(r"\d+\.?\d*", max_auc_95ci)
sub_val_df = df_val[(df_val['auc'] >= float(max_auc_95ci[0])) & (df_val['auc'] <= float(max_auc_95ci[1]))]
index_by_val = sub_val_df.index.tolist()
df = df_test.loc[index_by_val]
df.sort_index(inplace=True)
self.tableClinicalStatistic.setRowCount(df.shape[0])
self.tableClinicalStatistic.setColumnCount(df.shape[1]+1)
headerlabels = df.columns.tolist()
headerlabels.insert(0, 'models name')
self.tableClinicalStatistic.setHorizontalHeaderLabels(headerlabels)
# self.tableClinicalStatistic.setVerticalHeaderLabels(list(df.index))
for row_index in range(df.shape[0]):
for col_index in range(df.shape[1]+1):
if col_index == 0:
self.tableClinicalStatistic.setItem(row_index, col_index,
QTableWidgetItem(df.index[row_index]))
else:
self.tableClinicalStatistic.setItem(row_index, col_index,
QTableWidgetItem(str(df.iloc[row_index, col_index-1])))
self.tableClinicalStatistic.setSortingEnabled(True)
def SetResultTable(self):
self.sheet_dict['train'] = pd.read_csv(os.path.join(self._root_folder, 'train_result.csv'), index_col=0)
self.comboSheet.addItem('Train')
self.sheet_dict['val'] = pd.read_csv(os.path.join(self._root_folder, 'val_result.csv'), index_col=0)
self.comboSheet.addItem('Validation')
if os.path.exists(os.path.join(self._root_folder, 'test_result.csv')):
self.sheet_dict['test'] = pd.read_csv(os.path.join(self._root_folder, 'test_result.csv'), index_col=0)
self.comboSheet.addItem('Test')
self.UpdateSheet()
def _SearchSpecificFile(self, feature_number, specific_file_name, specific_file_name2=''):
for rt, folder, files in os.walk(self._root_folder):
for file_name in files:
# print(file_name)
if specific_file_name2:
if (file_name.lower() == specific_file_name.lower()) and \
('_{:d}_'.format(feature_number) in rt) and \
(specific_file_name2 in rt):
return os.path.join(rt, file_name)
else:
if (file_name.lower() == specific_file_name.lower()) and ('_{:d}_'.format(feature_number) in rt):
return os.path.join(rt, file_name)
return ''
def ShowOneResult(self):
try:
# for index in self.tableClinicalStatistic.selectedIndexes():
index = self.tableClinicalStatistic.selectedIndexes()[0]
row = index.row()
one_item = self.tableClinicalStatistic.item(row, 0)
text = str(one_item.text())
current_normalizer, current_dimension_reducer, current_feature_selector, current_feature_number, current_classifier = \
text.split('_')
self.comboNormalizer.setCurrentText(current_normalizer)
self.comboDimensionReduction.setCurrentText(current_dimension_reducer)
self.comboFeatureSelector.setCurrentText(current_feature_selector)
self.comboClassifier.setCurrentText(current_classifier)
self.spinBoxFeatureNumber.setValue(int(current_feature_number))
if not (self.checkROCTrain.isChecked() or self.checkROCCVTrain.isChecked() or
self.checkROCCVValidation.isChecked() or self.checkROCTrain.isChecked()):
self.checkROCCVTrain.setCheckState(True)
self.checkROCCVValidation.setCheckState(True)
self.UpdateROC()
# Update the AUC versus feature number
self.comboPlotNormalizer.setCurrentText(current_normalizer)
self.comboPlotDimensionReduction.setCurrentText(current_dimension_reducer)
self.comboPlotFeatureSelector.setCurrentText(current_feature_selector)
self.comboPlotClassifier.setCurrentText(current_classifier)
self.comboPlotX.setCurrentText('Feature Number')
if not (self.checkPlotTrain.isChecked() or
self.checkPlotCVTrain.isChecked() or
self.checkPlotCVValidation.isChecked()):
self.checkPlotCVValidation.setCheckState(True)
self.UpdatePlot()
# Update the Contribution
self.comboContributionNormalizor.setCurrentText(current_normalizer)
self.comboContributionDimension.setCurrentText(current_dimension_reducer)
self.comboContributionFeatureSelector.setCurrentText(current_feature_selector)
self.comboContributionClassifier.setCurrentText(current_classifier)
self.spinContributeFeatureNumber.setValue(int(current_feature_number))
self.UpdateContribution()
except Exception as e:
content = 'Visualization, ShowOneResult failed: '
self.logger.error('{}{}'.format(content, str(e)))
QMessageBox.about(self, content, e.__str__())
def GenerateDescription(self):
if (self.comboNormalizer.count() == 0) or \
(self.comboDimensionReduction.count() == 0) or \
(self.comboFeatureSelector.count() == 0) or \
(self.comboClassifier.count() == 0) or \
(self.spinBoxFeatureNumber.value() == 0):
return
case_name = self.comboNormalizer.currentText() + '_' + \
self.comboDimensionReduction.currentText() + '_' + \
self.comboFeatureSelector.currentText() + '_' + \
str(self.spinBoxFeatureNumber.value()) + '_' + \
self.comboClassifier.currentText()
case_folder = os.path.join(self._root_folder, case_name)
current_pipeline = OnePipeline()
try:
current_pipeline.LoadPipeline(os.path.join(case_folder, 'pipeline_info.csv'))
except Exception as ex:
QMessageBox.about(self, "In Description, Load Pipeline_info Error", ex.__str__())
self.logger.error('Load Pipeline Error, The reason is ' + str(ex))
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
store_folder = dlg.selectedFiles()[0]
roc_path = os.path.join(store_folder, 'ROC.jpg')
self.canvasROC.getFigure().savefig(roc_path, dpi=300)
report = Description()
try:
report.Run(current_pipeline, self._root_folder, store_folder)
os.system("explorer.exe {:s}".format(os.path.normpath(store_folder)))
except Exception as ex:
QMessageBox.about(self, 'Description Generate Error: ', ex.__str__())
                self.logger.error('Description Generate Error: ' + str(ex))
| 50.21733
| 148
| 0.633808
| 34,767
| 0.983424
| 0
| 0
| 0
| 0
| 0
| 0
| 2,970
| 0.08401
|
a1b98e7fe17a60a91fcb8684f5329153681b1123
| 1,779
|
py
|
Python
|
bookstore/management/commands/makeratings.py
|
mirko-lelansky/booksite
|
f3bcab93a4d9382ed43adaba4b04202333fe4a86
|
[
"Apache-2.0"
] | null | null | null |
bookstore/management/commands/makeratings.py
|
mirko-lelansky/booksite
|
f3bcab93a4d9382ed43adaba4b04202333fe4a86
|
[
"Apache-2.0"
] | null | null | null |
bookstore/management/commands/makeratings.py
|
mirko-lelansky/booksite
|
f3bcab93a4d9382ed43adaba4b04202333fe4a86
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Mirko Lelansky <mlelansky@mail.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand, CommandError
from bookstore.models import Book, Rating
import random
import threading
class Command(BaseCommand):
help = "Create some test commands."
def add_arguments(self, parser):
parser.add_argument("clients", default=5, nargs="?", type=int)
parser.add_argument("requests_per_client", default=20, nargs="?", type=int)
def handle(self, *args, **options):
threads = [ClientThread(options["requests_per_client"]) for i in range(options["clients"])]
[thread.start() for thread in threads]
for x in threads:
x.join()
class ClientThread(threading.Thread):
"""
"""
def __init__(self, max_requests):
super().__init__()
self._requests = 0
self._max_requests = max_requests
def run(self):
while(self._requests < self._max_requests):
books = Book.objects.all()
book = random.choice(books)
rate = random.randint(1, 5)
rating = Rating()
rating.book = book
rating.stars = rate
rating.save()
self._requests = self._requests + 1
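# Example invocation (assuming this module lives at bookstore/management/commands/makeratings.py in a
# configured Django project that already has some Book rows):
#   python manage.py makeratings 5 20   # 5 client threads, 20 ratings each (these are also the defaults)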
| 35.58
| 99
| 0.670039
| 1,040
| 0.584598
| 0
| 0
| 0
| 0
| 0
| 0
| 688
| 0.386734
|
a1bbcc80b20916c2b274dcf7f69fc4ce858c7f88
| 735
|
py
|
Python
|
secondstate/converters.py
|
fruiti-ltd/secondstate
|
81fe6916b92c7024372a95f0eb9d50f6275dfc69
|
[
"BSD-3-Clause"
] | 1
|
2021-05-28T23:02:08.000Z
|
2021-05-28T23:02:08.000Z
|
secondstate/converters.py
|
fruiti-ltd/secondstate
|
81fe6916b92c7024372a95f0eb9d50f6275dfc69
|
[
"BSD-3-Clause"
] | null | null | null |
secondstate/converters.py
|
fruiti-ltd/secondstate
|
81fe6916b92c7024372a95f0eb9d50f6275dfc69
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021, Fruiti Limited
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
def convert_custom_timestamp_range(timestamp_range: str) -> list:
result = timestamp_range.split("_")
result[0] = convert_timestamp_to_iso_datetime(result[0])
result[1] = convert_timestamp_to_iso_datetime(result[1])
return result
def convert_iso_datetime_to_timestamp(iso_datetime: str) -> int:
return int(datetime.fromisoformat(iso_datetime).timestamp())
def convert_timestamp_to_iso_datetime(timestamp: int) -> str:
return str(datetime.fromtimestamp(int(timestamp)).isoformat())
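# A minimal usage sketch (illustrative values only, not part of the original module):
if __name__ == "__main__":
    start_iso, end_iso = convert_custom_timestamp_range("1622505600_1622592000")
    print(start_iso, end_iso)                             # ISO datetimes in the machine's local timezone
    print(convert_iso_datetime_to_timestamp(start_iso))   # round-trips back to an integer timestamp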
| 29.4
| 71
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.257143
|
a1bd442cb66a1c8f82b5b33378ae612201ae99f7
| 5,313
|
py
|
Python
|
Write.py
|
yukiii-zhong/HandMovementTracking
|
d39c65ca83862d97c4589dde616c1d8a586a033c
|
[
"MIT"
] | 1
|
2019-04-09T17:24:49.000Z
|
2019-04-09T17:24:49.000Z
|
Write.py
|
yukiii-zhong/HandMovementTracking
|
d39c65ca83862d97c4589dde616c1d8a586a033c
|
[
"MIT"
] | null | null | null |
Write.py
|
yukiii-zhong/HandMovementTracking
|
d39c65ca83862d97c4589dde616c1d8a586a033c
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import argparse
from collections import deque
import keyboard as kb
import time
from pynput.keyboard import Key, Controller, Listener
class points(object):
def __init__(self, x, y):
self.x = x
self.y = y
sm_threshold = 100
lg_threshold = 200
guiding = True
keyboard = Controller()
cap = cv2.VideoCapture(0)
pts = deque(maxlen=64)
Lower_green = np.array([110, 50, 50])
Upper_green = np.array([130, 255, 255])
startPoint = endPoint = points(0, 0)
recentPoints = deque()
# counter = 0
# prev_x = 0
# prev_y = 0
while True:
if kb.is_pressed('q'):
guiding = False
if kb.is_pressed('w'):
guiding = True
ret, img = cap.read()
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.inRange(hsv, Lower_green, Upper_green)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
mask = cv2.dilate(mask, kernel, iterations=1)
res = cv2.bitwise_and(img, img, mask=mask)
cnts, heir = cv2.findContours(
mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# Added code
recentPoints.append(points(x,y))
if len(recentPoints)>16:
recentPoints.popleft()
if len(recentPoints) == 16:
min_X = min([p.x for p in recentPoints])
max_X = max([p.x for p in recentPoints])
min_Y = min([p.y for p in recentPoints])
max_Y = max([p.y for p in recentPoints])
if max_X-min_X <= sm_threshold or max_Y-min_Y<=sm_threshold:
# EndPoint as average of recentPoints
# endPoint_X = sum([p.x for p in recentPoints])/len(recentPoints)
# endPoint_Y = sum([p.y for p in recentPoints])/ len(recentPoints)
# endPoint = points(endPoint_X, endPoint_Y)
endPoint = points(x,y)
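                # Decide whether the swipe is mainly horizontal or vertical; the 0.625 factor is
                # presumably there to compensate for the frame's width/height ratio.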
if abs(startPoint.x-endPoint.x)*0.625 > abs(startPoint.y- endPoint.y):
if startPoint.x - endPoint.x > lg_threshold:
print('right')
keyboard.press(Key.right)
keyboard.release(Key.right)
startPoint = endPoint
recentPoints = deque()
elif startPoint.x - endPoint.x < -lg_threshold:
print('left')
keyboard.press(Key.left)
keyboard.release(Key.left)
startPoint = endPoint
recentPoints = deque()
else:
if startPoint.y - endPoint.y > lg_threshold*0.625 :
print('up')
keyboard.press(Key.up)
keyboard.release(Key.up)
startPoint = endPoint
recentPoints = deque()
elif startPoint.y - endPoint.y < -lg_threshold*0.625:
print('down')
keyboard.press(Key.down)
keyboard.release(Key.down)
startPoint = endPoint
recentPoints = deque()
#print(x, y)
# time.sleep(0.1)
# counter += 1
# if counter == 32:
# prev_x = x
# prev_y = y
# if counter > 32:
# if abs(x - prev_x) > abs(y - prev_y):
# if x - prev_x > 100:
# print('left')
# keyboard.press(Key.left)
# keyboard.release(Key.left)
# # time.sleep(0.7)
# counter = 0
# elif x - prev_x < -100:
# print('right')
# keyboard.press(Key.right)
# keyboard.release(Key.right)
# counter = 0
# else:
# if y - prev_y > 100:
# print('down')
# keyboard.press(Key.down)
# keyboard.release(Key.down)
# counter = 0
# # time.sleep(0.7)
# elif y - prev_y < -100:
# print('up')
# keyboard.press(Key.up)
# keyboard.release(Key.up)
# counter = 0
# # time.sleep(0.7)
if radius > 5:
cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
cv2.circle(img, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1]is None or pts[i] is None:
continue
thick = int(np.sqrt(len(pts) / float(i + 1)) * 2.5)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 225), thick)
cv2.imshow("Frame", img)
# cv2.imshow("mask",mask)
# cv2.imshow("res",res)
k = cv2.waitKey(1) & 0xFF
if k == ord("p"):
break
# cleanup the camera and close any open windows
cap.release()
cv2.destroyAllWindows()
| 31.070175
| 82
| 0.499529
| 89
| 0.016751
| 0
| 0
| 0
| 0
| 0
| 0
| 1,369
| 0.25767
|
a1be04a80f83b1938545b09a34c0a9a1cda47ace
| 1,285
|
py
|
Python
|
server/newsWebsite/models.py
|
thiagobrez/newsWebsite
|
130f01d29dd776eaa096080982274bb27d19ad8f
|
[
"MIT"
] | null | null | null |
server/newsWebsite/models.py
|
thiagobrez/newsWebsite
|
130f01d29dd776eaa096080982274bb27d19ad8f
|
[
"MIT"
] | 7
|
2020-09-07T18:44:00.000Z
|
2022-02-10T19:05:41.000Z
|
server/newsWebsite/models.py
|
thiagobrez/newsWebsite
|
130f01d29dd776eaa096080982274bb27d19ad8f
|
[
"MIT"
] | null | null | null |
from django.db import models
def picture_upload_path(instance, filename):
# file will be saved at <MEDIA_ROOT>/authorPictures/<filename>
return 'authorPictures/{0}'.format(filename)
def hero_upload_path(instance, filename):
# file will be saved at <MEDIA_ROOT>/heroImages/<filename>
return 'heroImages/{0}'.format(filename)
class Subject(models.Model):
name = models.CharField(max_length=200)
color = models.CharField(max_length=10)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=200, unique=True)
picture = models.ImageField(upload_to=picture_upload_path, default='authorPictures/default_author.png')
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=500)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
heroImage = models.ImageField(upload_to=hero_upload_path, blank=True, max_length=500, default='heroImages/default_article.png')
publishDate = models.DateTimeField('Publish date')
text = models.TextField(blank=True)
def __str__(self):
return self.title
| 31.341463
| 131
| 0.737743
| 932
| 0.725292
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.184436
|
a1be89c5fd04670493098c48a1472acc032f85c5
| 319
|
py
|
Python
|
Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py
|
lynnxlmiao/Coursera
|
8dc4073e29429dac14998689814388ee84435824
|
[
"MIT"
] | null | null | null |
Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py
|
lynnxlmiao/Coursera
|
8dc4073e29429dac14998689814388ee84435824
|
[
"MIT"
] | null | null | null |
Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py
|
lynnxlmiao/Coursera
|
8dc4073e29429dac14998689814388ee84435824
|
[
"MIT"
] | null | null | null |
import re
data = open('regex_sum_46353.txt')
numlist = list()
for line in data:
line = line.rstrip()
integers = re.findall('[0-9]+', line)
if len(integers) < 1: continue
for i in range(len(integers)):
num = float(integers[i])
numlist.append(num)
num_sum = sum(numlist)
print (num_sum)
| 21.266667
| 41
| 0.630094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.090909
|
a1be9584512b198578c74cac68370142c4a6feeb
| 121
|
py
|
Python
|
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | 1
|
2017-09-08T02:34:22.000Z
|
2017-09-08T02:34:22.000Z
|
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | null | null | null |
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import daemon
class TuinWolkDaemon(daemon.Daemon):
def run(self):
#TODO: implement me!
pass
| 13.444444
| 36
| 0.719008
| 83
| 0.68595
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.338843
|
a1bec1b04d0a00857461f68a4976f6de5f19b088
| 7,205
|
py
|
Python
|
plugins/mobile_app.py
|
alustig/OSPi
|
d3cb0d70d19359daba1265dcb3bf09e87847d214
|
[
"CC-BY-3.0"
] | null | null | null |
plugins/mobile_app.py
|
alustig/OSPi
|
d3cb0d70d19359daba1265dcb3bf09e87847d214
|
[
"CC-BY-3.0"
] | null | null | null |
plugins/mobile_app.py
|
alustig/OSPi
|
d3cb0d70d19359daba1265dcb3bf09e87847d214
|
[
"CC-BY-3.0"
] | null | null | null |
import json
import time
import datetime
import string
import calendar
from helpers import get_cpu_temp, check_login, password_hash
import web
import gv # Gain access to ospi's settings
from urls import urls # Gain access to ospi's URL list
from webpages import ProtectedPage, WebPage
##############
## New URLs ##
urls.extend([
'/jo', 'plugins.mobile_app.options',
'/jc', 'plugins.mobile_app.cur_settings',
'/js', 'plugins.mobile_app.station_state',
'/jp', 'plugins.mobile_app.program_info',
'/jn', 'plugins.mobile_app.station_info',
'/jl', 'plugins.mobile_app.get_logs',
'/sp', 'plugins.mobile_app.set_password'])
#######################
## Class definitions ##
class options(WebPage): # /jo
"""Returns device options as json."""
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
if check_login():
jopts = {
"fwv": gv.ver_str+'-OSPi',
"tz": gv.sd['tz'],
"ext": gv.sd['nbrd'] - 1,
"seq": gv.sd['seq'],
"sdt": gv.sd['sdt'],
"mas": gv.sd['mas'],
"mton": gv.sd['mton'],
"mtof": gv.sd['mtoff'],
"urs": gv.sd['urs'],
"rso": gv.sd['rst'],
"wl": gv.sd['wl'],
"ipas": gv.sd['ipas'],
"reset": gv.sd['rbt'],
"lg": gv.sd['lg']
}
else:
jopts = {
"fwv": gv.ver_str+'-OSPi',
}
return json.dumps(jopts)
class cur_settings(ProtectedPage): # /jc
"""Returns current settings as json."""
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
jsettings = {
"devt": gv.now,
"nbrd": gv.sd['nbrd'],
"en": gv.sd['en'],
"rd": gv.sd['rd'],
"rs": gv.sd['rs'],
"mm": gv.sd['mm'],
"rdst": gv.sd['rdst'],
"loc": gv.sd['loc'],
"sbits": gv.sbits,
"ps": gv.ps,
"lrun": gv.lrun,
"ct": get_cpu_temp(gv.sd['tu']),
"tu": gv.sd['tu']
}
return json.dumps(jsettings)
class station_state(ProtectedPage): # /js
"""Returns station status and total number of stations as json."""
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
jstate = {
"sn": gv.srvals,
"nstations": gv.sd['nst']
}
return json.dumps(jstate)
class program_info(ProtectedPage): # /jp
"""Returns program data as json."""
def GET(self):
lpd = [] # Local program data
dse = int((time.time()+((gv.sd['tz']/4)-12)*3600)/86400) # days since epoch
for p in gv.pd:
op = p[:] # Make local copy of each program
if op[1] >= 128 and op[2] > 1:
rel_rem = (((op[1]-128) + op[2])-(dse % op[2])) % op[2]
op[1] = rel_rem + 128
lpd.append(op)
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
jpinfo = {
"nprogs": gv.sd['nprogs']-1,
"nboards": gv.sd['nbrd'],
"mnp": 9999,
'pd': lpd
}
return json.dumps(jpinfo)
class station_info(ProtectedPage): # /jn
"""Returns station information as json."""
def GET(self):
disable = []
for byte in gv.sd['show']:
disable.append(~byte&255)
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
jpinfo = {
"snames": gv.snames,
"ignore_rain": gv.sd['ir'],
"masop": gv.sd['mo'],
"stn_dis": disable,
"maxlen": gv.sd['snlen']
}
return json.dumps(jpinfo)
class get_logs(ProtectedPage): # /jl
"""Returns log information for specified date range."""
def GET(self):
records = self.read_log()
data = []
qdict = web.input()
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
if 'start' not in qdict or 'end' not in qdict:
return []
for r in records:
event = json.loads(r)
date = time.mktime(datetime.datetime.strptime(event["date"], "%Y-%m-%d").timetuple())
if int(qdict["start"]) <= int(date) <= int(qdict["end"]):
pid = event["program"]
if pid == "Run-once":
pid = 98
if pid == "Manual":
pid = 99
pid = int(pid)
station = int(event["station"])
duration = string.split(event["duration"], ":")
duration = (int(duration[0]) * 60) + int(duration[1])
timestamp = int(time.mktime(utc_to_local(datetime.datetime.strptime(event["date"] + " " + event["start"], "%Y-%m-%d %H:%M:%S")).timetuple()))
data.append([pid, station, duration, timestamp])
return json.dumps(data)
def read_log(self):
try:
with open('./data/log.json') as logf:
records = logf.readlines()
return records
except IOError:
return []
class set_password():
"""Save changes to device password"""
def GET(self):
qdict = web.input()
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
web.header('Cache-Control', 'no-cache')
if not(qdict.has_key('pw')) or not(qdict.has_key('npw')) or not(qdict.has_key('cpw')):
return json.dumps({"result":3})
if password_hash(qdict['pw'], gv.sd['salt']) == gv.sd['password']:
if qdict['npw'] == "":
return json.dumps({"result":3})
elif qdict['cpw'] !='' and qdict['cpw'] == qdict['npw']:
gv.sd['password'] = password_hash(qdict['npw'], gv.sd['salt'])
else:
return json.dumps({"result":4})
else:
return json.dumps({"result":2})
return json.dumps({"result":1})
def utc_to_local(utc_dt):
    # get integer timestamp to avoid precision loss
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
| 33.511628
| 158
| 0.498959
| 6,121
| 0.849549
| 0
| 0
| 0
| 0
| 0
| 0
| 2,147
| 0.297988
|
a1beca2a104dc1445d55be605545d5222ed38310
| 4,427
|
py
|
Python
|
utils/iroha.py
|
LiTrans/BSMD
|
2a5660de5a4a5d49d24df4c78469b55f2be5a2d3
|
[
"Apache-2.0"
] | 1
|
2021-02-09T16:11:10.000Z
|
2021-02-09T16:11:10.000Z
|
utils/iroha.py
|
LiTrans/BSMD
|
2a5660de5a4a5d49d24df4c78469b55f2be5a2d3
|
[
"Apache-2.0"
] | 13
|
2019-11-20T17:23:41.000Z
|
2022-03-12T00:47:53.000Z
|
utils/iroha.py
|
LiTrans/BSMD
|
2a5660de5a4a5d49d24df4c78469b55f2be5a2d3
|
[
"Apache-2.0"
] | 1
|
2020-01-20T04:18:08.000Z
|
2020-01-20T04:18:08.000Z
|
"""
.. _Iroha:
Iroha
=====
Functions to post transactions in the iroha implementation of the BSMD
"""
from iroha import IrohaCrypto, Iroha, IrohaGrpc
import binascii
import sys
if sys.version_info[0] < 3:
raise Exception('Python 3 or a more recent version is required.')
# Transactions request iroha
def trace(func):
"""
A decorator for tracing methods' begin/end execution points
"""
def tracer(*args, **kwargs):
name = func.__name__
print('\tEntering "{}"'.format(name))
result = func(*args, **kwargs)
print('\tLeaving "{}"'.format(name))
return result
return tracer
@trace
def send_transaction_and_print_status(transaction, network):
"""
Send a transaction to the Blockchain (BSMD)
:param transaction: Transaction we are sending to the BSMD
:param network: address of the a node hosting the Blockchain
"""
    print('These prints make the transactions run slower; they are useful for debugging while developing.')
    print('Comment out all prints in send_transaction_and_print_status to make transactions faster.')
hex_hash = binascii.hexlify(IrohaCrypto.hash(transaction))
print('Transaction hash = {}, creator = {}'.format(
hex_hash, transaction.payload.reduced_payload.creator_account_id))
network.send_tx(transaction)
for status in network.tx_status_stream(transaction):
print(status)
# #################################
# functions available to all users
# #################################
def set_detail_to_node(sender, receiver, private_key, detail_key, detail_value, domain, ip):
"""
    This function can be used when the User object is not available. The sender must have permission to write to the
    details of the receiver.
    In federated learning the details are in JSON format and contain the address (location) where the weight is stored
    (if the weight is small enough, it can be embedded in the block if needed).
:Example:
>>> set_detail_to_node('David', 'Juan', 'private key of david', 'detail key of Juan', 'detail value', 'domain' \
'ip')
:param str sender: Name of the node sending the information
:param str receiver: Name of the node receiving the information
:param str private_key: Private key of the user
:param str detail_key: Name of the detail we want to set
:param str detail_value: Value of the detail
:param str domain: Name of the domain
:param str ip: address for connecting to the BSMD
"""
account = sender + '@' + domain
iroha = Iroha(account)
account_id = receiver + '@' + domain
ip_address = ip + ':50051'
network = IrohaGrpc(ip_address)
tx = iroha.transaction([
iroha.command('SetAccountDetail',
account_id=account_id,
key=detail_key,
value=detail_value)
])
IrohaCrypto.sign_transaction(tx, private_key)
send_transaction_and_print_status(tx, network)
def get_a_detail_written_by(name, writer, private_key, detail_key, domain, ip):
"""
    This function can be used when the User object is not available. Consult a detail of the node written by another node.
:Example:
>>> juan_detail = get_a_detail_written_by('David', 'Juan', 'private key of david', 'detail_key of Juan', 'domain', \
'ip')
>>> print(juan_detail)
    {
        "nodeA@domain":{
            "Age":"35"
        }
    }
:param str name: Name of the node consulting the information
:param str writer: Name of the node who write the detail
:param str private_key: Private key of the user
:param str detail_key: Name of the detail we want to consult
:param str domain: Name of the domain
:param str ip: Address for connecting to the BSMD
    :return: returns the detail written by "the writer"
:rtype: json
"""
account_id = name + '@' + domain
user_id = writer + '@' + domain
iroha = Iroha(account_id)
ip_address = ip + ':50051'
network = IrohaGrpc(ip_address)
query = iroha.query('GetAccountDetail',
account_id=account_id,
key=detail_key,
writer=user_id)
IrohaCrypto.sign_query(query, private_key)
response = network.send_query(query)
data = response.account_detail_response
print('Account id = {}, details = {}'.format(account_id, data.detail))
return data.detail
| 35.416
| 120
| 0.664558
| 0
| 0
| 0
| 0
| 788
| 0.177999
| 0
| 0
| 2,567
| 0.579851
|
a1bee1ce9e04568e61c5f5c3e54c374e370eb72e
| 1,068
|
py
|
Python
|
tibanna_cgap/lambdas/start_run.py
|
4dn-dcic/tibanna_ff
|
6fcfc056b832c14500e525207afeb5722f366a26
|
[
"MIT"
] | 2
|
2019-10-08T17:36:02.000Z
|
2019-10-08T18:42:05.000Z
|
tibanna_cgap/lambdas/start_run.py
|
4dn-dcic/tibanna_ff
|
6fcfc056b832c14500e525207afeb5722f366a26
|
[
"MIT"
] | null | null | null |
tibanna_cgap/lambdas/start_run.py
|
4dn-dcic/tibanna_ff
|
6fcfc056b832c14500e525207afeb5722f366a26
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# import json
from tibanna_ffcommon.exceptions import exception_coordinator
from tibanna_cgap.start_run import start_run
from tibanna_cgap.vars import AWS_REGION, LAMBDA_TYPE
config = {
'function_name': 'start_run_' + LAMBDA_TYPE,
'function_module': 'service',
'function_handler': 'handler',
'handler': 'service.handler',
'region': AWS_REGION,
'runtime': 'python3.6',
'role': 'lambda_full_s3',
'description': 'Tibanna zebra start_run',
'timeout': 300,
'memory_size': 256
}
def metadata_only(event):
# this relies on the fact that event contains and output key with output files
assert event['metadata_only']
assert event['output_files']
return real_handler(event, None)
@exception_coordinator('start_run', metadata_only)
def handler(event, context):
if event.get('push_error_to_end', True):
event['push_error_to_end'] = True # push error to end by default for pony
return real_handler(event, context)
def real_handler(event, context):
return start_run(event)
| 28.105263
| 82
| 0.713483
| 0
| 0
| 0
| 0
| 247
| 0.231273
| 0
| 0
| 447
| 0.418539
|
a1bf1dc46f3a24ddc127c89f233fb631f8cdaefb
| 3,474
|
py
|
Python
|
Amplo/Observation/_model_observer.py
|
Amplo-GmbH/AutoML
|
eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606
|
[
"MIT"
] | 5
|
2022-01-07T13:34:37.000Z
|
2022-03-17T06:40:28.000Z
|
Amplo/Observation/_model_observer.py
|
Amplo-GmbH/AutoML
|
eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606
|
[
"MIT"
] | 5
|
2022-03-22T13:42:22.000Z
|
2022-03-31T16:20:44.000Z
|
Amplo/Observation/_model_observer.py
|
Amplo-GmbH/AutoML
|
eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606
|
[
"MIT"
] | 1
|
2021-12-17T22:41:11.000Z
|
2021-12-17T22:41:11.000Z
|
# Copyright by Amplo
"""
Observer for checking production readiness of model.
This part of code is strongly inspired by [1].
References
----------
[1] E. Breck, C. Shanging, E. Nielsen, M. Salib, D. Sculley (2017).
The ML test score: A rubric for ML production readiness and technical debt
reduction. 1123-1132. 10.1109/BigData.2017.8258038.
"""
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from Amplo.Observation.base import PipelineObserver
from Amplo.Observation.base import _report_obs
__all__ = ["ModelObserver"]
class ModelObserver(PipelineObserver):
"""
Model observer before putting to production.
While the field of software engineering has developed a full range of best
practices for developing reliable software systems, similar best-practices
for ML model development are still emerging.
The following tests are included:
1. TODO: Model specs are reviewed and submitted.
2. TODO: Offline and online metrics correlate.
3. TODO: All hyperparameters have been tuned.
4. TODO: The impact of model staleness is known.
5. A simpler model is not better.
6. TODO: Model quality is sufficient on important data slices.
7. TODO: The model is tested for considerations of inclusion.
"""
TYPE = "model_observer"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.xt, self.xv, self.yt, self.yv = train_test_split(
self.x, self.y, test_size=0.3, random_state=9276306)
def observe(self):
self.check_better_than_linear()
@_report_obs
def check_better_than_linear(self):
"""
Checks whether the model exceeds a linear model.
This test incorporates the test ``Model 5`` from [1].
Citation:
A simpler model is not better: Regularly testing against a very
simple baseline model, such as a linear model with very few
features, is an effective strategy both for confirming the
functionality of the larger pipeline and for helping to assess the
cost to benefit tradeoffs of more sophisticated techniques.
Returns
-------
status_ok : bool
Observation status. Indicates whether a warning should be raised.
message : str
A brief description of the observation and its results.
"""
# Make score for linear model
if self.mode == self.CLASSIFICATION:
linear_model = LogisticRegression()
elif self.mode == self.REGRESSION:
linear_model = LinearRegression()
else:
raise AssertionError("Invalid mode detected.")
linear_model.fit(self.xt, self.yt)
linear_model_score = self.scorer(linear_model, self.xv, self.yv)
# Make score for model to observe
obs_model = self.model
obs_model.fit(self.xt, self.yt)
obs_model_score = self.scorer(obs_model, self.xv, self.yv)
status_ok = obs_model_score > linear_model_score
message = ("Performance of a linear model should not exceed the "
"performance of the model to observe. "
f"Score for linear model: {linear_model_score:.4f}. "
f"Score for observed model: {obs_model_score:.4f}.")
return status_ok, message
| 36.957447
| 78
| 0.670409
| 2,838
| 0.816926
| 0
| 0
| 1,771
| 0.509787
| 0
| 0
| 2,123
| 0.611111
|
a1c01c9ff8dac8f635383495ea6d6042923c0487
| 2,849
|
py
|
Python
|
mini projects/school_manager.py
|
Tryst480/python-tutorial
|
056803f185b9cf31235fdfc221a3a490c353cd70
|
[
"MIT"
] | null | null | null |
mini projects/school_manager.py
|
Tryst480/python-tutorial
|
056803f185b9cf31235fdfc221a3a490c353cd70
|
[
"MIT"
] | null | null | null |
mini projects/school_manager.py
|
Tryst480/python-tutorial
|
056803f185b9cf31235fdfc221a3a490c353cd70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# This one is up to you. Basically, I envisioned a system where you have students in a classroom. The
# classroom only holds information, like who the teacher is and how many students there are. It's like an online
# class, so students don't know who their peers or teacher are, but they can still do things like study and take
# tests. The point is to get used to how objects interact with each other: try to call things from other places
# while everything is driven from main():
class Student:
def __init__(self, name, laziness=5):
self.name = name
self.preparedness = 0
self._laziness = laziness
def takeTest(self, hardness):
# TODO: return a score that's 100 - difference between hardness and preparedness (score capped at 100)
return 0
def doHomework(self):
# TODO: return a score of either 0 or 100 depending on how lazy they are. Implementation is up to you.
return 0
def study(self):
# TODO: increment preparedness by a random number between 1-10 (prerparedness capped at 100)
pass
class Teacher:
def __init__(self, name):
self.name = name
self.classroom = None
self.test_grades = {}
self.homework_grades = {}
def administerTest(self, students, hardness):
# TODO: Given a hardness of a test and list of students. Make each student take test and log their grades
pass
def giveHomework(self, students):
# TODO: Given homework to student and log in their grades
pass
def giveGrades(self, students):
# TODO: Given all the test scores and homework score in each student, give 30% to HW and 70% to test.
# TODO: Return list of passed students and remove them from classroom. Clear grades for all remaining students
pass
class ClassRoom:
def __init__(self):
self.class_size_limit = 10
self.students = {}
self.teacher = None
def addStudent(self, student):
# TODO: add student to class. Print something if they try to add the same student or go over the limit
pass
def assignTeacherToClass(self, teacher):
# TODO: Assign teacher, also prompt user if they want to switch teacher if one already assigned or same teacher
pass
def getStudents(self):
# TODO: return a list of students
return
if __name__ == '__main__':
classroom = ClassRoom()
teacher = Teacher('Doctor Jones')
mike = Student('Mike')
sally = Student('Sally', laziness=1)
lebron = Student('Lebron', laziness=10)
# TODO: Assign a teacher to the classroom and add the students to the classroom. Then make the students study
# TODO: Make Students to homework, etc, exams, then pass or fail them, etc. Play around with it.
| 36.525641
| 119
| 0.67708
| 1,891
| 0.663742
| 0
| 0
| 0
| 0
| 0
| 0
| 1,684
| 0.591085
|
a1c0267af0e6d173981f4b35aa1b64d0f75f58d2
| 1,650
|
py
|
Python
|
hparams.py
|
ishine/EmotionControllableTextToSpeech
|
5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594
|
[
"MIT"
] | 12
|
2021-07-10T05:18:31.000Z
|
2022-03-22T01:04:41.000Z
|
hparams.py
|
ishine/EmotionControllableTextToSpeech
|
5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594
|
[
"MIT"
] | null | null | null |
hparams.py
|
ishine/EmotionControllableTextToSpeech
|
5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594
|
[
"MIT"
] | 3
|
2021-06-12T05:34:41.000Z
|
2022-03-15T06:44:55.000Z
|
import os
cleaners = 'korean_cleaners'
dataset = "emotion_korea"  # assumed dataset name; `dataset` was not defined anywhere in the original file
audio_data_path = os.path.join("/cb_im/datasets/", dataset)
data_path = '/home/prml/hs_oh/dataset/emotion_korea/'
duration_path = "/home/prml/jihyun/dataset/duration_all/duration"
strength_path = "/home/prml/hs_oh/dataset/emotion_strength"
# Text
text_cleaners = ['korean_cleaners']
# Audio and mel
### Emotion Korea ###
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
max_wav_value = 32768.0
n_mel_channels = 80
mel_fmin = 0
mel_fmax = 8000
f0_min = 71.0
f0_max = 792.8
energy_min = 0.0
energy_max = 283.72
# FastSpeech 2
encoder_layer = 4
encoder_head = 2
encoder_hidden = 256
decoder_layer = 4
decoder_head = 2
decoder_hidden = 256
fft_conv1d_filter_size = 1024
fft_conv1d_kernel_size = (9, 1)
encoder_dropout = 0.2
decoder_dropout = 0.2
variance_predictor_filter_size = 256
variance_predictor_kernel_size = 3
variance_predictor_dropout = 0.5
max_seq_len = 10000
# Checkpoints and synthesis path
preprocessed_path = os.path.join("/home/prml/hs_oh/dataset/", "emotion_korea")
checkpoint_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "cp")
eval_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "eval")
log_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "log")
test_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "test")
# Optimizer
batch_size = 48
epochs = 1000
n_warm_up_step = 4000
grad_clip_thresh = 1.0
acc_steps = 1
betas = (0.9, 0.98)
eps = 1e-9
weight_decay = 0.
total_step = 100000
# Save, log and synthesis
save_step = 5000
eval_step = 500
eval_size = 256
log_step = 10
clear_Time = 20
| 22.297297
| 81
| 0.758788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 544
| 0.329697
|
a1c0825b266bca976c211fbcfde48bbcb725afd2
| 1,083
|
py
|
Python
|
run_tests.py
|
dannybrowne86/django-ajax-uploader
|
741213e38e9532dd83d8040af17169da9d610660
|
[
"BSD-3-Clause"
] | 75
|
2015-02-09T22:49:57.000Z
|
2021-01-31T23:47:39.000Z
|
run_tests.py
|
dannybrowne86/django-ajax-uploader
|
741213e38e9532dd83d8040af17169da9d610660
|
[
"BSD-3-Clause"
] | 13
|
2015-02-27T03:01:30.000Z
|
2020-11-18T10:11:53.000Z
|
run_tests.py
|
dannybrowne86/django-ajax-uploader
|
741213e38e9532dd83d8040af17169da9d610660
|
[
"BSD-3-Clause"
] | 29
|
2015-02-09T22:50:16.000Z
|
2019-12-25T06:41:43.000Z
|
# from https://github.com/django-extensions/django-extensions/blob/master/run_tests.py
from django.conf import settings
from django.core.management import call_command
def main():
# Dynamically configure the Django settings with the minimum necessary to
# get Django running tests
settings.configure(
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.sessions',
'ajaxuploader',
),
# Django replaces this, but it still wants it. *shrugs*
DATABASE_ENGINE = 'django.db.backends.sqlite3',
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
MEDIA_ROOT = '/tmp/ajaxuploader_test_media/',
MEDIA_PATH = '/media/',
ROOT_URLCONF = 'ajaxuploader.urls',
DEBUG = True,
TEMPLATE_DEBUG = True
)
# Fire off the tests
call_command('test', 'ajaxuploader')
if __name__ == '__main__':
main()
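# Usage sketch (assuming Django and the ajaxuploader package are importable on PYTHONPATH):
#   python run_tests.py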
| 29.27027
| 86
| 0.600185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.492151
|
a1c0c279a861dff85fe4f00eb7ae86cd441ba20b
| 7,275
|
py
|
Python
|
shor.py
|
rodamber/cps
|
b78aa7756d24b91476f31b538f51508e2dee48b3
|
[
"MIT"
] | null | null | null |
shor.py
|
rodamber/cps
|
b78aa7756d24b91476f31b538f51508e2dee48b3
|
[
"MIT"
] | null | null | null |
shor.py
|
rodamber/cps
|
b78aa7756d24b91476f31b538f51508e2dee48b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Simulation of Shor's algorithm for integer factorization."""
import cmath
import math
import numpy as np
import random
class QuMem:
"""Representation of the memory of the quantum computer."""
def __init__(self, t, n):
"""Initialize the memory. For Shor's algorithm we have t + n qubits,
where t is such that N^2 <= 2^t < 2N^2 holds.
The memory is represented by explicitly saving all the 2^(t+n) possible
states and their corresponding amplitudes.
"""
# The amplitudes and the states are represented by three lists where,
# for each i, 0 <= i < 2^(t+n), amplitudes[i] is the amplitude of the
# state |fst[i], lst[i]>.
self.amplitudes = []
self.fst = [] # Quantum state of the first t qubits.
self.lst = [] # Quantum state of the last n qubits.
self.t = t # fst width
self.n = n # lst width
# Populate the quantum state lists.
for fst in range(2**t):
for lst in range(2**n):
self.amplitudes.append(0)
self.fst.append(fst)
self.lst.append(lst)
# Initialize the memory to the state |0, 0>.
self.amplitudes[0] = 1
def measure(self):
"""Measure the first t bits of the memory. Simulated by making a
weighted random choice of one of the possible states. The weights are
the squares of the absolute values of their amplitudes."""
return np.random.choice(
self.fst, p=list(map(lambda x: abs(x)**2, self.amplitudes)))
def __len__(self):
"""Equal to 2^(t+n). This is here for convenience."""
return len(self.amplitudes)
def __iter__(self):
"""Iterator of the quantum state. This is here for convenience."""
for x in zip(self.amplitudes, self.fst, self.lst):
yield x
def __repr__(self):
"""Represented as a linear combination, a0 |0, 0> + a1 |0, 1> + ... ,
of all the possible states."""
s = ""
for ampl, fst, lst in self:
s += "{:.4f} |{},{}> + ".format(ampl, fst, lst)
return s[:-3]
def hadamard(mem):
"""Apply the Hadamard gate to the first t qubits. After this
application, the memory is in a quantum superposition where the
measuring probability is equidistributed between the first t qubits."""
for i, (_, fst, lst) in enumerate(mem):
if lst == 0: # The last n qubits remain in state |0>
mem.amplitudes[i] = 1 / math.sqrt(2**mem.t)
return mem
def mod_exp(mem, x, N):
"""Apply the operator |j, k> |-> |j, k + x^j mod N>. However, in Shor's
algorithm k = 0, so we just apply the modular exponentiation."""
for i, (_, fst, lst) in enumerate(mem):
mem.lst[i] = pow(x, fst, N)
return mem
def qft(mem):
"""Apply quantum Fourier transform to the first t qubits."""
new_amplitudes = []
N = 2**mem.t
# Calculate root of unity in two steps, as complex exponentiation is
# expensive.
w__ = cmath.exp(2 * math.pi * 1j / N)
for k, _ in enumerate(mem):
s = 0
for j in range(N):
wjk = w__**(j * k)
s += wjk * mem.amplitudes[j]
new_amplitudes.append(s / math.sqrt(N))
mem.amplitudes = new_amplitudes
return mem
def denominator(x, qmax):
"""Finds the denominator q of the best rational approximation p/q for x
with q < qmax."""
y = x
q0, q1, q2 = 0, 1, 0
while True:
z = y - math.floor(y) # decimal part of y
if z < 0.5 / qmax**2:
return q1
y = 1 / z
q2 = math.floor(y) * q1 + q0
if q2 >= qmax:
return q1
q0, q1 = q1, q2
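# For example, denominator(0.333, 100) walks the continued-fraction expansion of 0.333 and returns 3,
# the denominator of the convergent 1/3; the next convergent, 333/1000, would exceed qmax.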
def shor(N, a):
"""Simulation of Shor's algorithm for order finding."""
assert 1 < a < N
while True:
n = N.bit_length()
t = math.ceil(math.log(N**2, 2)) # s.t. N^2 <= 2^t < 2N^2
mem = QuMem(t, n)
hadamard(mem)
mod_exp(mem, a, N)
qft(mem)
measure = mem.measure()
if measure == 0:
print("| measured zero, trying again ...")
else:
c = measure / 2**t
q = denominator(c, N)
p = math.floor(q * c + 0.5)
print("| measured {}, approximation for {} is {}/{}"
.format(measure, c, p, q))
mod = pow(a, q, N)
print("| {}^{} mod {} = {}".format(a, q, N, mod))
if mod == 1:
print("| got {}".format(q))
return q
else:
print("| failed, trying again ...")
def prime(n):
"""Primality test by trial division."""
if n == 2:
return True
elif n < 2 or n % 2 == 0:
return False
else:
return not any(n % x == 0
for x in range(3, math.ceil(math.sqrt(n)) + 1, 2))
def odd_prime_power(n):
"""Test if n is a power of an odd prime."""
if n < 3:
return False
factor = 0
for i in range(3, math.ceil(math.sqrt(n)) + 1, 2):
if n % i == 0:
factor = i
break
if factor == 0:
return False
for i in range(2, math.ceil(math.log(n, factor)) + 1):
if factor**i == n:
return True
return False
def factorize(N):
"""Applies Shor's algorithm to the problem of integer factorization."""
assert N > 1
if N % 2 == 0:
print(N, "is even")
elif prime(N):
print(N, "is prime")
elif odd_prime_power(N):
print(N, "is a power of an odd prime")
else:
while True:
a = random.randint(2, N - 1)
d = math.gcd(a, N)
print("| picked random a =", a)
if d != 1:
print("| got lucky, {} = {} * {}, trying again...".format(
N, d, N // d))
print("|---------------------------------------------")
else:
r = shor(N, a)
if r is None:
print("| trying again ...")
print("|-----------------------------------------------")
continue
y = r // 2
if r % 2 == 1:
print("| order {} is odd, trying again ...".format(r))
print("|-----------------------------------------------")
elif not 1 < y < N - 1:
print("| 1 < {} < {} - 1 is false, trying again".format(
y, N))
print("|-----------------------------------------------")
else:
factor = max(math.gcd(y - 1, N), math.gcd(y + 1, N))
if factor == 1:
print("| factor is one, trying again ...")
print("|---------------------------------------------")
else:
print("| found factor: {} = {} * {}".format(
N, factor, N // factor))
return factor
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print("USAGE: shor.py <input>")
else:
print(factorize(int(sys.argv[1])))
| 30.567227
| 79
| 0.479175
| 2,028
| 0.278763
| 173
| 0.02378
| 0
| 0
| 0
| 0
| 2,769
| 0.380619
|
a1c2d77e61f6bdb0c438878369cd53216104adca
| 365
|
py
|
Python
|
Mundo2/lerSexo.py
|
DanieleMagalhaes/Exercicios-Python
|
394c68e8f06a10ec16539addd888960d11d1318f
|
[
"MIT"
] | null | null | null |
Mundo2/lerSexo.py
|
DanieleMagalhaes/Exercicios-Python
|
394c68e8f06a10ec16539addd888960d11d1318f
|
[
"MIT"
] | null | null | null |
Mundo2/lerSexo.py
|
DanieleMagalhaes/Exercicios-Python
|
394c68e8f06a10ec16539addd888960d11d1318f
|
[
"MIT"
] | null | null | null |
print('-'*60)
print('\33[35m[ F ] Feminino\33[m \n\33[32m[ M ] Masculino\33[m \n ')
sexo = str(input('Qual o seu sexo? ')).strip().upper()[0]  # only keep the first letter
while sexo not in 'MF':
sexo = str(input('\33[31mDados inválidos.\33[m Por favor, informe seu sexo: ')).strip().upper()[0]
print('\nSexo {} registrado com sucesso!'.format(sexo))
print('-'*60)
| 52.142857
| 102
| 0.641096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.583106
|
a1c39f0658624fc259de69a62271fcd6a8ae59fa
| 2,858
|
py
|
Python
|
src/wordmain.py
|
keyurmodh00/SimpleHTR
|
8031ae481d396714f555bcc0c4cbb23846404a1f
|
[
"MIT"
] | null | null | null |
src/wordmain.py
|
keyurmodh00/SimpleHTR
|
8031ae481d396714f555bcc0c4cbb23846404a1f
|
[
"MIT"
] | null | null | null |
src/wordmain.py
|
keyurmodh00/SimpleHTR
|
8031ae481d396714f555bcc0c4cbb23846404a1f
|
[
"MIT"
] | null | null | null |
import os
import cv2
from WordSegmentation import wordSegmentation, prepareImg
import json
import editdistance
from path import Path
from DataLoaderIAM import DataLoaderIAM, Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
import argparse
import tensorflow as tf
class FilePaths:
"filenames and paths to data"
fnCharList = 'D:/SimpleHTR/model/charList.txt'
fnSummary = 'D:/SimpleHTR/model/summary.json'
fnInfer = 'D:/SimpleHTR/data/test.png'
fnCorpus = 'D:/SimpleHTR/data/corpus.txt'
def infer(model, fnImg):
"recognize text in image provided by file path"
img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = model.inferBatch(batch, True)
print(f'Recognized: "{recognized[0]}"')
print(f'Probability: {probability[0]}')
apex=open("D:/SimpleHTR/data/output.txt","a")
apex.write(recognized[0]+" ")
apex.close()
def main():
"""reads images from data/ and outputs the word-segmentation to out/"""
# read input images from 'in' directory
imgFiles = os.listdir('D:/SimpleHTR/input/')
for (i,f) in enumerate(imgFiles):
print('Segmenting words of sample %s'%f)
# read image, prepare it by resizing it to fixed height and converting it to grayscale
img = prepareImg(cv2.imread('D:/SimpleHTR/input/%s'%f), 50)
# execute segmentation with given parameters
# -kernelSize: size of filter kernel (odd integer)
# -sigma: standard deviation of Gaussian function used for filter kernel
# -theta: approximated width/height ratio of words, filter function is distorted by this factor
# - minArea: ignore word candidates smaller than specified area
res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)
# write output to 'out/inputFileName' directory
'''if not os.path.exists('D:/SimpleHTR/out/%s'%f):
os.mkdir('D:/SimpleHTR/out/%s'%f)'''
# iterate over all segmented words
print('Segmented into %d words'%len(res))
for (j, w) in enumerate(res):
(wordBox, wordImg) = w
(x, y, w, h) = wordBox
cv2.imwrite('D:/SimpleHTR/data/test.png', wordImg) # save word
cv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image
os.path.join(os.path.dirname('D:/SimpleHTR/src/main.py'))
tf.compat.v1.reset_default_graph()
exec(open('main.py').read())
# output summary image with bounding boxes around words
cv2.imwrite('D:/SimpleHTR/data/summary.png', img)
apex = open("D:/SimpleHTR/data/output.txt","a")
apex.write("\n")
apex.close()
if __name__ == '__main__':
main()
| 39.150685
| 103
| 0.651854
| 240
| 0.083975
| 0
| 0
| 0
| 0
| 0
| 0
| 1,343
| 0.469909
|
a1c3f7d64e7c7bb239f38c4ddad996fb0bfe247f
| 4,746
|
py
|
Python
|
asrtoolkit/data_structures/audio_file.py
|
greenkeytech/greenkey-asrtoolkit
|
f9a5990ee5c67b85dd8ff763777c986b03252ee5
|
[
"Apache-2.0"
] | 31
|
2019-08-03T08:42:37.000Z
|
2022-01-12T18:00:11.000Z
|
asrtoolkit/data_structures/audio_file.py
|
greenkeytech/greenkey-asrtoolkit
|
f9a5990ee5c67b85dd8ff763777c986b03252ee5
|
[
"Apache-2.0"
] | 28
|
2019-07-29T17:58:17.000Z
|
2021-08-20T14:30:25.000Z
|
asrtoolkit/data_structures/audio_file.py
|
greenkeytech/greenkey-asrtoolkit
|
f9a5990ee5c67b85dd8ff763777c986b03252ee5
|
[
"Apache-2.0"
] | 12
|
2019-07-29T13:16:41.000Z
|
2022-02-20T21:19:35.000Z
|
#!/usr/bin/env python
"""
Module for holding information about an audio file and doing basic conversions
"""
import hashlib
import logging
import os
import subprocess
from asrtoolkit.file_utils.name_cleaners import (
generate_segmented_file_name,
sanitize_hyphens,
strip_extension,
)
from asrtoolkit.file_utils.script_input_validation import valid_input_file
LOGGER = logging.getLogger()
def cut_utterance(
source_audio_file, target_audio_file, start_time, end_time, sample_rate=16000
):
"""
source_audio_file: str, path to file
target_audio_file: str, path to file
start_time: float or str
end_time: float or str
sample_rate: int, default 16000; audio sample rate in Hz
uses sox to segment source_audio_file to create target_audio_file that
contains audio from start_time to end_time
with audio sample rate set to sample_rate
"""
subprocess.call(
"sox -V1 {} -r {} -b 16 -c 1 {} trim {} ={}".format(
source_audio_file,
sample_rate,
target_audio_file,
start_time,
end_time,
),
shell=True,
)
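# For example (assuming the `sox` binary is available on PATH):
#   cut_utterance("call.sph", "segment_000.sph", 12.3, 17.85)
# writes a 16 kHz, 16-bit, mono segment covering 12.3 s to 17.85 s of the source file.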
def degrade_audio(source_audio_file, target_audio_file=None):
"""
Degrades audio to typical G711 level.
Useful if models need to target this audio quality.
"""
valid_input_file(source_audio_file, ["mp3", "sph", "wav", "au", "raw"])
target_audio_file = (
source_audio_file if target_audio_file is None else target_audio_file
)
# degrade to 8k
tmp1 = ".".join(source_audio_file.split(".")[:-1]) + "_tmp1.wav"
subprocess.call(
"sox -V1 {} -r 8000 -e a-law {}".format(source_audio_file, tmp1),
shell=True,
)
# convert to u-law
tmp2 = ".".join(source_audio_file.split(".")[:-1]) + "_tmp2.wav"
subprocess.call(
"sox -V1 {} --rate 8000 -e u-law {}".format(tmp1, tmp2),
shell=True,
)
# upgrade to 16k a-law signed
subprocess.call(
"sox -V1 {} --rate 16000 -e signed -b 16 --channel 1 {}".format(
tmp2, target_audio_file
),
shell=True,
)
os.remove(tmp1)
os.remove(tmp2)
def combine_audio(audio_files, output_file, gain=False):
"""
Combine audio files with possible renormalization to 0dB
"""
gain_str = ""
if gain:
gain_str = "gain -n 0"
subprocess.call(
"sox -V1 -m {} {} {}".format(" ".join(audio_files), output_file, gain_str),
shell=True,
)
class audio_file(object):
"""
Create a audio_file object for
- storing location
- retrieving a unique hash
- resampling for training
- splitting into segments given an STM file
"""
def __init__(self, location=""):
"""
Populate file location info
"""
self.location = None
if not os.path.exists(location):
raise FileNotFoundError('Could not find file at "{}"'.format(location))
self.location = location
def hash(self):
"""
Returns a sha1 hash of the file
"""
if self.location:
with open(self.location, "rb") as f:
return hashlib.sha1(f.read()).hexdigest()
else:
return hashlib.sha1("".encode()).hexdigest()
def prepare_for_training(self, file_name, sample_rate=16000):
"""
Converts to single channel (from channel 1) audio file
in SPH file format
Returns audio_file object on success, else None
"""
if file_name.split(".")[-1] != "sph":
LOGGER.warning(
"Forcing training data to use SPH file format for %s", file_name
)
file_name = strip_extension(file_name) + ".sph"
file_name = sanitize_hyphens(file_name)
# return None if error code given, otherwise return audio_file object
output_file = (
audio_file(file_name)
if not subprocess.call(
"sox -V1 {} {} rate {} remix -".format(
self.location, file_name, sample_rate
),
shell=True,
)
else None
)
return output_file
def split(self, transcript, target_dir):
"""
Split audio file and transcript into many pieces based on
valid segments of transcript
"""
os.makedirs(target_dir, exist_ok=True)
for iseg, seg in enumerate(transcript.segments):
cut_utterance(
self.location,
generate_segmented_file_name(target_dir, self.location, iseg),
seg.start,
seg.stop,
)
transcript.split(target_dir)
return
| 27.917647
| 83
| 0.596292
| 2,240
| 0.471976
| 0
| 0
| 0
| 0
| 0
| 0
| 1,764
| 0.371681
|
a1c400c5158580105326cc3e84bbb5b7fc61477c
| 574
|
py
|
Python
|
forms.py
|
qqalexqq/monkeys
|
df9a43adbda78da1f2ab1cc4c27819da4225d2e5
|
[
"MIT"
] | null | null | null |
forms.py
|
qqalexqq/monkeys
|
df9a43adbda78da1f2ab1cc4c27819da4225d2e5
|
[
"MIT"
] | null | null | null |
forms.py
|
qqalexqq/monkeys
|
df9a43adbda78da1f2ab1cc4c27819da4225d2e5
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm as Form  # flask.ext.* was removed in Flask 1.0; flask_wtf is the current package
from wtforms import (
TextField, IntegerField, HiddenField, SubmitField, validators
)
class MonkeyForm(Form):
id = HiddenField()
name = TextField('Name', validators=[validators.InputRequired()])
age = IntegerField(
'Age', validators=[
validators.InputRequired(message='Age should be an integer.'),
validators.NumberRange(min=0)
]
)
email = TextField(
'Email', validators=[validators.InputRequired(), validators.Email()]
)
submit_button = SubmitField('Submit')
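# A hypothetical Flask view sketch (route and template names are assumptions, not part of this module):
#   form = MonkeyForm()
#   if form.validate_on_submit():
#       ...  # persist form.name.data, form.age.data, form.email.data
#   return render_template('monkey_edit.html', form=form)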
| 27.333333
| 76
| 0.656794
| 450
| 0.783972
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.092334
|
a1c42f46fbea71221d404268be15bf4dbded43e9
| 7,008
|
py
|
Python
|
src/modules/model/getPretrained.py
|
sakimilo/transferLearning
|
6d5c1e878bf91a34d32add81d4a2a57091946ed3
|
[
"MIT"
] | null | null | null |
src/modules/model/getPretrained.py
|
sakimilo/transferLearning
|
6d5c1e878bf91a34d32add81d4a2a57091946ed3
|
[
"MIT"
] | 8
|
2020-03-24T17:05:21.000Z
|
2022-01-13T01:15:54.000Z
|
src/modules/model/getPretrained.py
|
sakimilo/transferLearning
|
6d5c1e878bf91a34d32add81d4a2a57091946ed3
|
[
"MIT"
] | null | null | null |
import os
import shutil
import tensorflow as tf
from tensorflow import keras
from logs import logDecorator as lD
import jsonref
import numpy as np
import pickle
import warnings
from tqdm import tqdm
from modules.data import getData
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.model.getPretrained'
### turn off tensorflow info/warning/error or all python warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings("ignore")
@lD.log(logBase + '.model')
def modelImageNet(logger, modelName, weightsFile=None, input_shape=(224, 224, 3)):
try:
if weightsFile is not None:
weights = weightsFile
else:
weights = 'imagenet'
if modelName == 'Xception':
base_model = keras.applications.xception.Xception(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'VGG16':
base_model = keras.applications.vgg16.VGG16(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'VGG16_includeTop':
base_model = keras.applications.vgg16.VGG16(input_shape=input_shape, include_top=True, weights=weights)
elif modelName == 'VGG19':
base_model = keras.applications.vgg19.VGG19(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'ResNet50':
base_model = keras.applications.resnet50.ResNet50(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'InceptionV3':
base_model = keras.applications.inception_v3.InceptionV3(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'InceptionResNetV2':
base_model = keras.applications.inception_resnet_v2.InceptionResNetV2(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'MobileNet':
base_model = keras.applications.mobilenet.MobileNet(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'DenseNet':
base_model = keras.applications.densenet.DenseNet121(input_shape=input_shape, include_top=False, weights=weights)
elif modelName == 'NASNet':
base_model = keras.applications.nasnet.NASNetMobile(input_shape=input_shape, include_top=False, weights=weights)
        else:
            raise ValueError('Unsupported model name: {}'.format(modelName))
        return base_model
except Exception as e:
logger.error('Unable to get model: {} \n{}'.format(modelName, str(e)))
@lD.log(logBase + '.outputTensorBoard')
def outputTensorBoard(logger, subfolder=None):
try:
tfboardFolder = '../notebooks/tensorlog/'
if subfolder is not None:
tfboardFolder = os.path.join(tfboardFolder, subfolder)
if os.path.exists(tfboardFolder):
shutil.rmtree(tfboardFolder)
os.makedirs(tfboardFolder)
with tf.Session() as sess:
tfWriter = tf.summary.FileWriter(tfboardFolder, sess.graph)
tfWriter.close()
except Exception as e:
logger.error('Unable to output tensorboard \n{}'.format(str(e)))
@lD.log(logBase + '.visualise_graph')
def visualise_graph(logger, modelName, subfolder=None):
try:
tf.keras.backend.clear_session()
tfboardFolder = '../notebooks/tensorlog/'
if subfolder is not None:
tfboardFolder = os.path.join(tfboardFolder, subfolder)
if os.path.exists(tfboardFolder):
shutil.rmtree(tfboardFolder)
os.makedirs(tfboardFolder)
img = np.random.randint(0, 5, (1, 224, 224, 3))
modelDict = getModelFileDict()
modelLoaded = modelImageNet(modelName, modelDict[modelName])
with tf.Session() as sess:
tfWriter = tf.summary.FileWriter(tfboardFolder, sess.graph)
tfWriter.close()
except Exception as e:
logger.error('Unable to write graph into tensorboard\n{}'.format(str(e)))
@lD.log(logBase + '.visualise_layers')
def visualise_layers(logger, sess, listOfTensorNodes, inputData):
try:
outputResults = sess.run( listOfTensorNodes,
feed_dict={
'input_1:0' : inputData
})
for res, tf_node in zip(outputResults, listOfTensorNodes):
print('-'*50)
print('node: {}; shape: {}'.format(tf_node, res[0].shape))
getData.visualiseStackedArray(res[0], cmap=None)
except Exception as e:
logger.error('Unable to visualise layers \n{}'.format(str(e)))
@lD.log(logBase + '.getModelFileDict')
def getModelFileDict(logger):
try:
modelDict = {
'Xception' : '../models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
'VGG16' : '../models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
'VGG16_includeTop' : '../models/vgg16_weights_tf_dim_ordering_tf_kernels.h5',
'VGG19' : '../models/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
'InceptionV3' : '../models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
'MobileNet' : '../models/mobilenet_1_0_224_tf_no_top.h5',
'DenseNet' : '../models/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
'NASNet' : '../models/nasnet_mobile_no_top.h5',
'ResNet50' : '../models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
'InceptionResNetV2' : '../models/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
}
return modelDict
except Exception as e:
logger.error('Unable to get model file dictionary \n{}'.format(str(e)))
@lD.log(logBase + '.checkReady')
def checkReady(logger):
try:
modelString = ['Xception', 'VGG16', 'VGG19', 'InceptionV3', 'MobileNet', 'DenseNet', 'NASNet',
'ResNet50', 'InceptionResNetV2', 'VGG16_includeTop']
modelDict = getModelFileDict()
for m in modelString:
try:
print('{} loading from {}...'.format(m, modelDict[m]), end='', flush=True)
modelLoaded = modelImageNet(modelName=m, weightsFile=modelDict[m])
print('successfully! ', end='', flush=True)
print('type: {}'.format(type(modelLoaded)))
except Exception as e:
print('failed. {} --> {}'.format(m, str(e)))
except Exception as e:
logger.error('Unable to check ready \n{}'.format(str(e)))
@lD.log(logBase + '.main')
def main(logger, resultsDict):
try:
checkReady()
except Exception as e:
logger.error('Unable to run main \n{}'.format(str(e)))
if __name__ == '__main__':
print('tf.__version__ :', tf.__version__)
print('keras.__version__:', keras.__version__)
| 37.079365
| 143
| 0.639555
| 0
| 0
| 0
| 0
| 6,356
| 0.906963
| 0
| 0
| 1,652
| 0.235731
|
a1c4c531f5d93b7c66d5df5fb932a485d12b518b
| 492
|
py
|
Python
|
Python/CountingBits.py
|
Jspsun/LEETCodePractice
|
9dba8c0441201a188b93e4d39a0a9b7602857a5f
|
[
"MIT"
] | 3
|
2017-10-14T19:49:28.000Z
|
2019-01-12T21:51:11.000Z
|
Python/CountingBits.py
|
Jspsun/LEETCodePractice
|
9dba8c0441201a188b93e4d39a0a9b7602857a5f
|
[
"MIT"
] | null | null | null |
Python/CountingBits.py
|
Jspsun/LEETCodePractice
|
9dba8c0441201a188b93e4d39a0a9b7602857a5f
|
[
"MIT"
] | 5
|
2017-02-06T19:10:23.000Z
|
2020-12-19T01:58:10.000Z
|
import math
class Solution(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
bits = [0,1]
for n in range (2, num+1):
count = 0
closestPower = int(math.floor(math.log(n,2)))
if closestPower != 0:
n-= 2**closestPower
count +=1
count += bits[n]
bits.append(count)
return bits[:num+1]
print (Solution().countBits(9))
| 24.6
| 57
| 0.463415
| 446
| 0.906504
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.130081
|
a1c56433fe8bc3861e49acb291c03048e0f30a43
| 363
|
py
|
Python
|
ACM-Solution/4queen.py
|
wasi0013/Python-CodeBase
|
4a7a36395162f68f84ded9085fa34cc7c9b19233
|
[
"MIT"
] | 2
|
2016-04-26T15:40:40.000Z
|
2018-07-18T10:16:42.000Z
|
ACM-Solution/4queen.py
|
wasi0013/Python-CodeBase
|
4a7a36395162f68f84ded9085fa34cc7c9b19233
|
[
"MIT"
] | 1
|
2016-04-26T15:44:15.000Z
|
2016-04-29T14:44:40.000Z
|
ACM-Solution/4queen.py
|
wasi0013/Python-CodeBase
|
4a7a36395162f68f84ded9085fa34cc7c9b19233
|
[
"MIT"
] | 1
|
2018-10-02T16:12:19.000Z
|
2018-10-02T16:12:19.000Z
|
# n-queens brute-force solution using permutations (board size n is set below)
from itertools import permutations
def board(vec):
print ("\n".join('.' * i + 'Q' + '.' * (n-i-1) for i in vec) + "\n===\n")
n = 8
cols = range(n)
for vec in permutations(cols):
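# vec[i]+i and vec[i]-i index the two diagonals; n distinct values in each set means no two queens share a diagonal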
if n == len(set(vec[i]+i for i in cols)) \
== len(set(vec[i]-i for i in cols)):
board(vec)
| 33
| 78
| 0.570248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.220386
|
a1c5f16bf229bdace56e1e6f63c0ce9caaa232d9
| 10,362
|
py
|
Python
|
View/pesquisa_produtos.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
View/pesquisa_produtos.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
View/pesquisa_produtos.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pesquisa_produtos.ui'
#
# Created by: PyQt5 View code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Frame(object):
def setupUi(self, Frame):
Frame.setObjectName("Frame")
Frame.resize(1048, 361)
Frame.setAutoFillBackground(False)
Frame.setStyleSheet("background: #FFF;")
self.fr_titulo_servicos = QtWidgets.QFrame(Frame)
self.fr_titulo_servicos.setGeometry(QtCore.QRect(0, 0, 1051, 60))
self.fr_titulo_servicos.setStyleSheet("")
self.fr_titulo_servicos.setObjectName("fr_titulo_servicos")
self.lb_tituloClientes_2 = QtWidgets.QLabel(self.fr_titulo_servicos)
self.lb_tituloClientes_2.setGeometry(QtCore.QRect(10, 15, 200, 30))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(18)
font.setBold(True)
font.setWeight(75)
self.lb_tituloClientes_2.setFont(font)
self.lb_tituloClientes_2.setStyleSheet("color: rgb(0, 0, 0)")
self.lb_tituloClientes_2.setObjectName("lb_tituloClientes_2")
self.bt_inserir = QtWidgets.QPushButton(self.fr_titulo_servicos)
self.bt_inserir.setGeometry(QtCore.QRect(910, 9, 131, 41))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_inserir.setFont(font)
self.bt_inserir.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_inserir.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_inserir.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_inserir.setStyleSheet("QPushButton {\n"
" background-color: rgb(78, 154, 6);\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
" background-color: #40a286\n"
"}")
self.bt_inserir.setIconSize(QtCore.QSize(75, 35))
self.bt_inserir.setObjectName("bt_inserir")
self.tb_produtos = QtWidgets.QTableWidget(Frame)
self.tb_produtos.setGeometry(QtCore.QRect(0, 100, 1041, 211))
self.tb_produtos.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.tb_produtos.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tb_produtos.setStyleSheet("QTableView{\n"
"color: #797979;\n"
"font-weight: bold;\n"
"font-size: 13px;\n"
"background: #FFF;\n"
"padding: 0 0 0 5px;\n"
"}\n"
"QHeaderView:section{\n"
"background: #FFF;\n"
"padding: 5px 0 ;\n"
"font-size: 12px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"color: #797979;\n"
"border: none;\n"
"border-bottom: 2px solid #CCC;\n"
"text-transform: uppercase\n"
"}\n"
"QTableView::item {\n"
"border-bottom: 2px solid #CCC;\n"
"padding: 2px;\n"
"}\n"
"\n"
"")
self.tb_produtos.setFrameShape(QtWidgets.QFrame.NoFrame)
self.tb_produtos.setFrameShadow(QtWidgets.QFrame.Plain)
self.tb_produtos.setAutoScrollMargin(20)
self.tb_produtos.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tb_produtos.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tb_produtos.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tb_produtos.setShowGrid(False)
self.tb_produtos.setGridStyle(QtCore.Qt.NoPen)
self.tb_produtos.setWordWrap(False)
self.tb_produtos.setRowCount(1)
self.tb_produtos.setObjectName("tb_produtos")
self.tb_produtos.setColumnCount(8)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tb_produtos.setHorizontalHeaderItem(7, item)
self.tb_produtos.horizontalHeader().setDefaultSectionSize(120)
self.tb_produtos.horizontalHeader().setHighlightSections(False)
self.tb_produtos.horizontalHeader().setStretchLastSection(True)
self.tb_produtos.verticalHeader().setVisible(False)
self.tb_produtos.verticalHeader().setDefaultSectionSize(50)
self.tb_produtos.verticalHeader().setMinimumSectionSize(20)
self.fr_botoes = QtWidgets.QFrame(Frame)
self.fr_botoes.setGeometry(QtCore.QRect(0, 330, 1051, 30))
self.fr_botoes.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_botoes.setObjectName("fr_botoes")
self.bt_selecionar = QtWidgets.QPushButton(self.fr_botoes)
self.bt_selecionar.setGeometry(QtCore.QRect(930, 0, 120, 30))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_selecionar.setFont(font)
self.bt_selecionar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_selecionar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_selecionar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_selecionar.setStyleSheet("QPushButton {\n"
"background-color: #1E87F0;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_selecionar.setIconSize(QtCore.QSize(75, 35))
self.bt_selecionar.setObjectName("bt_selecionar")
self.bt_refresh = QtWidgets.QPushButton(Frame)
self.bt_refresh.setGeometry(QtCore.QRect(1010, 60, 30, 31))
font = QtGui.QFont()
font.setFamily("Arial")
self.bt_refresh.setFont(font)
self.bt_refresh.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_refresh.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_refresh.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_refresh.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Imagens/refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_refresh.setIcon(icon)
self.bt_refresh.setObjectName("bt_refresh")
self.tx_busca = QtWidgets.QLineEdit(Frame)
self.tx_busca.setGeometry(QtCore.QRect(190, 60, 791, 31))
font = QtGui.QFont()
font.setFamily("Arial")
self.tx_busca.setFont(font)
self.tx_busca.setFocusPolicy(QtCore.Qt.ClickFocus)
self.tx_busca.setStyleSheet("QLineEdit {\n"
"color: #000\n"
"}\n"
"")
self.tx_busca.setObjectName("tx_busca")
self.cb_produtos = QtWidgets.QComboBox(Frame)
self.cb_produtos.setGeometry(QtCore.QRect(10, 60, 171, 31))
self.cb_produtos.setFocusPolicy(QtCore.Qt.StrongFocus)
self.cb_produtos.setStyleSheet("QComboBox{\n"
"background: #fff;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QComboBox:Focus {\n"
"border: 1px solid red;\n"
"}\n"
" QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 25px;\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
" }\n"
"QComboBox::down-arrow {\n"
" image: url(\"Imagens/down.png\");\n"
" }\n"
"")
self.cb_produtos.setObjectName("cb_produtos")
self.cb_produtos.addItem("")
self.bt_busca = QtWidgets.QPushButton(Frame)
self.bt_busca.setGeometry(QtCore.QRect(980, 60, 30, 31))
font = QtGui.QFont()
font.setFamily("Arial")
self.bt_busca.setFont(font)
self.bt_busca.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_busca.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_busca.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_busca.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("Imagens/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_busca.setIcon(icon1)
self.bt_busca.setObjectName("bt_busca")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Lista de Produtos"))
self.lb_tituloClientes_2.setText(_translate("Frame", "PRODUTOS"))
self.bt_inserir.setText(_translate("Frame", "NOVO PRODUTO"))
item = self.tb_produtos.verticalHeaderItem(0)
item.setText(_translate("Frame", "1"))
item = self.tb_produtos.horizontalHeaderItem(0)
item.setText(_translate("Frame", "ID"))
item = self.tb_produtos.horizontalHeaderItem(1)
item.setText(_translate("Frame", "CODIGO DE BARRAS"))
item = self.tb_produtos.horizontalHeaderItem(2)
item.setText(_translate("Frame", "ESTOQUE"))
item = self.tb_produtos.horizontalHeaderItem(3)
item.setText(_translate("Frame", "DESCRIÇÃO"))
item = self.tb_produtos.horizontalHeaderItem(4)
item.setText(_translate("Frame", "MARCA"))
item = self.tb_produtos.horizontalHeaderItem(5)
item.setText(_translate("Frame", "PREÇO"))
item = self.tb_produtos.horizontalHeaderItem(6)
item.setText(_translate("Frame", "FORNECEDOR"))
item = self.tb_produtos.horizontalHeaderItem(7)
item.setText(_translate("Frame", "CATEGORIA"))
self.bt_selecionar.setText(_translate("Frame", "SELECIONAR"))
self.bt_refresh.setToolTip(_translate("Frame", "ATUALIZAR TABELA"))
self.tx_busca.setPlaceholderText(_translate("Frame", "PROCURAR POR..."))
self.cb_produtos.setItemText(0, _translate("Frame", "SELECIONE"))
self.bt_busca.setToolTip(_translate("Frame", "BUSCAR"))
| 43.537815
| 102
| 0.687898
| 10,110
| 0.975398
| 0
| 0
| 0
| 0
| 0
| 0
| 2,190
| 0.211288
|
a1c62a23cf4d05075c2ce8fd742ceaebabdfcf8f
| 7,826
|
py
|
Python
|
zyc/zyc.py
|
Sizurka/zyc
|
5ed4158617293a613b52cb6197ca601a1b491660
|
[
"MIT"
] | null | null | null |
zyc/zyc.py
|
Sizurka/zyc
|
5ed4158617293a613b52cb6197ca601a1b491660
|
[
"MIT"
] | null | null | null |
zyc/zyc.py
|
Sizurka/zyc
|
5ed4158617293a613b52cb6197ca601a1b491660
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2019 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
GUI for finding/displaying parts and footprints.
"""
from __future__ import print_function
import os
import wx
from skidl import (
KICAD,
SchLib,
footprint_cache,
footprint_search_paths,
lib_search_paths,
skidl_cfg,
)
from .common import *
from .pckg_info import __version__
from .skidl_footprint_search import FootprintSearchPanel
from .skidl_part_search import PartSearchPanel
APP_TITLE = "zyc: SKiDL Part/Footprint Search"
APP_EXIT = 1
SHOW_HELP = 3
SHOW_ABOUT = 4
PART_SEARCH_PATH = 5
FOOTPRINT_SEARCH_PATH = 6
REFRESH = 7
class AppFrame(wx.Frame):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.panel = PartFootprintSearchPanel(self)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(self.panel, proportion=1, flag=wx.ALL | wx.EXPAND, border=SPACING)
self.SetSizer(box)
# Keep border same color as background of panel.
self.SetBackgroundColour(self.panel.GetBackgroundColour())
self.InitMenus()
self.SetTitle(APP_TITLE)
self.Center()
self.Show(True)
self.Fit()
def InitMenus(self):
# Top menu.
menuBar = wx.MenuBar()
# File submenu containing quit button.
fileMenu = wx.Menu()
menuBar.Append(fileMenu, "&File")
quitMenuItem = wx.MenuItem(fileMenu, APP_EXIT, "Quit\tCtrl+Q")
fileMenu.Append(quitMenuItem)
self.Bind(wx.EVT_MENU, self.OnQuit, id=APP_EXIT)
# Search submenu containing search and copy buttons.
srchMenu = wx.Menu()
menuBar.Append(srchMenu, "&Search")
partSrchPathItem = wx.MenuItem(
srchMenu, PART_SEARCH_PATH, "Set part search path...\tCtrl+P"
)
srchMenu.Append(partSrchPathItem)
self.Bind(wx.EVT_MENU, self.OnPartSearchPath, id=PART_SEARCH_PATH)
footprintSrchPathItem = wx.MenuItem(
srchMenu, FOOTPRINT_SEARCH_PATH, "Set footprint search path...\tCtrl+F"
)
srchMenu.Append(footprintSrchPathItem)
self.Bind(wx.EVT_MENU, self.OnFootprintSearchPath, id=FOOTPRINT_SEARCH_PATH)
refreshItem = wx.MenuItem(srchMenu, REFRESH, "Refresh part + footprint paths")
srchMenu.Append(refreshItem)
self.Bind(wx.EVT_MENU, self.OnRefresh, id=REFRESH)
# Help menu containing help and about buttons.
helpMenu = wx.Menu()
menuBar.Append(helpMenu, "&Help")
helpMenuItem = wx.MenuItem(helpMenu, SHOW_HELP, "Help\tCtrl+H")
helpMenu.Append(helpMenuItem)
aboutMenuItem = wx.MenuItem(helpMenu, SHOW_ABOUT, "About App\tCtrl+A")
helpMenu.Append(aboutMenuItem)
self.Bind(wx.EVT_MENU, self.ShowHelp, id=SHOW_HELP)
self.Bind(wx.EVT_MENU, self.ShowAbout, id=SHOW_ABOUT)
self.SetMenuBar(menuBar)
def OnPartSearchPath(self, event):
# Update search path for parts.
dlg = TextEntryDialog(
self,
title="Set Part Search Path",
caption="Part Search Path",
tip="Enter {sep}-separated list of directories in which to search for parts.".format(
sep=os.pathsep
),
)
dlg.Center()
dlg.SetValue(os.pathsep.join(lib_search_paths[KICAD]))
if dlg.ShowModal() == wx.ID_OK:
lib_search_paths[KICAD] = dlg.GetValue().split(os.pathsep)
skidl_cfg.store() # Stores updated lib search path in file.
dlg.Destroy()
def OnFootprintSearchPath(self, event):
# Update search path for footprints.
dlg = TextEntryDialog(
self,
title="Set Footprint Search Path",
caption="Footprint Search Path",
tip="Enter {sep}-separated list of directories in which to search for fp-lib-table file.".format(
sep=os.pathsep
),
)
dlg.Center()
dlg.SetValue(os.pathsep.join(footprint_search_paths[KICAD]))
if dlg.ShowModal() == wx.ID_OK:
footprint_search_paths[KICAD] = dlg.GetValue().split(os.pathsep)
skidl_cfg.store() # Stores updated search path in file.
dlg.Destroy()
def OnRefresh(self, event):
SchLib.reset()
footprint_cache.reset()
def ShowHelp(self, e):
Feedback(
"""
1. Enter keywords/regex in the part search box.
2. Matching parts will appear in the Library/Part table.
3. Select a row in the Library/Part table to display part info.
4. Enter keywords/regex in the footprint search box.
5. Matching footprints will appear in the Library/Footprint table.
6. Select a row in the Library/Footprint table to display the footprint.
7. a) Click the Copy button in the Part Search panel to copy
the part & footprint to the clipboard, -OR-
b) Click the Copy button in the Footprint Search panel to copy
the footprint to the clipboard, -OR-
c) Deselect (ctrl-click) the footprint row and click the
Copy button in the Part Search panel to copy just
the part to the clipboard.
8. Paste the clipboard contents into your SKiDL code.
General:
* Drag sashes to resize individual panels.
* Double-click column headers to sort table contents.
* Ctrl-click to select/deselect table cells.
""",
"Help",
)
def ShowAbout(self, e):
Feedback(
APP_TITLE + " " + __version__
+ """
(c) 2019 XESS Corp.
https://github.com/xesscorp/skidl
MIT License
""",
"About",
)
def OnQuit(self, e):
self.Close()
class PartFootprintSearchPanel(wx.SplitterWindow):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
# Subpanel for part search panel.
self.part_panel = add_border(
add_title(PartSearchPanel(self), "Part Search", wx.TOP), wx.BOTTOM
)
# self.part_panel = box_it(PartSearchPanel(self), "Part Search")
# Subpanel for footprint search.
self.footprint_panel = add_border(
add_title(FootprintSearchPanel(self), "Footprint Search", wx.TOP), wx.TOP
)
# self.footprint_panel = box_it(FootprintSearchPanel(self), "Footprint Search")
# Split subpanels top/bottom.
self.SplitHorizontally(self.part_panel, self.footprint_panel, sashPosition=0)
self.SetSashGravity(0.5) # Both subpanels expand/contract equally.
self.Update()
def main():
# import wx.lib.inspection
app = wx.App()
AppFrame(None)
# wx.lib.inspection.InspectionTool().Show()
app.MainLoop()
if __name__ == "__main__":
main()
| 32.882353
| 109
| 0.662663
| 5,934
| 0.758048
| 0
| 0
| 0
| 0
| 0
| 0
| 3,407
| 0.435232
|
a1c6e9a43d6622094c50a6e5fb6886a83b2efa97
| 516
|
py
|
Python
|
train/ip.py
|
VCG/gp
|
cd106b604f8670a70add469d41180e34df3b1068
|
[
"MIT"
] | null | null | null |
train/ip.py
|
VCG/gp
|
cd106b604f8670a70add469d41180e34df3b1068
|
[
"MIT"
] | null | null | null |
train/ip.py
|
VCG/gp
|
cd106b604f8670a70add469d41180e34df3b1068
|
[
"MIT"
] | null | null | null |
import cPickle as pickle
import os; import sys; sys.path.append('..')
import gp
import gp.nets as nets
PATCH_PATH = ('iplb')
X_train, y_train, X_test, y_test = gp.Patch.load_rgb(PATCH_PATH)
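# drop the last channel of each patch, keeping the first three channels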
X_train = X_train[:,:-1,:,:]
X_test = X_test[:,:-1,:,:]
cnn = nets.RGNetPlus()
cnn = cnn.fit(X_train, y_train)
test_accuracy = cnn.score(X_test, y_test)
print test_accuracy
# store CNN
sys.setrecursionlimit(1000000000)
with open(os.path.expanduser('~/Projects/gp/nets/IP_FULL.p'), 'wb') as f:
pickle.dump(cnn, f, -1)
| 21.5
| 73
| 0.705426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.106589
|
a1c8a7137ea1d05162f631c75ad27f5dd11e2101
| 1,066
|
py
|
Python
|
test/TestSourceMissing.py
|
falcon-org/Falcon
|
113b47ea6eef6ebbaba91eca596ca89e211cad67
|
[
"BSD-3-Clause"
] | null | null | null |
test/TestSourceMissing.py
|
falcon-org/Falcon
|
113b47ea6eef6ebbaba91eca596ca89e211cad67
|
[
"BSD-3-Clause"
] | null | null | null |
test/TestSourceMissing.py
|
falcon-org/Falcon
|
113b47ea6eef6ebbaba91eca596ca89e211cad67
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Check that falcon rebuilds an output if it is deleted.
import time
import os
makefile = '''
{
"rules":
[
{
"inputs": [ "source1", "source2" ],
"outputs": [ "output" ],
"cmd": "cat source1 > output && cat source2 >> output"
}
]
}
'''
def run(test):
test.create_makefile(makefile)
test.write_file("source1", "1")
test.write_file("source2", "2")
test.start()
# Build a first time to generate output
assert(set(["source1", "source2", "output"]) == set(test.get_dirty_targets()))
test.build()
assert(test.get_dirty_targets() == [])
assert(test.get_file_content('output') == '12')
# Delete source1
time.sleep(1)
os.remove("source1")
test.expect_watchman_trigger("source1")
assert(set(["source1", "output"]) == set(test.get_dirty_targets()))
# Build again. This should fail.
try:
test.build()
# Unreachable, the build should fail because a source file is missing.
assert False
except:
pass
# This should generate an error log
test.expect_error_log()
| 21.755102
| 80
| 0.638837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 546
| 0.512195
|
a1c9ea67f9a8ebf42ecee72115e10b2677436a17
| 216
|
py
|
Python
|
awesimsoss/__init__.py
|
spacetelescope/AWESim_SOSS
|
75669276bd8ce22bc86d6845c771964ffec94d07
|
[
"MIT"
] | 4
|
2019-12-17T19:04:25.000Z
|
2020-09-22T15:53:09.000Z
|
awesimsoss/__init__.py
|
spacetelescope/awesimsoss
|
75669276bd8ce22bc86d6845c771964ffec94d07
|
[
"MIT"
] | 94
|
2018-10-17T18:03:57.000Z
|
2021-03-01T07:34:21.000Z
|
awesimsoss/__init__.py
|
spacetelescope/awesimsoss
|
75669276bd8ce22bc86d6845c771964ffec94d07
|
[
"MIT"
] | 8
|
2018-10-17T20:45:49.000Z
|
2021-04-14T11:41:41.000Z
|
# -*- coding: utf-8 -*-
"""Top-level package for awesimsoss."""
__author__ = """Joe Filippazzo"""
__email__ = 'jfilippazzo@stsci.edu'
__version__ = '0.3.5'
from .awesim import TSO, TestTSO, BlackbodyTSO, ModelTSO
| 21.6
| 56
| 0.689815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.518519
|
a1cbe0620d09eccc4613b82d60775050479f1c1b
| 6,565
|
py
|
Python
|
keyboards/inline/in_processing/keyboards_sum_ready.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
keyboards/inline/in_processing/keyboards_sum_ready.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
keyboards/inline/in_processing/keyboards_sum_ready.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
from data import all_emoji
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.utils.callback_data import CallbackData
from utils.googlesheets import send_to_google
from utils.set_minus_and_plus_currences import set_minus_and_plus
from utils.get_minuses_sum_FGH import get_minus_FGH
from utils.get_values_FGH_MNO import get_plus_FGH
cb_what_sum = CallbackData('cb_ws', 'type_btn')
def create_kb_what_sum():
keyboard = InlineKeyboardMarkup()
keyboard.add (
InlineKeyboardButton (
text = 'скорректировать',
callback_data = cb_what_sum.new(type_btn='correct_sum')
)
)
keyboard.add (
InlineKeyboardButton (
text = 'подтвердить',
callback_data = cb_what_sum.new(type_btn='confirm_sum')
)
)
keyboard.add (
InlineKeyboardButton (
text = 'вернуться к заявке',
callback_data = cb_what_sum.new(type_btn='back_to_chosen_request')
)
)
back__main_menu = all_emoji['back__main_menu']
keyboard.add (
InlineKeyboardButton (
text=f'назад {back__main_menu} главное меню',
callback_data=cb_what_sum.new (
type_btn='back_main_menu'
)
)
)
return keyboard
cb_choose_currency = CallbackData('anprix', 'curr', 'type_btn')
def create_kb_choose_currency_processing(request):
emo_snail = all_emoji['back__main_menu']
# add pluses and keep minuses when the operation is an exchange
if request[3] == 'обмен':
if not request[5] == '0':
rub = request[5]
rub = str(rub)
if rub[0] != '-': rub = '+' + rub + ' ₽'
else: rub = rub + ' ₽'
else: rub = ''
if not request[6] == '0':
usd = request[6]
usd = str(usd)
if usd[0] != '-': usd = '+' + usd + ' $'
else: usd = usd + ' $'
else: usd = ''
if not request[7] == '0':
eur = request[7]
eur = str(eur)
if eur[0] != '-': eur = '+' + eur + ' €'
else: eur = eur + ' €'
else: eur = ''
else:
if not request[5] == '0':
rub = request[5]
rub = str(rub)
if rub[0] == '-': rub = rub[1:] + ' ₽'
else: rub = rub + ' ₽'
else: rub = ''
if not request[6] == '0':
usd = request[6]
usd = str(usd)
if usd[0] == '-': usd = usd[1:] + ' $'
else: usd = usd + ' $'
else: usd = ''
if not request[7] == '0':
eur = request[7]
eur = str(eur)
if eur[0] == '-': eur = eur[1:] + ' €'
else: eur = eur + ' €'
else: eur = ''
keyboard = InlineKeyboardMarkup()
if not request[5] == '0':
keyboard.add (
InlineKeyboardButton (
text = '{}'.format(rub),
callback_data = cb_choose_currency.new(curr='rub', type_btn='change_curr')
)
)
if not request[6] == '0':
keyboard.add (
InlineKeyboardButton (
text = '{}'.format(usd),
callback_data = cb_choose_currency.new(curr='usd', type_btn='change_curr')
)
)
if not request[7] == '0':
keyboard.add (
InlineKeyboardButton (
text = '{}'.format(eur),
callback_data = cb_choose_currency.new(curr='eur', type_btn='change_curr')
)
)
keyboard.add (
InlineKeyboardButton (
text=f'назад {emo_snail} главное меню',
callback_data=cb_choose_currency.new (
curr='-',
type_btn='back_main_menu'
)
)
)
return keyboard
cb_what_sum_correct = CallbackData('cbwsc', 'curr', 'type_btn')
def create_kb_what_sum_correct(request):
keyboard = InlineKeyboardMarkup()
rub, usd, eur = get_minus_FGH(request)
if rub != '':
keyboard.add (
InlineKeyboardButton (
text=rub,
callback_data = cb_what_sum_correct.new (
curr='rub',
type_btn='change_curr'
)
)
)
if usd != '':
keyboard.add (
InlineKeyboardButton (
text=usd,
callback_data = cb_what_sum_correct.new (
curr='usd',
type_btn='change_curr'
)
)
)
if eur != '':
keyboard.add (
InlineKeyboardButton (
text=eur,
callback_data = cb_what_sum_correct.new (
curr='eur',
type_btn='change_curr'
)
)
)
emo_snail = all_emoji['back__main_menu']
keyboard.add (
InlineKeyboardButton (
text=f'назад {emo_snail} главное меню',
callback_data=cb_what_sum_correct.new (
curr='-',
type_btn='back_main_menu'
)
)
)
return keyboard
cb_sum_correct_chunk = CallbackData('cbscc', 'curr', 'type_btn')
def create_kb_sum_correct_chunk(request):
keyboard = InlineKeyboardMarkup()
rub, usd, eur = get_plus_FGH(request)
if rub != '':
keyboard.add (
InlineKeyboardButton (
text=rub,
callback_data = cb_sum_correct_chunk.new (
curr='rub',
type_btn='change_curr'
)
)
)
if usd != '':
keyboard.add (
InlineKeyboardButton (
text=usd,
callback_data = cb_sum_correct_chunk.new (
curr='usd',
type_btn='change_curr'
)
)
)
if eur != '':
keyboard.add (
InlineKeyboardButton (
text=eur,
callback_data = cb_sum_correct_chunk.new (
curr='eur',
type_btn='change_curr'
)
)
)
emo_snail = all_emoji['back__main_menu']
keyboard.add (
InlineKeyboardButton (
text=f'назад {emo_snail} главное меню',
callback_data=cb_sum_correct_chunk.new (
curr='-',
type_btn='back_main_menu'
)
)
)
return keyboard
| 26.795918
| 90
| 0.493374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,005
| 0.149132
|
a1cc680c5d6f410a35524d1c6900493495131044
| 181
|
py
|
Python
|
hw4/4.3.py
|
ArtemNikolaev/gb-hw
|
b82403e39dc1ca530dc438309fc98ba89ce4337b
|
[
"Unlicense"
] | null | null | null |
hw4/4.3.py
|
ArtemNikolaev/gb-hw
|
b82403e39dc1ca530dc438309fc98ba89ce4337b
|
[
"Unlicense"
] | 40
|
2021-12-30T15:57:10.000Z
|
2022-01-26T16:44:24.000Z
|
hw4/4.3.py
|
ArtemNikolaev/gb-hw
|
b82403e39dc1ca530dc438309fc98ba89ce4337b
|
[
"Unlicense"
] | 1
|
2022-03-12T19:17:26.000Z
|
2022-03-12T19:17:26.000Z
|
# https://github.com/ArtemNikolaev/gb-hw/issues/24
def multiple_of_20_21():
return (i for i in range(20, 241) if i % 20 == 0 or i % 21 == 0)
print(list(multiple_of_20_21()))
| 22.625
| 68
| 0.662983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.276243
|
a1cd9d12331888d9263e120a221bcfaacd01d426
| 1,153
|
py
|
Python
|
simulations/gamma_plot.py
|
austindavidbrown/Centered-Metropolis-Hastings
|
a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f
|
[
"BSD-3-Clause"
] | null | null | null |
simulations/gamma_plot.py
|
austindavidbrown/Centered-Metropolis-Hastings
|
a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f
|
[
"BSD-3-Clause"
] | null | null | null |
simulations/gamma_plot.py
|
austindavidbrown/Centered-Metropolis-Hastings
|
a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
ssh brow5079@compute.cla.umn.edu
#qsub -I -q gpu
qsub -I -l nodes=1:ppn=10
module load python/conda/3.7
source activate env
ipython
"""
from math import sqrt, pi, exp
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import seaborn as sns
linewidth = 4
alpha = .8
plt.clf()
plt.style.use("ggplot")
plt.figure(figsize=(10, 8))
iterations = torch.arange(0, 1000, 1)
gammas = [.5, 1, 1.5, 2, 2.5]
colors = sns.color_palette("tab10")
for i in range(0, len(gammas)):
gamma = gammas[i]
color = colors[i]
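# per-iteration contraction factor (1 - exp(-(1 + sqrt(gamma))**2)) raised to the iteration count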
y = (1 - exp(-(1 + gamma**(1/2))**(2)))**(iterations)
plt.plot(iterations, y,
label = r"$\gamma$ = {}".format(gamma),
alpha = alpha,
color = color,
linewidth = linewidth)
plt.tick_params(axis='x', labelsize=20)
plt.tick_params(axis='y', labelsize=20)
plt.xlabel(r"Iterations", fontsize = 25, color="black")
plt.ylabel(r"Decrease in Wasserstein distance", fontsize = 25, color="black")
plt.legend(loc="best", fontsize=25, borderpad=.05, framealpha=0)
plt.savefig("decrease_plot.png", pad_inches=0, bbox_inches='tight',)
| 23.06
| 77
| 0.657415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 270
| 0.234172
|
a1cdf3d6b6757ac8b742a5871545ebfcd99aef04
| 13,761
|
py
|
Python
|
hopper_controller/src/hexapod/folding_manager.py
|
CreedyNZ/Hopper_ROS
|
1e6354109f034a7d1d41a5b39ddcb632cfee64b2
|
[
"MIT"
] | 36
|
2018-12-19T18:03:08.000Z
|
2022-02-21T16:20:12.000Z
|
hopper_controller/src/hexapod/folding_manager.py
|
CreedyNZ/Hopper_ROS
|
1e6354109f034a7d1d41a5b39ddcb632cfee64b2
|
[
"MIT"
] | null | null | null |
hopper_controller/src/hexapod/folding_manager.py
|
CreedyNZ/Hopper_ROS
|
1e6354109f034a7d1d41a5b39ddcb632cfee64b2
|
[
"MIT"
] | 7
|
2019-08-11T20:31:27.000Z
|
2021-09-19T04:34:18.000Z
|
import rospy
MOVE_CYCLE_PERIOD = 0.01
def move_towards(target, current, step=1):
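# Step `current` toward `target` by at most `step`; returns (new_value, target_reached)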
if abs(target-current) < step:
return target, True
else:
if target > current:
return current + step, False
else:
return current - step, False
def move_leg(leg, coxa=None, femur=None, tibia=None, step=1.3):
coxa_done = True
femur_done = True
tibia_done = True
if coxa:
leg.coxa, coxa_done = move_towards(coxa, leg.coxa, step)
if femur:
leg.femur, femur_done = move_towards(femur, leg.femur, step)
if tibia:
leg.tibia, tibia_done = move_towards(tibia, leg.tibia, step)
return coxa_done and femur_done and tibia_done
def is_leg_close(leg, coxa=None, femur=None, tibia=None, tolerance=20):
coxa_close = True
femur_close = True
tibia_close = True
if coxa:
coxa_close = leg.coxa + tolerance > coxa > leg.coxa - tolerance
if femur:
femur_close = leg.femur + tolerance > femur > leg.femur - tolerance
if tibia:
tibia_close = leg.tibia + tolerance > tibia > leg.tibia - tolerance
return coxa_close and femur_close and tibia_close
class FoldingManager(object):
def __init__(self, body_controller):
super(FoldingManager, self).__init__()
self.body_controller = body_controller
self.last_motor_position = None
def position_femur_tibia(self):
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, None, 60, 240)
lm = move_leg(self.last_motor_position.left_middle, None, 60, 240)
lr = move_leg(self.last_motor_position.left_rear, None, 60, 240)
rf = move_leg(self.last_motor_position.right_front, None, 240, 60)
rm = move_leg(self.last_motor_position.right_middle, None, 240, 60)
rr = move_leg(self.last_motor_position.right_rear, None, 240, 60)
self.body_controller.set_motors(self.last_motor_position)
if lf and lm and lr and rf and rm and rr:
break
rospy.sleep(0.05)
def check_if_folded(self):
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
lf = is_leg_close(self.last_motor_position.left_front, 240)
lm = is_leg_close(self.last_motor_position.left_middle, 240) or is_leg_close(self.last_motor_position.left_middle, 60)
lr = is_leg_close(self.last_motor_position.left_rear, 60)
rf = is_leg_close(self.last_motor_position.right_front, 60)
rm = is_leg_close(self.last_motor_position.right_middle, 60) or is_leg_close(self.last_motor_position.right_middle, 240)
rr = is_leg_close(self.last_motor_position.right_rear, 240)
return lf and lm and lr and rf and rm and rr
def unfold(self):
self.position_femur_tibia()
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = False
lr = False
rf = False
rr = False
if self.last_motor_position.left_middle.coxa > 120:
lf = move_leg(self.last_motor_position.left_front, 150)
lm = move_leg(self.last_motor_position.left_middle, 150)
if self.last_motor_position.left_middle.coxa < 180:
lr = move_leg(self.last_motor_position.left_rear, 150)
if self.last_motor_position.right_middle.coxa < 180:
rf = move_leg(self.last_motor_position.right_front, 150)
rm = move_leg(self.last_motor_position.right_middle, 150)
if self.last_motor_position.right_middle.coxa > 120:
rr = move_leg(self.last_motor_position.right_rear, 150)
self.body_controller.set_motors(self.last_motor_position)
if lf and lm and lr and rf and rm and rr:
break
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, tibia=210)
lm = move_leg(self.last_motor_position.left_middle, tibia=210)
lr = move_leg(self.last_motor_position.left_rear, tibia=210)
rf = move_leg(self.last_motor_position.right_front, tibia=90)
rm = move_leg(self.last_motor_position.right_middle, tibia=90)
rr = move_leg(self.last_motor_position.right_rear, tibia=90)
self.body_controller.set_motors(self.last_motor_position)
if lf and lm and lr and rf and rm and rr:
break
rospy.sleep(0.2)
self.body_controller.set_torque(False)
def fold(self):
self.position_femur_tibia()
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
if not self.check_if_folded():
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, 150)
rm = move_leg(self.last_motor_position.right_middle, 150)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, 240)
lr = move_leg(self.last_motor_position.left_rear, 60)
rf = move_leg(self.last_motor_position.right_front, 60)
rr = move_leg(self.last_motor_position.right_rear, 240)
self.body_controller.set_motors(self.last_motor_position)
if lf and lr and rf and rr:
break
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, 240)
rm = move_leg(self.last_motor_position.right_middle, 60)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
rospy.sleep(0.2)
self.body_controller.set_torque(False)
def unfold_on_ground(self):
self.position_femur_tibia()
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
# lift middle legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, tibia=200)
rm = move_leg(self.last_motor_position.right_middle, tibia=100)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
# fold out middle legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, coxa=150)
rm = move_leg(self.last_motor_position.right_middle, coxa=150)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
# lower right leg
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rm = move_leg(self.last_motor_position.right_middle, femur=170, tibia=100)
self.body_controller.set_motors(self.last_motor_position)
if rm:
break
# unfold right legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rf = move_leg(self.last_motor_position.right_front, coxa=150)
rr = move_leg(self.last_motor_position.right_rear, coxa=150)
self.body_controller.set_motors(self.last_motor_position)
if rf and rr:
break
# lift right legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rf = move_leg(self.last_motor_position.right_front, tibia=90)
rr = move_leg(self.last_motor_position.right_rear, tibia=90)
self.body_controller.set_motors(self.last_motor_position)
if rf and rr:
break
# switch lifted side
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, femur=130, tibia=200)
rm = move_leg(self.last_motor_position.right_middle, femur=240, tibia=90)
self.body_controller.set_motors(self.last_motor_position)
if rm and lm:
break
# unfold left legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, coxa=150)
lr = move_leg(self.last_motor_position.left_rear, coxa=150)
self.body_controller.set_motors(self.last_motor_position)
if lf and lr:
break
# lift left legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, tibia=210)
lr = move_leg(self.last_motor_position.left_rear, tibia=210)
self.body_controller.set_motors(self.last_motor_position)
if lf and lr:
break
# lift middle left
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, femur=60, tibia=210)
self.body_controller.set_motors(self.last_motor_position)
if lm:
break
rospy.sleep(0.2)
self.body_controller.set_torque(False)
def fold_on_ground(self):
current_position = self.body_controller.read_hexapod_motor_positions()
self.last_motor_position = current_position
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, 150, femur=60, tibia=210)
lm = move_leg(self.last_motor_position.left_middle, 150, femur=60, tibia=210)
lr = move_leg(self.last_motor_position.left_rear, 150, femur=60, tibia=210)
rf = move_leg(self.last_motor_position.right_front, 150, femur=240, tibia=90)
rm = move_leg(self.last_motor_position.right_middle, 150, femur=240, tibia=90)
rr = move_leg(self.last_motor_position.right_rear, 150, femur=240, tibia=90)
self.body_controller.set_motors(self.last_motor_position)
if lf and lm and lr and rf and rm and rr:
break
# lower right leg
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rm = move_leg(self.last_motor_position.right_middle, femur=170, tibia=100)
self.body_controller.set_motors(self.last_motor_position)
if rm:
break
# compress right legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rf = move_leg(self.last_motor_position.right_front, None, 240, 60)
rr = move_leg(self.last_motor_position.right_rear, None, 240, 60)
self.body_controller.set_motors(self.last_motor_position)
if rf and rr:
break
# fold right legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
rf = move_leg(self.last_motor_position.right_front, 60)
rr = move_leg(self.last_motor_position.right_rear, 240)
self.body_controller.set_motors(self.last_motor_position)
if rf and rr:
break
# switch lifted side
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, femur=130, tibia=200)
rm = move_leg(self.last_motor_position.right_middle, femur=240, tibia=90)
self.body_controller.set_motors(self.last_motor_position)
if rm and lm:
break
# compress left legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, None, 60, 240)
lr = move_leg(self.last_motor_position.left_rear, None, 60, 240)
self.body_controller.set_motors(self.last_motor_position)
if lf and lr:
break
# fold left legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lf = move_leg(self.last_motor_position.left_front, 240)
lr = move_leg(self.last_motor_position.left_rear, 60)
self.body_controller.set_motors(self.last_motor_position)
if lf and lr:
break
# lift left middle leg
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, femur=60, tibia=210)
self.body_controller.set_motors(self.last_motor_position)
if lm:
break
# fold middle legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, 230)
rm = move_leg(self.last_motor_position.right_middle, 70)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
# compress middle legs
while True:
rospy.sleep(MOVE_CYCLE_PERIOD)
lm = move_leg(self.last_motor_position.left_middle, None, 60, 240)
rm = move_leg(self.last_motor_position.right_middle, None, 240, 60)
self.body_controller.set_motors(self.last_motor_position)
if lm and rm:
break
rospy.sleep(0.2)
self.body_controller.set_torque(False)
| 44.824104
| 128
| 0.637599
| 12,589
| 0.914832
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.024562
|
a1d0867a1669f7b83b98d82fdaa8c25a6b04cd98
| 2,237
|
py
|
Python
|
Teil_57_12_Kugeln.py
|
chrMenzel/A-beautiful-code-in-Python
|
92ee43c1fb03c299384d4de8bebb590c5ba1b623
|
[
"MIT"
] | 50
|
2018-12-23T15:46:16.000Z
|
2022-03-28T15:49:59.000Z
|
Teil_57_12_Kugeln.py
|
chrMenzel/A-beautiful-code-in-Python
|
92ee43c1fb03c299384d4de8bebb590c5ba1b623
|
[
"MIT"
] | 9
|
2018-12-03T10:31:29.000Z
|
2022-01-20T14:41:33.000Z
|
Teil_57_12_Kugeln.py
|
chrMenzel/A-beautiful-code-in-Python
|
92ee43c1fb03c299384d4de8bebb590c5ba1b623
|
[
"MIT"
] | 69
|
2019-02-02T11:59:09.000Z
|
2022-03-28T15:54:28.000Z
|
import random as rnd
from itertools import combinations
from time import perf_counter as pfc
def seite_ermitteln(versuch):
seite = [0]*anz_kugeln
links = set(versuch[:len(versuch)//2])
for nr in versuch:
seite[nr] = -1 if nr in links else 1
return seite
def wiegen(nr, gewicht, seite):
return gewicht * seite[nr]
def statusänderung(wiegung, seite):
for nr, status in kugeln:
if wiegung == 0 and seite[nr] == 0: continue
if (wiegung == 0 and seite[nr] != 0) or (wiegung != 0 and seite[nr] == 0):
kugeln[nr][1] = '='
else:
kugeln[nr][1] = stati[wiegung == seite[nr]].get(status, status)
def kugel2str(liste):
return ' '.join([f'{nr}{kugeln[nr][1]}' for nr in liste])
def prüfung(v1, v2m, v2lr):
text = ''
for nr in range(anz_kugeln):
for k in kugeln: k[1] = '?'
gesucht = (nr, rnd.choice((-1, 1)))
text += f'Gesucht wird {gesucht[0]}{"+" if gesucht[1] == 1 else "-"}\n'
for n in range(2):
v = v1 if n == 0 else v2m if wiegung == 0 else v2lr
seite = seite_ermitteln(v)
wiegung = wiegen(*gesucht, seite)
statusänderung(wiegung, seite)
text += f'{wiegung} {kugel2str(v)}\n'
kandidaten = [k[0] for k in kugeln if k[1] != '=']
if len(kandidaten) > 3: return False, text
text += f'Kandidaten = {kugel2str(kandidaten)}\n\n'
return True, text
def prüfe_varianten(modus):
anz_lösungen = 0
vs = set()
for anz in range(1, anz_kugeln//2+1):
for v2l in combinations(range(anz_kugeln), anz):
for v2r in combinations(range(anz_kugeln), anz):
if set(v2l) & set(v2r): continue
if (v2r,v2l) in vs: continue
vs.add((v2l, v2r))
e, text = prüfung(v1, v2m, v2l+v2r)
if e:
anz_lösungen += 1
if modus > 0: print(f'Lösung Nr. {anz_lösungen} für V2lr {v2l} <-> {v2r}')
if modus > 1: print(text+'\n\n')
if modus > 2: return
print(f'Anzahl Lösungen für V2lr: {anz_lösungen}')
start = pfc()
stati = {True: {'?': '+', '-': '='},
False: {'?': '-', '+': '='}}
anz_kugeln = 12
kugeln = [[nr, '?'] for nr in range(anz_kugeln)]
v1 = [0, 1, 2, 3, 4, 5, 6, 7]
v2m = [8, 9, 10, 0, 1, 2]
prüfe_varianten(0)
print(f'{pfc()-start:.2f} Sek.')
| 28.679487
| 84
| 0.582924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.147046
|