max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
|---|---|---|---|---|---|---|
teacher/explanation/__init__.py | Kaysera/fuzzy-lore | 3 | 12768451 | <filename>teacher/explanation/__init__.py<gh_stars>1-10
from ._factual import FID3_factual, m_factual, mr_factual, c_factual
from ._counterfactual import FID3_counterfactual, i_counterfactual, f_counterfactual
from .FID3_explainer import FID3Explainer
from .FDT_explainer import FDTExplainer
__all__ = [
"FID3_factual",
"m_factual",
"mr_factual",
"c_factual",
"FID3_counterfactual",
"i_counterfactual",
"f_counterfactual",
"FID3Explainer",
"FDTExplainer"
]
| 1.539063 | 2 |
Python-desenvolvimento/ex031.py | MarcosMaciel-MMRS/Desenvolvimento-python | 0 | 12768452 | <filename>Python-desenvolvimento/ex031.py
# Trip cost: up to 200 km costs R$0.50 per km; beyond that, R$0.45 per km
distancia = float(input('Enter the distance: '))
print('For this distance of {}km: '.format(distancia))
if distancia <= 200:
preco = distancia * 0.50
print('The ticket price will be R${:.2f}.'.format(preco))
else:
preco = distancia * 0.45
print('The ticket price will be R${:.2f}.'.format(preco))
| 4.15625 | 4 |
src/openpersonen/api/tests/test_data/kind.py | maykinmedia/open-personen | 2 | 12768453 | <reponame>maykinmedia/open-personen
KIND_RETRIEVE_DATA = {
"_embedded": {
"naam": {
"_embedded": {
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"geslachtsnaam": False,
"voornamen": False,
"voorvoegsel": False,
}
},
"geslachtsnaam": "<NAME>",
"voorletters": "K",
"voornamen": "Media Kind",
"voorvoegsel": "van",
},
"geboorte": {
"_embedded": {
"datum": {"dag": 15, "datum": "1999-06-15", "jaar": 1999, "maand": 6},
"land": {"code": "6030", "omschrijving": "Nederland"},
"plaats": {"code": "624", "omschrijving": "Amsterdam"},
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"datum": False,
"land": False,
"plaats": False,
},
}
},
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"burgerservicenummer": False,
},
},
"burgerservicenummer": "456789123",
"geheimhoudingPersoonsgegevens": True,
"leeftijd": 21,
}
KIND_RETRIEVE_DATA_NO_DATES = {
"_embedded": {
"naam": {
"_embedded": {
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"geslachtsnaam": False,
"voornamen": False,
"voorvoegsel": False,
}
},
"geslachtsnaam": "<NAME>",
"voorletters": "K",
"voornamen": "<NAME>",
"voorvoegsel": "van",
},
"geboorte": {
"_embedded": {
"datum": {"dag": None, "datum": None, "jaar": None, "maand": None},
"land": {"code": "6030", "omschrijving": "Nederland"},
"plaats": {"code": "624", "omschrijving": "Amsterdam"},
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"datum": False,
"land": False,
"plaats": False,
},
}
},
"inOnderzoek": {
"_embedded": {
"datumIngangOnderzoek": {
"dag": None,
"datum": None,
"jaar": None,
"maand": None,
}
},
"burgerservicenummer": False,
},
},
"burgerservicenummer": "456789123",
"geheimhoudingPersoonsgegevens": True,
"leeftijd": 0,
}
| 1.46875 | 1 |
spyce/test/support.py | markrwilliams/capysicum | 7 | 12768454 | <filename>spyce/test/support.py<gh_stars>1-10
import contextlib
import tempfile
import unittest
import socket
import errno
import sys
import os
class ErrnoMixin(unittest.TestCase):
@contextlib.contextmanager
def assertRaisesWithErrno(self, exc, errno):
with self.assertRaises(exc) as caught_ce:
yield
self.assertEqual(caught_ce.exception.errno, errno)  # assertTrue would treat the expected errno as a message and never compare it
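# Usage sketch (illustrative): a test class mixing in ErrnoMixin can assert
# both the exception type and its errno in one block, e.g.:
#   with self.assertRaisesWithErrno(OSError, errno.EBADF):
#       os.close(-1)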
class TemporaryFDMixin(unittest.TestCase):
def setUp(self):
cn = self.__class__.__name__
kwargs = {}
if sys.version_info.major > 2:
# Python 3 will use a buffered io object that *always*
# gets flushed on close, even if the buffer is empty.
# Disable buffering to avoid ENOTCAPABLE on self.f.close()
# TODO: write tests to exercise "normal" (i.e., unicode
# and buffered) Python 3 files.
kwargs['buffering'] = 0
self.f = tempfile.TemporaryFile('wb+',
prefix="spyce_test_{}_tmp".format(cn),
**kwargs)
self.pipeReadFD, self.pipeWriteFD = self.pipeFDs = os.pipe()
self.socketSideA, self.socketSideB = self.sockets = socket.socketpair()
def tearDown(self):
self.f.close()
for s in self.sockets:
s.close()
for fd in self.pipeFDs:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
| 2.203125 | 2 |
auxCallsHandler.py | storage4grid/s4g-electric-vehicle-analytics | 0 | 12768455 | <reponame>storage4grid/s4g-electric-vehicle-analytics
import os, json, time
| 0.902344 | 1 |
phase4.py | NimaVahdat/SNN-Framework- | 0 | 12768456 | import torch
from cnsproject.network.neural_populations import LIFPopulation, ELIFPopulation, AELIFPopulation
from cnsproject.network.monitors import Monitor
from cnsproject.network.connections import DenseConnection, RandomConnection
from cnsproject.plotting.plotting import plot_current, raster, population_activity
from typing import Tuple, Callable, Iterable, Union
class phase4():
def __init__(
self,
N: int,
time: int = 100,
dt: Union[float, torch.Tensor]= 1,
** kwargs
) -> None:
self.shape_exc = (int(N*0.8/2),)
self.shape_inh = (int(N*0.2),)
self.time = time
self.dt = dt
def current_maker(self, mean, std, threshold=None, jump=60) -> Callable[[int], torch.Tensor]:
torch.manual_seed(16)
if threshold != None:
a = torch.empty(threshold + 1).normal_(mean=mean+jump, std=std)
b = torch.empty(self.time - threshold).normal_(mean=mean, std=std)
x = torch.cat((b,a))
else:
x = torch.empty(self.time + 1).normal_(mean=mean, std=std)
y = torch.Tensor([0])
x = torch.cat((y,x))
def current_rand(t: int):
return x[int(t)]
return current_rand
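# Illustrative sketch (numbers assumed): current_maker(mean=45, std=5, threshold=50)
# returns a callable I(t) that draws ~N(45, 5) noise for the first
# time - threshold steps and ~N(45 + jump, 5) afterwards, i.e. a noisy step input.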
def pop_maker(
self,
shape: Iterable[int],
model = "LIF",
is_inhibitory: bool = False,
v_rest: Union[float, torch.Tensor] = -70.,
threshold: Union[float, torch.Tensor] = -50.,
tau: Union[float, torch.Tensor] = 15,
dt: Union[float, torch.Tensor]= 1,
R: Union[float, torch.Tensor] = 1.,
delta_t: float = 1.,
theta_rh: float = -55.,
tau_w: Union[float, torch.Tensor] = 5,
w: Union[float, torch.Tensor] = 2,
a: Union[float, torch.Tensor] = 5,
b: Union[float, torch.Tensor] = 2):
if model == "LIF":
neuron = LIFPopulation(shape=shape,
is_inhibitory=is_inhibitory,
v_rest=v_rest,
threshold=threshold,
tau=tau,
dt=dt,
R=R)
elif model == "ELIF":
neuron = ELIFPopulation(shape=shape,
is_inhibitory=is_inhibitory,
v_rest=v_rest,
threshold=threshold,
tau=tau,
dt=dt,
R=R,
theta_rh=theta_rh,
delta_t=delta_t)
elif model == "AELIF":
neuron = AELIFPopulation(shape=shape,
is_inhibitory=is_inhibitory,
v_rest=v_rest,
threshold=threshold,
tau=tau,
dt=dt,
R=R,
theta_rh=theta_rh,
delta_t=delta_t,
tau_w=tau_w,
w=w,
a=a,
b=b)
return neuron
def Simulation(self, pop_inh, pop_exc1, pop_exc2, mean: list, std: list):
current0 = self.current_maker(mean[0], std[0])
current1 = self.current_maker(mean[1], std[1], threshold=50)
current2 = self.current_maker(mean[2], std[2])
monitor_exc1 = Monitor(pop_exc1, state_variables=["s", "v"])
monitor_exc1.set_time_steps(self.time, self.dt)
monitor_exc1.reset_state_variables()
monitor_exc2 = Monitor(pop_exc2, state_variables=["s", "v"])
monitor_exc2.set_time_steps(self.time, self.dt)
monitor_exc2.reset_state_variables()
monitor_inh = Monitor(pop_inh, state_variables=["s", "v"])
monitor_inh.set_time_steps(self.time, self.dt)
monitor_inh.reset_state_variables()
connect_exc1_to_inh = DenseConnection(pop_exc1, pop_inh, C=25,control=1)
connect_inh_to_exc1 = DenseConnection(pop_inh, pop_exc1, C=13,control=1)
connect_exc2_to_inh = DenseConnection(pop_exc2, pop_inh, C=25,control=1)
connect_inh_to_exc2 = DenseConnection(pop_inh, pop_exc2, C=13,control=1)
connect_exc1_to_exc2 = DenseConnection(pop_exc1, pop_exc2, C=5,control=1)
connect_exc2_to_exc1 = DenseConnection(pop_exc2, pop_exc1, C=5,control=1)
connect_inside_exc1 = RandomConnection(pop_exc1, pop_exc1, inside=True, C=10,control=1)
connect_inside_exc2 = RandomConnection(pop_exc2, pop_exc2, inside=True, C=10,control=1)
connect_inside_inh = RandomConnection(pop_inh, pop_inh, inside=True, C=10,control=1)
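# Topology sketch: both excitatory pools excite the shared inhibitory pool
# (C=25) and each other (C=5); the inhibitory pool projects back to each
# excitatory pool (C=13); every pool also has random recurrence (C=10).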
for t in range(self.time):
input_I_inh = torch.Tensor([current0(t=t)] * pop_inh.shape[0])
input_I_exc1 = torch.Tensor([current1(t=t)] * pop_exc1.shape[0])
input_I_exc2 = torch.Tensor([current2(t=t)] * pop_exc2.shape[0])
noise_inh = torch.empty(input_I_inh.size(0)).normal_(0, 5)
noise_exc1 = torch.empty(input_I_exc1.size(0)).normal_(0, 5)
noise_exc2 = torch.empty(input_I_exc2.size(0)).normal_(0, 5)
traces_inh = connect_exc1_to_inh.compute(pop_exc1.s) +\
connect_exc2_to_inh.compute(pop_exc2.s) + \
connect_inside_inh.compute(pop_inh.s)
traces_exc1 = connect_inh_to_exc1.compute(pop_inh.s) +\
connect_exc2_to_exc1.compute(pop_exc2.s) + \
connect_inside_exc1.compute(pop_exc1.s)
traces_exc2 = connect_inh_to_exc2.compute(pop_inh.s) +\
connect_exc1_to_exc2.compute(pop_exc1.s) + \
connect_inside_exc2.compute(pop_exc2.s)
pop_inh.forward(I=input_I_inh-noise_inh, traces=traces_inh)
pop_exc1.forward(I=input_I_exc1-noise_exc1, traces=traces_exc1)
pop_exc2.forward(I=input_I_exc2-noise_exc2, traces=traces_exc2)
monitor_inh.record()
monitor_exc1.record()
monitor_exc2.record()
s_inh = monitor_inh.get("s")
s_exc1 = monitor_exc1.get("s")
s_exc2 = monitor_exc2.get("s")
population_activity(s_inh, "of inh")
population_activity(s_exc1, "of exc1")
population_activity(s_exc2, "of exc2")
plot_current(current=current0, time=(0, self.time), dt=self.dt, label="inh input current")
plot_current(current=current1, time=(0, self.time), dt=self.dt, label="exc1 input current")
plot_current(current=current2, time=(0, self.time), dt=self.dt, label="exc2 input current")
raster(s_exc1, label="exc1")
raster(s_exc2, label="exc2")
raster(s_inh, label="inh")
if __name__ == "__main__":
p = phase4(100)
pop_exc1=p.pop_maker(p.shape_exc, tau=20)
pop_exc2=p.pop_maker(p.shape_exc, tau=20)
pop_inh=p.pop_maker(p.shape_inh, tau=10, R=1, is_inhibitory=True)
p.Simulation(pop_inh, pop_exc1, pop_exc2, [20, 45, 45], [5, 5, 5]) | 2.296875 | 2 |
nere/ner_models/bilstm_att.py | WangShengguang/NERE | 0 | 12768457 | <filename>nere/ner_models/bilstm_att.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiLSTM_ATT(nn.Module):
def __init__(self, vocab_size, num_ent_tags, ent_emb_dim, batch_size, sequence_len):
super(BiLSTM_ATT, self).__init__()
self.batch_size = batch_size
self.vocab_size = vocab_size
self.embedding_dim = ent_emb_dim
self.hidden_dim = 256
self.num_ent_tags = num_ent_tags
self.word_embeds = nn.Embedding(self.vocab_size, self.embedding_dim)
self.ent_label_embeddings = nn.Embedding(num_ent_tags, ent_emb_dim)
self.lstm = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.hidden_dim // 2,
num_layers=2, bidirectional=True)
self.dropout_emb = nn.Dropout(p=0.5)
self.dropout_lstm = nn.Dropout(p=0.5)
self.dropout_att = nn.Dropout(p=0.5)
self.dropout = nn.Dropout(0.5)
self.att_weight = nn.Parameter(torch.randn(self.batch_size, sequence_len, self.hidden_dim))
# todo
# https://discuss.pytorch.org/t/define-the-number-of-in-feature-in-nn-linear-dynamically/31185/2
# sequence_feature_len = 100
# self.adaptive_max_pool1d = nn.AdaptiveMaxPool1d(sequence_feature_len)
self.classifier = nn.Linear(self.hidden_dim, self.num_ent_tags)
self.criterion_loss = nn.CrossEntropyLoss()
def attention(self, H):
"""
:param H: batch_size, hidden_dim, sequence_len
:return: batch_size, hidden_dim, sequence_len
"""
M = torch.tanh(H)  # F.tanh is deprecated in favor of torch.tanh
a = F.softmax(torch.bmm(self.att_weight, M), 2) # batch_size,sequence_len,sequence_len
return torch.bmm(H, a)
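# Shape sketch (illustrative numbers): with batch_size=2, hidden_dim=256 and
# sequence_len=50, H is (2, 256, 50); att_weight @ tanh(H) yields attention
# scores a of shape (2, 50, 50), and H @ a returns a re-weighted (2, 256, 50).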
def get_ent_features(self, seq_output, ent_masks):
"""
Args:
seq_output: (batch_size, seq_length, hidden_size)
ent_mentions: (batch_size, 2, ent_label_dim), 2: 2 mentions, ent_label_dim: `start` and `end` indices
"""
# shape: (batch_size, seq_length, hidden_size)
# ent_masks_expand = ent_masks.unsqueeze(-1).expand_as(seq_output).half()
ent_masks_expand = ent_masks.unsqueeze(-1).expand_as(seq_output).float()
# shape: (batch_size, 1)
# ent_masks_sum = ent_masks.sum(dim=1).unsqueeze(1).half()
ent_masks_sum = ent_masks.sum(dim=1).unsqueeze(1).float()
ones = torch.ones_like(ent_masks_sum)
ent_masks_sum = torch.where(ent_masks_sum > 0, ent_masks_sum, ones)
# shape: (batch_size, hidden_size)
ent_features = seq_output.mul(ent_masks_expand).sum(dim=1).div(ent_masks_sum)
return ent_features
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
embeds = self.word_embeds(input_ids) # batch_size, sequence_len, embedding_dim
embeds = torch.transpose(embeds, 0, 1) # sequence_len, batch_size, embedding_dim
lstm_out, (h_n, c_n) = self.lstm(embeds) # sequence_len, batch_size, hidden_dim
lstm_out = lstm_out.permute(1, 2, 0) # batch_size, hidden_dim, sequence_len
lstm_out = self.dropout_lstm(lstm_out)
att_out = torch.tanh(self.attention(lstm_out)) # batch_size, hidden_dim, sequence_len
att_out = torch.transpose(att_out, 1, 2) # batch_size, sequence_len, hidden_dim
all_features = self.dropout_att(att_out)
logits = self.classifier(all_features) # batch_size, sequence_len, num_ent_tags
label_indices = logits.argmax(dim=-1)  # argmax over the tag dimension
if labels is None:
return label_indices
else:
loss = self.criterion_loss(logits.view(-1, self.num_ent_tags), labels.view(-1))
return label_indices, loss
| 2.5 | 2 |
bank/src/bank_account_app/views.py | yuramorozov01/bank_system | 0 | 12768458 | from bank_account_app.models import BankAccount
from bank_account_app.permissions import (CanChangeBankAccount,
CanViewBankAccount)
from bank_account_app.serializers import (BankAccountDetailsSerializer,
BankAccountShortDetailsSerializer)
from django.db import transaction
from django.db.models import F
from rest_framework import permissions, validators, viewsets
from rest_framework.decorators import action
class BankAccountViewSet(viewsets.ReadOnlyModelViewSet):
'''
retrieve:
Get the specified bank account.
list:
Get a list of all bank accounts.
'''
def get_queryset(self):
querysets_dict = {
'retrieve': BankAccount.objects.all(),
'list': BankAccount.objects.all(),
'top_up': BankAccount.objects.all(),
}
queryset = querysets_dict.get(self.action)
return queryset.distinct()
def get_serializer_class(self):
serializers_dict = {
'retrieve': BankAccountDetailsSerializer,
'list': BankAccountShortDetailsSerializer,
'top_up': BankAccountDetailsSerializer,
}
serializer_class = serializers_dict.get(self.action)
return serializer_class
def get_permissions(self):
base_permissions = [permissions.IsAuthenticated, CanViewBankAccount]
permissions_dict = {
'retrieve': [],
'list': [],
'top_up': [CanChangeBankAccount],
}
base_permissions += permissions_dict.get(self.action, [])
return [permission() for permission in base_permissions]
@action(methods=['PUT', 'PATCH'], detail=True)
def top_up(self, request, pk=None):
# Top up balance to bank account
try:
bank_account = BankAccount.objects.get(pk=pk)
amount = self.request.POST.get('amount')
amount = self.validate_top_up_amount(amount)
with transaction.atomic():
bank_account.balance = F('balance') + amount
bank_account.save()
return self.retrieve(request, pk=pk)
except BankAccount.DoesNotExist:
raise validators.ValidationError({
'bank_account': 'Bank account with the specified id doesn\'t exist!',
})
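# Note: F('balance') + amount pushes the increment into a single SQL UPDATE
# inside the transaction, avoiding a read-modify-write race when two top-ups
# hit the same account concurrently.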
def validate_top_up_amount(self, value):
if value is None:
raise validators.ValidationError({
'amount': 'This field is required!'
})
float_value = 0
try:
float_value = float(value)
except ValueError:
raise validators.ValidationError({
'amount': 'Incorrect value!'
})
if float_value < 0:
raise validators.ValidationError({
'amount': 'This field must not be negative!'
})
return float_value
| 1.960938 | 2 |
jtracker/__init__.py | jthub/jt-cli | 2 | 12768459 | __version__ = '0.2.0a33'
| 1.054688 | 1 |
poker/hand.py | ensley/poker | 0 | 12768460 | <reponame>ensley/poker<filename>poker/hand.py<gh_stars>0
import collections
import collections.abc
from poker import card
class Hand(collections.abc.MutableSequence):  # collections.MutableSequence was removed in Python 3.10
"""docstring for Hand"""
def __init__(self, cards):
cards = cards.split()
cards = [card.Card(c) for c in cards]
if len(cards) != 5:
raise ValueError('A hand must have exactly 5 cards')
hand_tuples = zip([c.rank for c in cards],
[c.suit for c in cards])
if len(set(hand_tuples)) != 5:
raise ValueError('A hand cannot contain duplicate cards')
self._cards = cards
self._rank_counts = self.count_ranks()
self._score = self.calculate_score()
def calculate_score(self):
score = 0
hand_category_rank = self.get_category_rank()
tiebreakers = self.get_tiebreakers()
category_ranks = {
'straight flush': 8,
'four of a kind': 7,
'full house': 6,
'flush': 5,
'straight': 4,
'three of a kind': 3,
'two pair': 2,
'one pair': 1,
'high card': 0,
}
r = category_ranks[hand_category_rank]
score += r * 15**6
for idx, tie in zip(range(5, -1, -1), tiebreakers):
score += tie * 15**idx
return score
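# Scoring sketch: every hand packs into one base-15 integer. The category
# rank fills the 15**6 digit and the five tiebreaker card ranks fill the
# lower digits, so comparing two hands is a single integer comparison.
# (Base 15 works here because card ranks are assumed to run from 2 to 14.)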
def get_category_rank(self):
if self.is_straight_flush():
return 'straight flush'
elif self.is_four_kind():
return 'four of a kind'
elif self.is_full_house():
return 'full house'
elif self.is_flush():
return 'flush'
elif self.is_straight():
return 'straight'
elif self.is_three_kind():
return 'three of a kind'
elif self.is_two_pair():
return 'two pair'
elif self.is_one_pair():
return 'one pair'
elif self.is_high():
return 'high card'
else:
raise ValueError('Couldn\'t score hand')
def get_tiebreakers(self):
return [t[0] for t in self._rank_counts]
def count_ranks(self):
rank_counts = collections.Counter([c.rank for c in self._cards]).most_common()
rank_counts.sort(key=lambda tup: (-tup[1], -tup[0]))
return rank_counts
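# Illustrative example (assuming K maps to rank 13): a hand holding
# K, K, 9, 9, 2 yields [(13, 2), (9, 2), (2, 1)], sorted by count, then rank.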
def is_straight_flush(self):
return self.is_straight() and self.is_flush()
def is_four_kind(self):
return self._rank_counts[0][1] == 4
def is_full_house(self):
if len(self._rank_counts) != 2:
return False
return self._rank_counts[0][1] == 3 and self._rank_counts[1][1] == 2
def is_straight(self):
return self.is_high_straight() or self.is_low_straight()
def is_high_straight(self):
if len(self._rank_counts) != 5:
return False
ranks = [c.rank for c in self._cards]
return max(ranks) - min(ranks) == 4
def is_low_straight(self):
if len(self._rank_counts) != 5:
return False
ranks = [c.rank for c in self._cards]
if max(ranks) != 14:
return False
ranks[ranks.index(14)] = 1
return max(ranks) - min(ranks) == 4
def is_flush(self):
suits = [c.suit for c in self._cards]
return len(set(suits)) == 1
def is_three_kind(self):
if len(self._rank_counts) != 3:
return False
return self._rank_counts[0][1] == 3
def is_two_pair(self):
if len(self._rank_counts) != 3:
return False
return self._rank_counts[0][1] == 2 and self._rank_counts[1][1] == 2
def is_one_pair(self):
if len(self._rank_counts) != 4:
return False
return self._rank_counts[0][1] == 2
def is_high(self):
return len(self._rank_counts) == 5 and not self.is_straight()
@property
def score(self):
return self._score
def __getitem__(self, item):
return self._cards[item]
def __setitem__(self, item, value):
self._cards[item] = value
self._rank_counts = self.count_ranks()
def __delitem__(self, item):
del self._cards[item]
self._rank_counts = self.count_ranks()
def __len__(self):
return len(self._cards)
def insert(self, index, value):
if len(self) == 5:
raise IndexError('A hand cannot contain more than 5 cards')
self._cards.insert(index, value)
self._rank_counts = self.count_ranks()
def sort(self, key=None, reverse=None):
self._cards.sort(key=key, reverse=reverse)
def __lt__(a, b):
return a._score < b._score
def __le__(a, b):
return a._score <= b._score
def __eq__(a, b):
return a._score == b._score
def __ne__(a, b):
return a._score != b._score
def __ge__(a, b):
return a._score >= b._score
def __gt__(a, b):
return a._score > b._score
def __repr__(self):
return 'Hand({0!r},{1!r},{2!r},{3!r},{4!r})'.format(*[str(c) for c in self._cards])
def __str__(self):
return ' '.join([str(c) for c in self._cards]) | 3.265625 | 3 |
package/zimagi/exceptions.py | zimagi/zima | 0 | 12768461 | <filename>package/zimagi/exceptions.py<gh_stars>0
import sys
import traceback
def format_exception_info():
exc_type, exc_value, exc_tb = sys.exc_info()
return traceback.format_exception(exc_type, exc_value, exc_tb)
class ParameterError(Exception):
pass
class CodecError(Exception):
pass
class CommandError(Exception):
pass
class CommandClientError(CommandError):
pass
class CommandParseError(CommandError):
pass
class CommandConnectionError(CommandError):
pass
class CommandResponseError(CommandError):
pass
class DataError(Exception):
pass
class DataParseError(DataError):
pass
| 2.3125 | 2 |
delphin/mrs/path.py | matichorvat/pydelphin | 9 | 12768462 | """
Classes and functions for path exploration on semantic graphs.
"""
#import pdb
import re
import warnings
from collections import deque, defaultdict
from itertools import product
from .components import (Pred, links, var_sort)
from .util import powerset
from .config import IVARG_ROLE
from delphin.exceptions import XmrsError
# for rebuilding Xmrs from paths
from delphin.mrs import Node, Link, Pred, Dmrs
TOP = 'TOP'
STAR = '*'
# flags
NODEID = NID = 1 # pred#NID... or #NID...
PRED = P = 2 # pred or "pred" or 'pred
VARSORT = VS = 4 # pred[e], pred[x], etc.
VARPROPS = VP = 8 # pred[@PROP=val]
OUTAXES = OUT = 16 # pred:ARG1/NEQ>
INAXES = IN = 32 # pred<ARG1/EQ:
UNDIRECTEDAXES = UND = 64 # pred:/EQ:
SUBPATHS = SP = 128 # pred:ARG1/NEQ>pred2
CARG = C = 256 # pred:CARG>"value"
BALANCED = B = 512
CONTEXT = VS | VP | SP
ALLAXES = OUT | IN | UND
DEFAULT = P | VS | VP | OUT | IN | SP
ALL = NID | P | VS | VP | OUT | IN | UND | SP
class XmrsPathError(XmrsError): pass
# GRAPH WALKING ########################################################
def axis_sort(axis):
return (
not axis[-1] == '>', # forward links first
not axis[0] == '<', # then backward, then undirected
not (len(axis) >= 5 and axis[1:4] == 'LBL'), # LBL before other args
(len(axis) >= 6 and axis[1:5] == 'BODY'), # BODY last
axis[1:] # otherwise alphabetical
)
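# Illustrative ordering under these keys: ':LBL/H>' sorts before ':ARG1/NEQ>',
# which sorts before '<ARG1/EQ:', which sorts before ':/EQ:' (forward axes
# first, LBL early, BODY last, alphabetical otherwise).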
def step_sort(step):
nodeid, axis = step
return tuple(
list(axis_sort(axis)) + [nodeid]
)
def walk(xmrs, start=0, method='headed', sort_key=step_sort):
if method not in ('top-down', 'bottom-up', 'headed'):
raise XmrsPathError("Invalid path-finding method: {}".format(method))
if not (start == 0 or xmrs.pred(start)):
raise XmrsPathError('Start nodeid not in Xmrs graph.')
linkdict = _build_linkdict(xmrs)
for step in _walk(start, linkdict, set(), method, sort_key):
yield step
def _walk(nodeid, linkdict, visited, method, sort_key):
if nodeid in visited:
return
visited.add(nodeid)
local_links = linkdict.get(nodeid, [])
steps = sorted(
filter(_axis_filter(method), local_links),
key=sort_key
)
for tgtnid, axis in steps:
# if this undirected link was already traversed in the other
# direction, just yield this step but don't recurse
if axis == ':/EQ:' and tgtnid in visited:
#yield (nodeid, tgtnid, axis)
continue
yield (nodeid, tgtnid, axis)
for step in _walk(tgtnid, linkdict, visited, method, sort_key):
yield step
def _build_linkdict(xmrs):
ld = defaultdict(list)
for link in links(xmrs):
axis = '{}/{}'.format(link.rargname or '', link.post)
if link_is_directed(link):
ld[link.start].append((link.end, ':{}>'.format(axis)))
ld[link.end].append((link.start, '<{}:'.format(axis)))
else:
# pretend they are directed
#ld[link.end]['<{}:'.format(axis)] = link.start
ld[link.start].append((link.end, ':{}:'.format(axis)))
ld[link.end].append((link.start, ':{}:'.format(axis)))
return ld
def _axis_filter(method):
# top-down: :X/Y> or :X/Y: (the latter only if added)
def axis_filter(step):
nid, axis = step
if method == 'headed' and headed(axis) or \
method == 'top-down' and axis.startswith(':') or \
method == 'bottom-up' and axis.endswith(':'):
return True
return False
return axis_filter
def link_is_directed(link):
return bool(link.rargname) or link.post != 'EQ'
def headed(axis):
# quantifiers and X/EQ links are not the heads of their subgraphs
if axis == '<RSTR/H:' or axis.endswith('/EQ:'):
return True
if (axis == ':RSTR/H>' or
axis.endswith('/EQ>') or
axis.startswith('<')):
return False
return True
# CLASSES ##############################################################
class XmrsPathNode(object):
__slots__ = ('nodeid', 'pred', 'context', 'links', '_overlapping_links',
'_depth', '_order')
def __init__(self, nodeid, pred, context=None, links=None):
self.nodeid = nodeid
self.pred = pred
self.context = dict(context or [])
self.links = dict(links or [])
self._overlapping_links = {} # {overlapping_axis: orig_axis, ...}
self._depth = (
max([-1] +
[x._depth for x in self.links.values() if x is not None]) +
1
)
self._order = (
sum(x._order for x in self.links.values() if x is not None) +
1
)
def __getitem__(self, key):
return self.links[key]
def __iter__(self):
return iter(self.links.items())
def __len__(self):
return self._depth
def update(self, other):
self.nodeid = other.nodeid or self.nodeid
self.pred = other.pred or self.pred
self.context.update(other.context or [])
for axis, tgt in other.links.items():
if not self.links.get(axis):
self.links[axis] = tgt
else:
self[axis].update(tgt)
def depth(self):
return self._depth
def order(self):
return self._order
# def extend(self, extents):
# for axes, extent in extents:
# # the final axis may be new information
# tgt = self.follow(axes[:-1])
# if axes:
# subtgt = tgt.links.get(axes[-1])
# if subtgt is None:
# tgt.links[axes[-1]] = extent
# continue
# else:
# tgt = subtgt
# tgt.update(extent)
# class XmrsPath(XmrsPathNode):
# def __init__(self, nodeid, pred, context=None, links=None):
# XmrsPathNode.__init__(self, nodeid, pred, context, links)
# self.calculate_metrics()
# @classmethod
# def from_node(cls, node):
# return cls(node.nodeid, node.pred, node.context, node.links)
# def calculate_metrics(self):
# self._distance = {}
# self._depth = {}
# self._preds = {}
# self._calculate_metrics(self, 0, 0)
# def _calculate_metrics(self, curnode, depth, distance):
# if curnode is None:
# return
# # add pred index
# try:
# self._preds[curnode.pred].append(curnode)
# except KeyError:
# self._preds[curnode.pred] = []
# self._preds[curnode.pred].append(curnode)
# _id = id(curnode)
# # we may re-update if we're on a shorter path
# updated = False
# if _id not in self._distance or distance < self._distance[_id]:
# self._distance[_id] = distance
# updated = True
# if _id not in self._depth or abs(depth) < abs(self._depth[_id]):
# self._depth[_id] = depth
# updated = True
# if not updated:
# return
# for link in curnode.links:
# if link.endswith('>'):
# self._calculate_metrics(curnode[link], depth+1, distance+1)
# elif link.startswith('<'):
# self._calculate_metrics(curnode[link], depth-1, distance+1)
# else:
# self._calculate_metrics(curnode[link], depth, distance+1)
# def distance(self, node=None):
# if node is None:
# return max(self._distance.values())
# else:
# return self._distance[id(node)]
# def depth(self, node=None, direction=max):
# if node is None:
# return direction(self._depth.values())
# return self._depth[id(node)]
# def select(self, pred):
# return self._preds.get(pred, [])
# # def extend(self, extents, base_axes=None):
# # if base_axes is None:
# # base_axes = []
# # base = self.follow(base_axes)
# # base.extend(extents)
# # self.calculate_metrics()
# HELPER FUNCTIONS ##########################################################
def get_nodeids(node):
yield node.nodeid
for link, path_node in node:
if path_node is None:
continue
for nid in get_nodeids(path_node):
yield nid
def get_preds(node):
yield node.pred
for link, path_node in node:
if path_node is None:
continue
for pred in get_preds(path_node):
yield pred
def copy(node, depth=-1, flags=ALL):
nodeid = node.nodeid if (flags & NODEID) else None
pred = node.pred if (flags & PRED) else None
context = dict(
(k, v) for k, v in node.context.items()
if (k == 'varsort' and (flags & VARSORT)) or
(k.startswith('@') and (flags & VARPROPS)) or
(k[0] in (':', '<') and (flags & SUBPATHS))
)
links = {}
if depth != 0:
for axis, tgt in node.links.items():
if tgt is None:
if _valid_axis(axis, flags):
links[axis] = None
elif (flags & SUBPATHS):
links[axis] = copy(tgt, depth-1, flags=flags)
n = XmrsPathNode(nodeid, pred, context=context, links=links)
return n
def _valid_axis(axis, flags):
return (
(axis.endswith('>') and (flags & OUTAXES)) or
(axis.startswith('<') and (flags & INAXES)) or
(axis.endswith(':') and (flags & UNDIRECTEDAXES))
)
def follow(obj, axes):
axes = list(reversed(axes))
while axes:
obj = obj[axes.pop()]
return obj
def merge(base, obj, location=None):
"""
merge is like XmrsPathNode.update() except it raises errors on
unequal non-None values.
"""
# pump object to its location with dummy nodes
while location:
axis = location.pop()
obj = XmrsPathNode(None, None, links={axis: obj})
if base is None:
return obj
_merge(base, obj)
# if isinstance(base, XmrsPath):
# base.calculate_metrics()
return base
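# Hedged usage sketch (path strings below are illustrative; see read_path):
#   base = read_path('_give_v_1:ARG1/NEQ>')
#   ext = read_path('_give_v_1:ARG2/NEQ>')
#   merge(base, ext)  # base now carries both the ARG1 and ARG2 axes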
def _merge(basenode, objnode):
if basenode is None or objnode is None:
return basenode or objnode
basenode.nodeid = _merge_atom(basenode.nodeid, objnode.nodeid)
basenode.pred = _merge_atom(basenode.pred, objnode.pred)
baseside = basenode.context
for k, v in objnode.context.items():
if k[0] in (':', '<'): # subpath context; need to recurse
baseside[k] = _merge(baseside.get(k), v)
else:
baseside[k] = _merge_atom(baseside.get(k), v)
baseside = basenode.links
for axis, tgt in objnode.links.items():
baseside[axis] = _merge(baseside.get(axis), tgt)
return basenode
def _merge_atom(obj1, obj2):
if obj1 is None or obj1 == STAR:
return obj2 or obj1 # or obj1 in case obj2 is None and obj1 == STAR
elif obj2 is None or obj2 == STAR:
return obj1 or obj2 # or obj2 in case obj1 is None and obj2 == STAR
elif obj1 == obj2:
return obj1
else:
raise XmrsPathError(
'Cannot merge MrsPath atoms: {} and {}'.format(obj1, obj2)
)
# WRITING PATHS #############################################################
def format(node, sort_key=axis_sort, depth=-1, flags=DEFAULT):
if node is None:
return ''
symbol = ''
if (flags & PRED) and node.pred is not None:
symbol = str(node.pred)
nodeid = ''
if (flags & NODEID) and node.nodeid is not None:
nodeid = '#{}'.format(node.nodeid)
if not (symbol or nodeid):
symbol = STAR
context = _format_context(node, sort_key, depth, flags)
subpath = ''
if depth != 0:
subpath = _format_subpath(node, sort_key, depth-1, flags)
return '{}{}{}{}'.format(symbol, nodeid, context, subpath)
def _format_context(node, sort_key, depth, flags):
context = ''
if (flags & CONTEXT) and node.context:
contexts = []
for k in sorted(node.context, key=_context_sort):
v = node.context[k]
if k == 'varsort':
if (flags & VARSORT):
contexts.append(v)
elif k[0] == '@':
if (flags & VARPROPS):
contexts.append('{}={}'.format(k, v))
elif k[0] in (':', '<'):
if v is not None and (flags & SUBPATHS):
v = format(v, sort_key, depth-1, flags)
elif _valid_axis(k, flags):
v = ''
else:
continue
contexts.append('{}{}'.format(k, v))
else:
raise XmrsPathError('Invalid context key: {}'.format(k))
if contexts:
context = '[{}]'.format(' & '.join(contexts))
return context
def _format_subpath(node, sort_key, depth, flags):
links = []
axislist = _prepare_axes(node, sort_key)
for axis, tgt in axislist:
if tgt is not None and (flags & SUBPATHS):
tgt = format(tgt, sort_key, depth, flags)
elif _valid_axis(axis, flags):
tgt = ''
else:
continue
links.append('{}{}'.format(axis, tgt))
if len(links) > 1:
subpath = '({})'.format(' & '.join(links))
else:
subpath = ''.join(links) # possibly just ''
return subpath
def _prepare_axes(node, sort_key):
"""
Sort axes and combine those that point to the same target and go
in the same direction.
"""
links = node.links
o_links = node._overlapping_links
overlap = {ax2 for ax in links for ax2 in o_links.get(ax, [])}
axes = []
for axis in sorted(links.keys(), key=sort_key):
if axis in overlap: continue
tgt = links[axis]
if axis in o_links:
s, e = axis[0], axis[-1]
axis = '%s%s%s' % (
s, '&'.join(a[1:-1] for a in [axis] + o_links[axis]), e
)
axes.append((axis, tgt))
return axes
def _context_sort(k):
return (k != 'varsort', k[0] in (':', '<'), k)
# FINDING PATHS #############################################################
def find_paths(
xmrs,
nodeids=None,
method='top-down',
flags=DEFAULT,
max_distance=-1,
subpath_select=list):
warnings.warn('find_paths() is deprecated; use explore()',
DeprecationWarning)
return explore(xmrs, nodeids, method, flags, max_distance, subpath_select)
def explore(
xmrs,
nodeids=None,
method='top-down',
flags=DEFAULT,
max_distance=-1,
subpath_select=list):
if nodeids is None: nodeids = [0] + xmrs._nodeids # 0 for TOP
stepmap = defaultdict(lambda: defaultdict(set))
for startnid in nodeids:
if startnid in stepmap:
continue # start node already done
for start, end, axis in walk(xmrs, start=startnid, method=method):
stepmap[start][end].add(axis)
# if axis in stepmap.get(start, {}):
# continue # current node already done
# stepmap[start][axis] = end
for nodeid in nodeids:
for node in _explore(
xmrs, stepmap, nodeid, flags,
max_distance, subpath_select, set()):
#yield XmrsPath.from_node(node)
yield node
def _explore(
xmrs,
stepmap,
start,
flags,
max_distance,
subpath_select,
visited):
if start in visited:
return
visited = visited.union([start])
ctext = None
if start == 0:
symbol = TOP
else:
symbol = xmrs.pred(start)
if (flags & CONTEXT):
ctext = {}
# it's not guaranteed that an EP has an intrinsic variable
if IVARG_ROLE in xmrs.args(start):
iv = xmrs.args(start)[IVARG_ROLE]
varsort = var_sort(iv)
ctext['varsort'] = varsort
props = xmrs.properties(iv)
ctext.update([
('@{}'.format(k), v)
for k, v in props.items()
])
steps = stepmap.get(start, {}) # this is {end_nodeid: set(axes), ...}
# remove :/EQ: if necessary and generate mapping for overlapping axes
overlap = {}
for end, axes in steps.items():
if (':/EQ:' in axes and
(not (flags & UNDIRECTEDAXES) or
(end in visited and ':/EQ:' in stepmap[end].get(start, [])))):
axes.difference_update([':/EQ:'])
if len(axes) > 1:
# don't sort if this significantly hurts performance
axes = sorted(axes, key=axis_sort)
s, e = axes[0][0], axes[0][-1] # axis direction characters
overlap[axes[0]] = [
ax for ax in axes[1:] if ax[0] == s and ax[-1] == e
]
# exclude TOP from being its own path node
if start != 0:
n = XmrsPathNode(
start,
symbol,
context=ctext,
links={axis: None for axes in steps.values() for axis in axes}
)
n._overlapping_links = overlap
yield n
# keep tuples of axes instead of mapping each unique axis. This is
# for things like coordination where more than one axis point to the
# same thing, and we don't want to enumerate all possibilities.
subpaths = {}
for tgtnid, axes in steps.items():
if tgtnid == 0:
# assume only one axis going to TOP (can there be more than 1?)
axis = next(iter(axes))
subpaths[(axis,)] = [XmrsPathNode(tgtnid, TOP)]
elif (flags & SUBPATHS) and max_distance != 0:
if not axes: # maybe an :/EQ: was pruned and nothing remained
continue
sps = subpath_select(list(
_explore(xmrs, stepmap, tgtnid, flags,
max_distance-1, subpath_select, visited)
))
if not (flags & BALANCED):
sps.append(None)
subpaths[tuple(axes)] = sps
if subpaths:
# beware of magic below:
# links maps a tuple of axes (usually just one axis, like
# (ARG1/NEQ,)) to a list of subpaths.
# This gets the product of subpaths for all axes, then remaps
# axis tuples to the appropriate subpaths. E.g. if subpaths is
# {(':ARG1/NEQ>',): [def],
# (':ARG2/NEQ>',':ARG3/EQ>'): [ghi, jkl]}
# then alts is
# [{(':ARG1/NEQ>',): def, ('ARG2/NEQ>', ':ARG3/EQ>'): ghi},
# {(':ARG1/NEQ>',): def, ('ARG2/NEQ>', ':ARG3/EQ>'): jkl}]
alts = list(map(
lambda z: dict(zip(subpaths.keys(), z)),
product(*subpaths.values())
))
# now enumerate the tupled axes
for alt in alts:
ld = dict((a, tgt) for axes, tgt in alt.items() for a in axes)
# don't output all null axes (already done above)
if set(ld.values()) != {None}:
n = XmrsPathNode(start, symbol, context=ctext, links=ld)
n._overlapping_links = overlap
yield n
# READING PATHS #############################################################
tokenizer = re.compile(
# two kinds of strings: "double quoted", and 'open-single-quoted
r'(?P<string>"[^"\\]*(?:\\.[^"\\]*)*"|\'[^ \\]*(?:\\.[^ \\]*)*)'
# axes should be like :X/Y>, <X/Y:, :X/Y:, :X/Y&A/B>, etc.
r'|(?P<axis>[<:][^/]*/(?:[HN]?EQ|H)(?:&[^/]*/(?:[HN]?EQ|H))*[:>])'
r'|(?P<symbol>[^\s#:><@=()\[\]&|]+)' # non-breaking characters
r'|(?P<nodeid>#\d+)' # nodeids (e.g. #10003)
r'|(?P<punc>[@=()\[\]&|])' # meaningful punctuation
)
def read_path(path_string):
toks = deque((mo.lastgroup, mo.group())
for mo in tokenizer.finditer(path_string))
try:
node = _read_node(toks)
except IndexError:
raise XmrsPathError('Unexpected termination for path: {}'
.format(path_string))
if node is None:
raise XmrsPathError('Error reading path: {}'
.format(path_string))
elif toks:
raise XmrsPathError('Unconsumed tokens: {}'
.format(', '.join(tok[1] for tok in toks)))
#path = XmrsPath.from_node(startnode)
#return path
return node
def _read_node(tokens):
if not tokens or tokens[0][0] not in {'string', 'symbol', 'nodeid'}:
return None
# A node can be a pred, a nodeid, or both (in that order). This
# means two 'if's, not 'if-else'.
mtype, mtext = tokens.popleft()
pred = nodeid = None
if mtype in ('string', 'symbol'):
if mtext == TOP or mtext == STAR:
pred = mtext
else:
pred = Pred.stringpred(mtext)
if tokens and tokens[0][0] == 'nodeid':
mtype, mtext = tokens.popleft()
if mtype == 'nodeid':
nodeid = int(mtext[1:]) # get rid of the initial # character
context = _read_context(tokens)
links = _read_links(tokens)
return XmrsPathNode(
nodeid,
pred,
context=context,
links=links
)
def _read_context(tokens):
if not tokens or tokens[0] != ('punc', '['):
return None
_, _ = tokens.popleft() # this is the ('punc', '[')
# context can be a varsort, an @attribute, or an axis
context = {}
for token in _read_conjunction(tokens):
mtype, mtext = token
if mtype == 'symbol':
context['varsort'] = mtext
elif token == ('punc', '@'):
_, attr = tokens.popleft()
assert tokens.popleft() == ('punc', '=')
_, val = tokens.popleft()
context['@{}'.format(attr)] = val
elif mtype == 'axis':
tgt = _read_node(tokens)
start, end = mtext[0], mtext[-1]
axes = mtext[1:-1].split('&')
for ax in axes:
ax = '%s%s%s' % (start, ax.strip(), end)
context[ax] = tgt
else:
raise XmrsPathError(
'Invalid conjunct in context: {}'.format(mtext)
)
assert tokens.popleft() == ('punc', ']')
return context
def _read_links(tokens):
if not tokens or (tokens[0][0] != 'axis' and tokens[0][1] != '('):
return None
mtype, mtext = tokens.popleft()
# it could be a single :axis
if mtype == 'axis':
return {mtext: _read_node(tokens)}
# or (:many :axes)
assert mtext == '('
links = {}
for token in _read_conjunction(tokens):
mtype, mtext = token
if mtype == 'axis':
tgt = _read_node(tokens)
start, end = mtext[0], mtext[-1]
axes = mtext[1:-1].split('&')
for ax in axes:
ax = '%s%s%s' % (start, ax.strip(), end)
links[ax] = tgt
else:
raise XmrsPathError('Invalid conjunct in axes: {}'.format(mtext))
assert tokens.popleft() == ('punc', ')')
return links
def _read_conjunction(tokens):
yield tokens.popleft()
while tokens[0] == ('punc', '&'):
tokens.popleft() # the & character
yield tokens.popleft()
# SEARCHING PATHS ###########################################################
def find_node(base, node=None, nodeid=None, pred=None, context=None):
matches = []
if node is None:
node = XmrsPathNode(nodeid, pred, context=context)
if _nodes_unifiable(base, node):
matches.append(([], base))
# there's no cycle detection below because paths are (supposedly) trees
agenda = [([a], sp) for a, sp in base.links.items() if sp is not None]
while agenda:
axes, base = agenda.pop()
if _nodes_unifiable(base, node):
matches.append((axes, base))
agenda.extend(
(axes+[a], sp) for a, sp in base.links.items() if sp is not None
)
return matches
def _nodes_unifiable(n1, n2):
if n1 is None or n2 is None:
return True
# nodeids same or one/both is None
if not (n1.nodeid is None or
n2.nodeid is None or
n1.nodeid == n2.nodeid):
return False
# preds same or one/both is None or STAR
if not (n1.pred in (None, STAR) or
n2.pred in (None, STAR) or
n1.pred == n2.pred):
return False
# context can be properties or subpaths
for k, v2 in n2.context.items():
if k[0] in (':', '<'): # subpaths must be recursively unifiable
if not _nodes_unifiable(n1.context.get(k), v2):
return False
else: # properties just need to be equal
v1 = n1.context.get(k)
if not (v1 is None or v2 is None or v1 == v2):
return False
# links are just like context subpaths
if not all(_nodes_unifiable(n1.links.get(axis), sp2)
for axis, sp2 in n2.links.items()):
return False
return True
def match(pattern, p, flags=DEFAULT):
if (flags & NODEID) and (pattern.nodeid != p.nodeid):
return False
if (flags & PRED):
p1 = pattern.pred
p2 = p.pred
if not (p1 == STAR or p2 == STAR or p1 == p2):
return False
if (flags & CONTEXT):
c1 = pattern.context
c2 = p.context
check_sp = flags & SUBPATHS
check_vs = flags & VARSORT
check_vp = flags & VARPROPS
for k, a in c1.items():
if k[0] in (':', '<') and check_sp:
b = c2.get(k)
if not (a is None or b is None or match(a, b)):
return False
elif (k == 'varsort' and check_vs) or check_vp:
if c2.get(k, a) != a:
return False
if (flags & SUBPATHS):
for axis, pattern_ in pattern.links.items():
p_ = p.links.get(axis)
if not (pattern_ is None or p_ is None or match(pattern_, p_)):
return False
return True
def subpaths(p):
sps = list(_subpaths(p))
sps = sps[1:] # the first subpath is the same as the original
return sps
def _subpaths(p):
if p is None:
return
sps = {ax: list(_subpaths(tgt)) + [None] for ax, tgt in p.links.items()}
# this fancy bit is the same as in _explore()
alts = list(map(
lambda z: dict(zip(sps.keys(), z)),
product(*sps.values())
))
for alt in alts:
ld = dict((axis, tgt) for axis, tgt in alt.items())
n = XmrsPathNode(p.nodeid, p.pred, context=p.context, links=ld)
n._overlapping_links = p._overlapping_links
yield n
# BUILDING XMRS#########################################################
def reify_xmrs(path):
#from delphin.mrs import simpledmrs
# if hasattr(path, 'start'):
# path = path.start
if path.pred == TOP:
assert len(path.links) == 1
axis, path = list(path.links.items())[0]
else:
axis = ':/H>' # just pretend there was a TOP:/H>
if path is None:
return
for upath, _, _ in _unique_paths(path, defaultdict(set), 10000):
m = _reify_xmrs(upath, top_axis=axis)
if m.is_well_formed():
yield m
#print(simpledmrs.dumps_one(m, pretty_print=True))
def _unique_paths(path, nidmap, nextnid):
if path is None:
yield (path, nidmap, nextnid)
return
# first get possibilities for the current node
node_repr = format(path, depth=0, flags=PRED|CONTEXT)
# if already has nodeid, use it; otherwise create or use from nidmap
if path.nodeid is None:
nids = [nextnid]
# only consider existing nids if they aren't quantifiers because
# we expect to see many quantifiers but they are all unique
if not path.pred.is_quantifier():
nids += list(nidmap.get(node_repr, []))
nextnid += 1
else:
nids = [path.nodeid]
alts = []
for nid in nids:
alts.append((
_new_node(path, nid),
_new_nidmap(nidmap, node_repr, nid),
nextnid
))
# then for each alternative, find possible descendants
agenda = list(path.links.items())
while agenda:
_alts = []
axis, tgt = agenda.pop()
for node, nm, nn in alts:
for subpath, _nm, _nn in _unique_paths(tgt, nm, nn):
n = copy(node)
n.links[axis] = subpath
_alts.append((n, _nm, _nn))
alts = _alts
for alt in alts:
yield alt
def _new_node(node, nid=None):
new_node = copy(node, depth=0)
if nid is not None:
new_node.nodeid = nid
return new_node
def _new_nidmap(nidmap, node_repr, nid):
nm = defaultdict(set, {k: v.copy() for k, v in nidmap.items()})
nm[node_repr].add(nid)
return nm
def _reify_xmrs(path, top_axis=None):
nodes = {}
links = []
agenda = [(0, top_axis or ':/H>', path)]
while agenda:
srcnid, axis, tgt = agenda.pop()
if tgt is None:
continue
# add link to tgt
rargname, post = axis.strip(':<>').split('/')
if axis.startswith('<'):
links.append(Link(tgt.nodeid, srcnid, rargname or None, post))
elif axis.endswith('>'):
links.append(Link(srcnid, tgt.nodeid, rargname or None, post))
elif axis == ':/EQ:':
links.append(Link(srcnid, tgt.nodeid, None, 'EQ'))
else:
raise XmrsPathError('Invalid axis: {}'.format(axis))
# add node if necessary (note, currently does not update pred
# or sortinfo if encountered twice)
if tgt.nodeid not in nodes:
sortinfo = dict(
[('cvarsort', tgt.context.get('varsort') or 'u')] +
[(k.lstrip('@'), v)
for k, v in tgt.context.items() if k.startswith('@')]
)
nodes[tgt.nodeid] = Node(tgt.nodeid, tgt.pred, sortinfo=sortinfo)
# add new agenda for tgt
for axis, next_tgt in tgt.links.items():
agenda.append((tgt.nodeid, axis, next_tgt))
return Dmrs(list(nodes.values()), links)
| 2.34375 | 2 |
PY_Basics/test_square.py | CodexLink/StashedCodes | 0 | 12768463 | <reponame>CodexLink/StashedCodes<gh_stars>0
import math
def test_sqrt():
num = 25
assert math.sqrt(num) == 5
def test_square():
num = 7
assert num * num == 49
| 2.046875 | 2 |
toolbox/visualization/bin/setup.py | brennengreen/NIRFAST-Parallel | 1 | 12768464 | # for building the exe:
# python setup.py py2exe --includes sip
from distutils.core import setup
from py2exe.build_exe import py2exe
from glob import glob
import py2exe
import sys
sys.path.append("C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT")
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
setup(
data_files=data_files,
console=[{"script": "nirviz.py"}]
) | 1.804688 | 2 |
tests/test_airflownetworkdistributioncomponentduct.py | marcelosalles/pyidf | 19 | 12768465 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentDuct
log = logging.getLogger(__name__)
class TestAirflowNetworkDistributionComponentDuct(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.close(self.fd)  # close the descriptor from mkstemp before removing the file
os.remove(self.path)
def test_create_airflownetworkdistributioncomponentduct(self):
pyidf.validation_level = ValidationLevel.error
obj = AirflowNetworkDistributionComponentDuct()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_duct_length = 0.0001
obj.duct_length = var_duct_length
# real
var_hydraulic_diameter = 0.0001
obj.hydraulic_diameter = var_hydraulic_diameter
# real
var_cross_section_area = 0.0001
obj.cross_section_area = var_cross_section_area
# real
var_surface_roughness = 0.0001
obj.surface_roughness = var_surface_roughness
# real
var_coefficient_for_local_dynamic_loss_due_to_fitting = 0.0
obj.coefficient_for_local_dynamic_loss_due_to_fitting = var_coefficient_for_local_dynamic_loss_due_to_fitting
# real
var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air = 0.0001
obj.overall_heat_transmittance_coefficient_ufactor_from_air_to_air = var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air
# real
var_overall_moisture_transmittance_coefficient_from_air_to_air = 0.0001
obj.overall_moisture_transmittance_coefficient_from_air_to_air = var_overall_moisture_transmittance_coefficient_from_air_to_air
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airflownetworkdistributioncomponentducts[0].name, var_name)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].duct_length, var_duct_length)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].hydraulic_diameter, var_hydraulic_diameter)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].cross_section_area, var_cross_section_area)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].surface_roughness, var_surface_roughness)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].coefficient_for_local_dynamic_loss_due_to_fitting, var_coefficient_for_local_dynamic_loss_due_to_fitting)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].overall_heat_transmittance_coefficient_ufactor_from_air_to_air, var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].overall_moisture_transmittance_coefficient_from_air_to_air, var_overall_moisture_transmittance_coefficient_from_air_to_air) | 2.40625 | 2 |
ls/sampleproject/tests/test_square_builder.py | linuxsoftware/ls.SampleProject | 0 | 12768466 | # ------------------------------------------------------------------------------
# Unittests
# ------------------------------------------------------------------------------
import sys
import os.path
from unittest import TestCase
from ls.sampleproject import SquareBuilder
class TestSquareBuilder(TestCase):
def setUp(self):
here = os.path.dirname(os.path.abspath(__file__))
self.builder = SquareBuilder(os.path.join(here, "test.words"))
def testMatchingWords(self):
matchingWords = set(self.builder.matchingWords)
self.assertEqual(len(matchingWords), len(self.builder.matchingWords))
for word in self.builder.matchingWords:
self.assertEqual(len(word), 6)
drow = word[::-1]
self.assertNotEqual(drow, word)
self.assertIn(drow, matchingWords)
def testSquares(self):
for square in self.builder:
self.assertIs(type(square), list)
self.assertEqual(len(square), 6)
for n in range(6):
with self.subTest(row=n):
self.assertIs(type(square[n]), str)
self.assertEqual(len(square[n]), 6)
row = lambda y: list(square[y])
column = lambda x: [r[x] for r in square]
for n in range(6):
with self.subTest(n=n):
self.assertEqual(row(n), column(n))
self.assertEqual(row(0), row(5)[::-1])
self.assertEqual(row(1), row(4)[::-1])
self.assertEqual(row(2), row(3)[::-1])
self.assertEqual(column(0), column(5)[::-1])
self.assertEqual(column(1), column(4)[::-1])
self.assertEqual(column(2), column(3)[::-1])
| 3.21875 | 3 |
portfolio/display/models.py | vasu1818/django-Portfolio | 0 | 12768467 | <reponame>vasu1818/django-Portfolio
from django.db import models
# Create your models here.
class Profile(models.Model):
image = models.ImageField(upload_to="images/")
summary = models.CharField(max_length=200) | 2.03125 | 2 |
tests/test_executors.py | smartlegionlab/commandpack | 1 | 12768468 | <filename>tests/test_executors.py
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, <NAME>
# All rights reserved.
# -------------------------------------------------------
import os
from commandpack.executors import OsExecutor, SubExecutor, Executor
class TestOsExecutor:
def test_execute(self, os_executor, command_status):
command, status = command_status
assert os_executor.execute(command) == status
def test__call__(self, os_executor, command_status):
command, status = command_status
assert os_executor(command) == status
class TestSubExecutor:
def test_execute(self, sub_executor, command_status):
command, status = command_status
assert sub_executor.execute(command) == status
def test__call__(self, sub_executor, command_status):
command, status = command_status
assert sub_executor(command) == status
class TestExecutor:
def test_execute(self, executor, command_status):
command, status = command_status
assert executor.execute(command) == status
assert executor.os_executor(command) == status
assert executor.sub_executor(command) == status
assert executor.sub_executor.execute(command) == status
def test__get_executor(self, executor, os_executor, sub_executor, os_name):
os.name = os_name
if os.name == 'posix':
assert isinstance(executor._get_executor(), type(os_executor))
else:
assert isinstance(executor._get_executor(), type(sub_executor))
| 2.765625 | 3 |
cam/models.py | 4the1appdevs/face_recognition | 1 | 12768469 | # -*- coding: utf-8 -*-
#from pkg_resources import resource_filename
class dlib_model:
@staticmethod
def pose_predictor_model_location():
return "./models/dlib/shape_predictor_68_face_landmarks.dat"
@staticmethod
def pose_predictor_five_point_model_location():
return "./models/dlib/shape_predictor_5_face_landmarks.dat"
@staticmethod
def face_recognition_model_location():
return "./models/dlib/dlib_face_recognition_resnet_model_v1_for_asian.dat"
@staticmethod
def cnn_face_detector_model_location():
return "./models/dlib/mmod_human_face_detector.dat"
class opencv_model:
@staticmethod
def caff_model_location():
return "./models/opencv/res10_300x300_ssd_iter_140000_fp16.caffemodel"
@staticmethod
def caff_cfgfile_location():
return "./models/opencv/deploy.prototxt"
@staticmethod
def tensorflow_model_location():
return "./models/opencv/opencv_face_detector_uint8.pb"
@staticmethod
def tensorflow_cfgfile_location():
return "./models/opencv/opencv_face_detector.pbtxt"
class classifier_model:
@staticmethod
def classifier_location():
return "./models/classifier/face_classifier.pkl"
| 1.695313 | 2 |
syntacticframes_project/syntacticframes/migrations/0007_auto_20141106_2121.py | aymara/verbenet-editor | 0 | 12768470 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('syntacticframes', '0006_auto_20141103_0939'),
]
operations = [
migrations.AlterModelOptions(
name='verbnetclass',
options={'ordering': ['levin_class', 'name']},
),
migrations.AlterModelOptions(
name='verbnetframeset',
options={'ordering': ['tree_id']},
),
migrations.AlterField(
model_name='verbnetframeset',
name='tree_id',
field=models.PositiveSmallIntegerField(),
preserve_default=True,
),
]
| 1.53125 | 2 |
helpers/rally.py | klehman-rally/rasl-theia | 0 | 12768471 | <filename>helpers/rally.py<gh_stars>0
import sys
import time
import json
import re
import string
from collections import OrderedDict
import requests
RALLY_BASE_URL = 'https://rally1.rallydev.com/slm/webservice/v2.0'
RALLY_ART_ID_PATTERN = re.compile(r'\b((?:US|S|DE|DS|TA|TC|F|I|T)\d{1,7})\b')
PAGESIZE = 2000
ENTITY = {"S" : "HierarchicalRequirement",
"US" : "HierarchicalRequirement",
"DE" : "Defect",
"DS" : "DefectSuite",
"TA" : "Task",
"TC" : "TestCase",
"F" : "Feature",
"I" : "Initiative",
"T" : "Theme",
}
DEFECT_FIELDS = "FormattedID Name Workspace Project SubmittedBy Blocked BlockedReason Description Environment Owner State ScheduleState Priority Severity Resolution Ready LastUpdateDate Tags".split(' ')
STORY_FIELDS = "FormattedID Name Workspace Project CreatedBy Owner Blocked BlockedReason Description ScheduleState Release Feature PlanEstimate LastUpdateDate Tags".split(' ')
ART_FIELDS = {'DE' : DEFECT_FIELDS,
'S' : STORY_FIELDS,
'US' : STORY_FIELDS,
}
def rallyFIDs(target):
"""
Given a target String that may have Rally FormattedIDs in it, extract those
and return a list: empty if none match, otherwise the substrings matching the Rally FID pattern.
"""
hits = RALLY_ART_ID_PATTERN.findall(target)
return hits
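# Illustrative only (the IDs are made up):
#   rallyFIDs("Fixes US42; dupe of DE1234")  ->  ['US42', 'DE1234']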
def artPrefix(target):
"""
Given a target that is a FormattedID, burn off the trailing digits
returning the artifact prefix letters as a string.
"""
letters = [char for char in target[:2] if char not in string.digits]
return "".join(letters)
def getRallyArtifact(apikey, workspace, fid):
headers = {'zsessionid': apikey}
entity = ENTITY[artPrefix(fid)]
fields = ART_FIELDS[artPrefix(fid)]
specific_fid = f'(FormattedID = "{fid}")'
params = {'workspace' : f'workspace/{workspace}',
#'fetch' : 'true',
'fetch' : f'{",".join(fields)}',
'query' : specific_fid
}
url = f'{RALLY_BASE_URL}/{entity}'
print(f'getRallyArtifact params: {params}')
#print(f'headers {headers}')
#print(f'url {url}')
response = requests.get(url, headers=headers, params=params)
print(f'status_code: {response.status_code}')
result = json.loads(response.text)
errors = result['QueryResult']['Errors']
if errors:
print(f'result Errors: {errors}')
warnings = result['QueryResult']['Warnings']
if warnings:
print(f'result Warnings: {warnings}')
items = result['QueryResult']['Results']
print(f'results items ({result["QueryResult"]["TotalResultCount"]}): {repr(items)}')
if not items:
return None
bloated_item = items.pop(0) # we expect only 1 item to be returned
raw_item = {key : value for key, value in bloated_item.items() if key in fields}
item = OrderedDict()
item['entity'] = 'Story' if entity == 'HierarchicalRequirement' else entity
item['art_url'] = bloated_item['_ref']
for attr in fields:
value = raw_item.get(attr, None)
if value:
if attr in ['Workspace', 'Project', 'SubmittedBy', 'CreatedBy', 'Owner']:
value = raw_item[attr]['_refObjectName']
if attr == 'LastUpdateDate':
value = value.replace('T', ' ')
if attr == 'Ready':
value = 'yes' if value else 'no'
if attr == 'PlanEstimate':
value = str(value).split('.')[0]
if attr == 'Release':
value = raw_item[attr]['_refObjectName']
if attr == 'Feature':
# placeholder for now, this won't work
# have to chase the raw_item[attr] ref value via WSAPI to get the FormattedID
value = raw_item[attr]['_ref']
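                # Untested sketch (assumption: a GET on the ref returns the
                # usual WSAPI shape {'Feature': {'FormattedID': ...}}):
                #   feat = json.loads(requests.get(value, headers=headers).text)
                #   value = feat.get('Feature', {}).get('FormattedID', value)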
if attr == 'Tags':
if raw_item['Tags']['Count'] == 0:
continue
tags_collection_ref = raw_item['Tags']['_ref']
tags = getTags(headers, tags_collection_ref)
value = ", ".join(tags)
item[attr] = value
return item
def getTags(headers, tags_ref):
response = requests.get(tags_ref, headers=headers)
if response.status_code != 200:
return []
result = json.loads(response.text)
if result['QueryResult']['Errors']:
return []
items = result['QueryResult']['Results']
tags = [item['Name'] for item in items]
return tags
| 2.421875 | 2 |
basics/singals_and_slots_1.py | klinga/pyside-tests | 0 | 12768472 | <filename>basics/singals_and_slots_1.py
import sys
from PySide2.QtCore import QSize, Qt
from PySide2.QtWidgets import QApplication, QMainWindow, QPushButton
class MainWindow(QMainWindow):
"""
Subclass QMainWindow
"""
def __init__(self):
super().__init__()
# variables
self.button_is_checked = True
self.setWindowTitle("My App")
button = QPushButton("Press me!")
# toggle state of button when clicked
button.setCheckable(True)
button.clicked.connect(self.button_toggled)
button.clicked.connect(self.button_clicked)
# set fixed size that user is unable to modify
self.setFixedSize(QSize(400, 300))
# other options
# self.setMinimumSize(QSize(400, 300))
# self.setMaximumSize(QSize(400, 300))
# set the central widget of the window
self.setCentralWidget(button)
def button_clicked(self):
print("Clicked...")
def button_toggled(self, checked):
self.button_is_checked = checked
print(f"Checked? {self.button_is_checked}")
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
| 2.953125 | 3 |
setup.py | m4ce/google-drive-api | 2 | 12768473 | <reponame>m4ce/google-drive-api
from distutils.core import setup
version = '0.0.4'
setup(
name = 'google-drive-api',
packages = ['google_drive'],
version = version,
description = 'Python library for Google Drive API',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/m4ce/google-drive-api-python',
download_url = 'https://github.com/m4ce/google-drive-api-python/tarball/%s' % (version,),
keywords = ['google', 'drive', 'google drive'],
classifiers = [],
install_requires = ["google-api-python-client"]
)
| 1.4375 | 1 |
u24_lymphocyte/third_party/treeano/sandbox/nodes/tests/wta_sparsity_test.py | ALSM-PhD/quip_classification | 45 | 12768474 | import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import wta_sparisty as wta
fX = theano.config.floatX
def test_wta_spatial_sparsity_node_serialization():
tn.check_serialization(wta.WTASpatialSparsityNode("a"))
def test_wta_sparsity_node_serialization():
tn.check_serialization(wta.WTASparsityNode("a"))
def test_wta_spatial_sparsity_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(2, 2, 2, 2)),
wta.WTASpatialSparsityNode("a")]
).network()
fn = network.function(["i"], ["s"])
x = np.arange(16).reshape(2, 2, 2, 2).astype(fX)
ans = x.copy()
ans[..., 0] = 0
ans[..., 0, :] = 0
np.testing.assert_allclose(fn(x)[0],
ans)
def test_wta_sparsity_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(2, 2, 2, 2)),
wta.WTASparsityNode("a", percentile=0.5)]
).network()
fn = network.function(["i"], ["s"])
x = np.arange(16).reshape(2, 2, 2, 2).astype(fX)
ans = x.copy()
ans[..., 0] = 0
ans[..., 0, :] = 0
ans[0] = 0
res = fn(x)[0]
np.testing.assert_allclose(res, ans)
| 2.015625 | 2 |
tests/test_checker.py | parafoxia/len8 | 3 | 12768475 | # Copyright (c) 2021-2022, <NAME>, Jonxslays
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pathlib import Path
import pytest
import len8
from len8.errors import BadLines, InvalidPath
TEST_FILE = Path(__file__).parent / "testdata.py"
TEST_NON_VALID = TEST_FILE.parent / "nsx_simple_app.nsx"
TEST_TOML_CONFIG = TEST_FILE.parent / "test.toml"
TEST_LOL_TOML_CONFIG = TEST_FILE.parent / "test_lol.toml"
@pytest.fixture() # type: ignore
def default_checker() -> len8.Checker:
return len8.Checker()
@pytest.fixture() # type: ignore
def extended_checker() -> len8.Checker:
return len8.Checker(
exclude=["custom", Path("another")], extend=2, strict=True
)
@pytest.fixture() # type: ignore
def custom_checker() -> len8.Checker:
return len8.Checker(max_code_length=100, max_docs_length=80)
@pytest.fixture() # type: ignore
def valid_config() -> len8.Config:
return len8.Config(TEST_TOML_CONFIG)
def test_default_init(default_checker: len8.Checker) -> None:
assert isinstance(default_checker, len8.Checker)
assert default_checker.exclude == [
Path(".nox"),
Path(".venv"),
Path("venv"),
]
assert default_checker.extend == 0
assert default_checker.bad_lines is None
assert default_checker.strict is False
assert default_checker.code_length == 79
assert default_checker.docs_length == 72
def test_extended_init(extended_checker: len8.Checker) -> None:
assert isinstance(extended_checker, len8.Checker)
assert extended_checker.exclude == [
Path(".nox"),
Path(".venv"),
Path("venv"),
Path("custom"),
Path("another"),
]
assert extended_checker.extend == 2
assert extended_checker.bad_lines is None
assert extended_checker.strict is True
assert extended_checker.code_length == 99
assert extended_checker.docs_length == 72
def test_custom_init(custom_checker: len8.Checker) -> None:
assert isinstance(custom_checker, len8.Checker)
assert custom_checker.exclude == [
Path(".nox"),
Path(".venv"),
Path("venv"),
]
assert custom_checker.extend == 0
assert custom_checker.bad_lines is None
assert custom_checker.strict is False
assert custom_checker.code_length == 100
assert custom_checker.docs_length == 80
def test_bad_inits(default_checker: len8.Checker) -> None:
with pytest.raises(ValueError) as exc:
len8.Checker(extend=5)
assert f"{exc.value}" == "'extend' should be between 0 and 2 inclusive"
with pytest.raises(ValueError) as exc:
len8.Checker(max_code_length=-1)
assert f"{exc.value}" == "line lengths cannot be less than 0"
with pytest.raises(ValueError) as exc:
len8.Checker(max_docs_length=-1)
assert f"{exc.value}" == "line lengths cannot be less than 0"
with pytest.raises(ValueError) as exc:
default_checker.extend = 5
assert f"{exc.value}" == "'extend' should be between 0 and 2 inclusive"
def test_setting_lengths(default_checker: len8.Checker) -> None:
default_checker.set_lengths(code=100, docs=80)
assert default_checker.code_length == 100
assert default_checker.docs_length == 80
default_checker.set_lengths(docs=50)
assert default_checker.code_length == 100
assert default_checker.docs_length == 50
default_checker.set_lengths(code=None)
assert default_checker.code_length == 79
assert default_checker.docs_length == 50
def test_non_strict_output(default_checker: len8.Checker) -> None:
output = (
f"\33[1m{TEST_FILE}\33[0m\n"
" * Line 4 (76/72)\n"
" * Line 5 (83/79)\n"
" * Line 11 (78/72)\n\n"
f"\33[1m\33[31mFound 3 problems\33[0m"
)
assert default_checker.check(TEST_FILE) == output
def test_non_strict_output_extended(default_checker: len8.Checker) -> None:
default_checker.extend = 2
output = (
f"\33[1m{TEST_FILE}\33[0m\n"
" * Line 4 (76/72)\n"
" * Line 11 (78/72)\n\n"
f"\33[1m\33[31mFound 2 problems\33[0m"
)
assert default_checker.check(TEST_FILE) == output
assert default_checker.check(TEST_FILE.parent) == output
def test_strict_output(default_checker: len8.Checker) -> None:
default_checker.strict = True
output = (
f"\33[1m{TEST_FILE}\33[0m\n"
" * Line 4 (76/72)\n"
" * Line 5 (83/79)\n"
" * Line 11 (78/72)\n\n"
f"\33[1m\33[31mFound 3 problems\33[0m"
)
with pytest.raises(BadLines) as exc:
assert default_checker.check(TEST_FILE) == output
assert f"{exc.value}" == output
def test_update_excludes(default_checker: len8.Checker) -> None:
default_checker.exclude = [Path("custom"), Path("another")]
assert default_checker.exclude == [
Path(".nox"),
Path(".venv"),
Path("venv"),
Path("custom"),
Path("another"),
]
def test_file_validation(default_checker: len8.Checker) -> None:
assert default_checker._is_valid(TEST_FILE)
assert not default_checker._is_valid(Path("README.md"))
default_checker.exclude = [Path(__file__).parent]
assert default_checker._is_valid(Path("len8").absolute())
assert not default_checker._is_valid(Path("tests").absolute())
default_checker.exclude = [Path("testdata.py")]
assert default_checker._is_valid(Path("checker.py"))
assert not default_checker._is_valid(Path("testdata.py"))
def test_pathlib_conversion_on_check(default_checker: len8.Checker) -> None:
output = (
f"\33[1m{TEST_FILE}\33[0m\n"
" * Line 4 (76/72)\n"
" * Line 5 (83/79)\n"
" * Line 11 (78/72)\n\n"
f"\33[1m\33[31mFound 3 problems\33[0m"
)
assert default_checker.check(f"{TEST_FILE}") == output
default_checker.strict = True
with pytest.raises(InvalidPath) as exc:
assert default_checker.check(f"invalid_dir") == output
assert f"{exc.value}" == f"Error: 'invalid_dir' is not a valid path."
def test_skip_invalid_files(default_checker: len8.Checker) -> None:
try:
default_checker.check(TEST_NON_VALID)
except UnicodeDecodeError:
pytest.fail()
def test__check_dir(default_checker: len8.Checker) -> None:
default_checker._check(Path("tests"))
def test_valid_config_init(valid_config: len8.Config) -> None:
assert isinstance(valid_config, len8.Config)
assert valid_config.include == ["tests/testdata.py"]
assert valid_config.exclude == ["tests/exclude.py"]
assert valid_config.code_length == 88
assert valid_config.docs_length == 69
assert valid_config.strict is True
assert valid_config.is_configured
def test_invalid_config_init() -> None:
with pytest.raises(len8.ConfigurationError) as e:
_ = len8.Config(TEST_NON_VALID)
assert str(e.value) == (
f"'{TEST_NON_VALID}' is not a valid configuration file."
)
def test_config_missing_len8() -> None:
config = len8.Config(TEST_LOL_TOML_CONFIG)
assert not config.is_configured
assert config.strict is False
assert config.docs_length is None
assert config.code_length is None
assert config.include is None
assert config.exclude is None
def test_config_bad_toml_syntax() -> None:
p = "./tests/invalid.toml"
with open(p, "w") as f:
f.write("[tool.invalid_toml_syntax\n")
with pytest.raises(len8.ConfigurationError) as e:
_ = len8.Config(p)
assert "Failed to parse configuration file" in str(e.value)
Path(p).unlink()
def test_checker_from_invalid_config() -> None:
with pytest.raises(len8.ConfigurationError) as e:
_ = len8.Checker.from_config(TEST_NON_VALID)
assert str(e.value) == (
f"'{TEST_NON_VALID}' is not a valid configuration file."
)
def test_checker_from_valid_config(valid_config: len8.Config) -> None:
checker = len8.Checker.from_config(valid_config)
assert checker.code_length == 88
assert checker.docs_length == 69
assert checker.extend == 0
assert checker.strict is True
assert checker.exclude == [
Path(".nox"),
Path(".venv"),
Path("venv"),
Path("tests/exclude.py"),
]
| 1.445313 | 1 |
810/HW04_Test_Qi_Zhao.py | qiblaqi/Stevens-Python-Course-ByQ.Zhao | 0 | 12768476 | """
this is a program contains Test Class for HW04
Written by <NAME>
"""
import unittest as ut
import HW04_Qi_Zhao as HW04
class TestFraction(ut.TestCase):
    # Test class with extended test cases.
def test_simplify(self):
self.assertEqual(HW04.Fraction(9, 27).simplify(),HW04.Fraction(1,3))
self.assertEqual(HW04.Fraction(9, -27).simplify(),HW04.Fraction(-1,3))
self.assertEqual(HW04.Fraction(1, -4).simplify(),HW04.Fraction(-1,4))
self.assertFalse(HW04.Fraction(2, -4).simplify()==HW04.Fraction(-1,4))
class TestIteration(ut.TestCase):
#Test cases for all the other functions
def test_count_vowels(self):
#test for the count vowels function
self.assertEqual(HW04.count_vowels("Happy Day!"),2)
self.assertEqual(HW04.count_vowels("HAppy Deust!"),3)
self.assertEqual(HW04.count_vowels("H ppy D st!"),0)
self.assertFalse(HW04.count_vowels("H ppy D s3et!")==0)
def test_last_occurance(self):
#test for the last occurance function for all sequence
self.assertEqual(HW04.last_occurrence(42,[10,20,23,42,42]),4)
self.assertEqual(HW04.last_occurrence('apple',['10',20,'23','42','apple']),4)
self.assertEqual(HW04.last_occurrence('p','apple'),2)
self.assertEqual(HW04.last_occurrence('p',[]),None)
def test_my_enumerate(self):
#test for the generator
strA = "hello world!"
strB = "hello "
self.assertTrue(list(HW04.my_enumerate(strA))==list(enumerate(strA)))
self.assertFalse(list(HW04.my_enumerate(strA))==list(enumerate(strB)))
if __name__ == '__main__':
ut.main(exit=False,verbosity=2)
| 3.421875 | 3 |
pmworker/document_file.py | ciur/papermerge-worker | 2 | 12768477 | import os
class DocumentFile:
def __init__(self, fmtdirstr, file_name, media_root):
self.fmtdirstr = fmtdirstr
self.file_name = file_name
self.media_root = media_root
@property
def dir_path(self):
return os.path.join(
self.media_root,
self.fmtdirstr
)
def __str__(self):
return self.abspath
def __repr__(self):
return self.abspath
@property
def rootname(self):
root, _ = os.path.splitext(
os.path.basename(self.file_name)
)
return root
@property
def is_image(self):
"""
"""
ext = os.path.splitext(self.abspath)[1]
if ext.lower() in ('.png', '.jpeg', '.jpg'):
return True
return False
@property
def abspath(self):
return os.path.join(
self.dir_path,
self.file_name
)
@property
def exists(self):
return os.path.exists(self.abspath)
| 3.15625 | 3 |
codes_/0705_Design_HashSet.py | SaitoTsutomu/leetcode | 0 | 12768478 | # %% [705. Design HashSet](https://leetcode.com/problems/design-hashset/)
class MyHashSet(set):
remove = set.discard
contains = set.__contains__
| 2.03125 | 2 |
script/main.py | MakiSakurai/robosys2021_homework2 | 0 | 12768479 | <filename>script/main.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import moveit_commander
import geometry_msgs.msg
import math
from geometry_msgs.msg import Point
from geometry_msgs.msg import Twist
from robosys2021_homework2.msg import CustomArray
class pose_main():
def __init__(self):
rospy.init_node('MediaPipe_pose_tutle')
rospy.Subscriber('/pose_topic', CustomArray, self.callback, queue_size=1)
self.pose1_x = 0
self.pose1_y = 0
self.pose2_x = 0
self.pose2_y = 0
self.pose3_x = 0
self.pose3_y = 0
self.pose4_x = 0
self.pose4_y = 0
def callback(self, msg):
self.pose1_x = msg.points[0].x
self.pose1_y = msg.points[0].y
self.pose2_x = msg.points[1].x
self.pose2_y = msg.points[1].y
self.pose3_x = msg.points[2].x
self.pose3_y = msg.points[2].y
self.pose4_x = msg.points[3].x
self.pose4_y = msg.points[3].y
def loop(self):
pub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
vel_msg = Twist()
rate = rospy.Rate(1) # 1hz
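        # The pose values below are pixel coordinates from a MediaPipe
        # detector; the hard-coded ranges appear to assume a frame roughly
        # 680 pixels wide (an assumption inferred from the thresholds).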
if 1 <= self.pose1_y <= 180 and 1 <= self.pose2_y <= 180:
vel_msg.linear.x = 0.5
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = 0
print("直進")
        if 680 >= self.pose3_x >= 540 and 1 <= self.pose3_y <= 180:  # left
vel_msg.linear.x = 0
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = 0.5
print("左回転")
        if 1 <= self.pose4_x <= 140 and 1 <= self.pose4_y <= 180:  # right
vel_msg.linear.x = 0
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = -0.5
print("右回転")
pub.publish(vel_msg)
rate.sleep()
if __name__ == '__main__':
makimaki = pose_main()
    while not rospy.is_shutdown():
makimaki.loop()
| 2.734375 | 3 |
v2ex_daily_mission/notifier/slack.py | lord63/v2ex_daily_mission | 44 | 12768480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import requests
from v2ex_daily_mission.notifier.abc import Notifier, NotificationSendFailedException
class SlackNotifier(Notifier):
def __init__(self, config):
self.config = config
def send_notification(self):
url = self.config['notifier']['slack']['url']
data = {
"text": "v2ex_daily_mission: sign failed."
}
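        # Slack incoming webhooks answer a successful post with the literal
        # body "ok", which is what the check below relies on.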
try:
response = requests.post(url, data=json.dumps(data))
if response.text != 'ok':
raise NotificationSendFailedException(
"slack notification send failed, response: {}".format(response.text)
)
except requests.RequestException as e:
raise NotificationSendFailedException(
"slack notification send failed, error: {}".format(e)
)
| 2.859375 | 3 |
dserver/config.py | dandk105/Asyncpyserver | 0 | 12768481 | # -*- coding: utf-8 -*-
# tab level: 4
#!/usr/bin/env python3
"""this module will control all settings
"""
import logging
import logging.config
import logging.handlers
import json
import os
from pathlib import Path
from mytemplate.errorclass import CustomError as customerr
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ConfManage:
"""this class manage all config on this server.
this class use setting data from pyserver/conf/json file in default.
Methods
-------
convert_path(relative_path)
return absolute path
load_data_dict
set json data
return_server_addr
return tuple of server data
subsclibe_client(client_address)
"""
def __init__(self, env_path):
self.abs_path = None
self.confi_set = {}
self.env = os.getenv("PY_SERVER_CONF")
if env_path is None:
self.rel_path = self.env
else:
self.rel_path = env_path
def setup_logging(self,
Defaultpath="./conf/logconf.json",
Defaultlevel=logging.INFO,
Envkey="LOG_CFG"
):
"""this method setting of logging.
default logging level is INFO.
if you want to change level,you should change Defaultlevel parameters.
Parameters
----------
Defaultpath : str, optional
[description], by default "./conf/logconf.json"
Defaultlevel : logginLevel, optional
[description], by default logging.INFO
Envkey : str, optional
[description], by default "LOG_CFG"
"""
log_locate_rel = Defaultpath
log_locate_abs = self.convert_path(log_locate_rel)
abs_path = Path(log_locate_abs)
if abs_path.exists():
c_dict = self.load_jsonfile(str(abs_path))
logging.config.dictConfig(c_dict)
else:
logging.basicConfig(level=Defaultlevel)
def convert_path(self, relative_path):
"""return boolean parameter.
This function return Flase if happend error.
Parameters
----------
relative_path : str
Returns
-------
abs_path : str
"""
try:
if self.rel_path is not None:
relative_path = self.rel_path
rel_path = Path(relative_path)
abs_path = rel_path.resolve(strict=True)
except FileNotFoundError:
raise customerr.PathError
except Exception as e:
logger.error(e)
raise Exception
else:
return str(abs_path)
def return_pathobj(self, abs_path):
path = Path(abs_path)
return path
def output_fileobj(self, abs_path):
        path = self.return_pathobj(abs_path)
        obj = None  # keep obj defined even if reading fails
        try:
            with path.open("r", encoding="utf-8") as f:
                obj = self.format_IO(f)
        except TypeError as e:
            print(e)
        return obj
def format_IO(self, IO):
        # The original "IO is True / IO is False" tests could never match a
        # file object, so only OSError was reachable. Try JSON first, then
        # fall back to the raw text.
        try:
            Fobj = json.load(IO)
        except json.JSONDecodeError:
            IO.seek(0)
            Fobj = IO.read()
        return Fobj
def load_jsonfile(self, abs_path):
"""this method convert JSON file to fit object of python,
So this can only be used for JSON files.
Patameters
----------
abs_path: str
purpose json file path
Returns
-------
py_dataobj: Python object
"""
path = self.return_pathobj(abs_path)
try:
with path.open(mode="r", encoding="utf-8") as f:
py_dataobj = json.load(f)
            logger.info("%s", "read conf file successfully.")
        except (OSError, json.JSONDecodeError):
            logger.error("%s", "can't read conf file; check that the config file exists")
return None
else:
return py_dataobj
def return_server_addr(self):
"""this method get path data from its args.
which return tuple about this server address data.
Returns
-------
conf_tuple : tuple
            example ("0.0.0.0", 1234)
"""
conf_dict = self.confi_set.copy()
        ip_port = ("host_ip", "host_port")
        # the old "key is ... & key is ..." expression could never be true;
        # check that both keys are present instead
        if ip_port[0] in conf_dict and ip_port[1] in conf_dict:
            conf_tuple = conf_dict[ip_port[0]], conf_dict[ip_port[1]]
            logger.info("%s", "completed this app conf")
            return conf_tuple
        return None
def get_plaformdata(self):
pass
| 2.53125 | 3 |
stop/exit.py | J-Massey/postproc | 0 | 12768482 | # -*- coding: utf-8 -*-
"""
@author: <NAME>
@description: Graceful stopping condition
@contact: <EMAIL>
"""
import signal
import time
import subprocess
from pathlib import Path
class GracefulKiller:
kill_now = False
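    # kill_now is flipped by the signal handlers below; the main loop polls it.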
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
    def exit_gracefully(self, signum, frame):
self.kill_now = True
if __name__ == '__main__':
    # subprocess.call with shell=True never raises FileNotFoundError for a
    # missing file, so inspect the return code instead
    if subprocess.call('rm .kill', shell=True, cwd=Path.cwd()) != 0:
        print("No .kill fn present. You are cleared for takeoff")
killer = GracefulKiller()
while not killer.kill_now:
time.sleep(1)
print("Found SIGINT/SIGTERM signal")
subprocess.call('touch .kill', shell=True, cwd=Path.cwd())
print("Clean Exit. The flow field has been saved :)") | 2.546875 | 3 |
Python/hardware/HardwareDevices.py | marcostrullato/RoobertV2 | 0 | 12768483 | #!/usr/bin/env python
# Roobert V2 - second version of home robot project
# ________ ______ _____
# ___ __ \______________ /_______________ /_
# __ /_/ / __ \ __ \_ __ \ _ \_ ___/ __/
# _ _, _// /_/ / /_/ / /_/ / __/ / / /_
# /_/ |_| \____/\____//_.___/\___//_/ \__/
#
# Project website: http://roobert.springwald.de
#
# ##############################################
# # Roobert hardware device factory and config #
# ##############################################
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2018 <NAME> | <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import division
import time, os, sys
my_file = os.path.abspath(__file__)
my_path ='/'.join(my_file.split('/')[0:-1])
sys.path.insert(0,my_path + "/" )
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/multitasking" )
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/hardware" )
from MultiProcessing import *
from array import array
from SharedInts import SharedInts
from SharedFloats import SharedFloats
from LX16AServos import LX16AServos
from SmartServoManager import SmartServoManager
from Arms import Arms
from Neck import Neck
from RgbLeds import RgbLeds
import atexit
class HardwareDevices():
_bodyLeds = None
_arms = None
_neck = None
_servoManager = None
_servos = None
__singleton = None
_released = False
@staticmethod
def singleton():
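        # lazy, process-wide singleton (note: not thread-safe without a lock)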
        if HardwareDevices.__singleton is None:
HardwareDevices.__singleton = HardwareDevices()
return HardwareDevices.__singleton
@property
def arms(self):
return self._arms
@property
def neck(self):
return self._neck
@property
def BodyLeds(self):
return self._bodyLeds
def __init__(self):
self._servos = LX16AServos();
self._servoManager = SmartServoManager(lX16AServos=self._servos, ramp=0, maxSpeed=1)
self._arms = Arms(self._servoManager)
self._neck = Neck(self._servoManager)
self._servoManager.Start()
self._neck.SetLeftRight(0)
self._neck.SetUpDown(0)
self._bodyLeds = RgbLeds([
my_path + '/../Gfx/Body/hearth2.gif',
my_path + '/../../RoobertGifs/e8nZC.gif',
my_path + '/../../RoobertGifs/U9LwW86.gif',
my_path + '/../../RoobertGifs/Spin_Toad.gif',
my_path + '/../../RoobertGifs/haleye.gif',
my_path + '/../../RoobertGifs/Yoshi_render.gif'
])
def Release(self):
if (self._released == False):
self._released = True
print("releasing hardware devices")
if (self._bodyLeds != None):
self._bodyLeds.Release()
if (self._arms != None):
self._arms.Release()
if (self._neck != None):
self._neck.Release()
if (self._servoManager != None):
self._servoManager.Release()
if (self._servos != None):
self._servos.Release()
def __del__(self):
self.Release()
def exit_handler():
devices.Release()
if __name__ == "__main__":
devices = HardwareDevices.singleton()
atexit.register(exit_handler)
devices.arms.WaitTillTargetsReached();
time.sleep(5)
devices.Release()
| 1.75 | 2 |
Day_6_Lists/lists_g1.py | ValRCS/Python_TietoEvry_Sep2021 | 0 | 12768484 | # # # you have some similar items that you want to store
# a1 = 3
# a2 = 5
# a3 = 8
# # ...
# a100 = 151
# # # # # # # There has to be a better way
# #
# # # # # # # # # What is a list after all?
# # # # # # # # # * ordered
# # # # # # # # # * collection of arbitrary objects (anything goes in)
# # # # # # # # # * nested (onion principle, Matryoshka)
# # # # # # # # # * mutable - values can be changed
# # # # # # # # # * dynamic - size can change
# # # # # # # trade_off - not the most efficient as far as memory usage goes
# ## for more space efficiency there are Python libraries such as numpy with ndarray structures which are based on C arrays
#
empty_list = [] # alternative would be empty_list = list()
print(empty_list) # we can add values later
print(len(empty_list))
#
my_list = [5, 6, "Valdis", True, 3.65, "alus"] # most common way of creating a list using [el1, el2]
print(my_list)
print(type(my_list), len(my_list))
#
#
print(my_list[0]) # so list index starts at 0 for the first element
# # major difference with string is that lists are mutable
my_list[1] = "Mr. 50" # lists are mutable (unlike strings); the types inside can change at runtime too
print(my_list)
drink = my_list[-1] # last element
print(my_list[-1], drink, my_list[5]) # again like in string we have indexes in both directions
#
# # typically we do not need an index for items when looping
for el in my_list:
print(el, "is type", type(el))
# # Pythonic way to show index is to enumerate
for i, el in enumerate(my_list): # if we need index , default start is 0
print(i, el, "is type", type(el))
print(f"Item no. {i} is {el}")
#
# # i can start index at some value
for i,el in enumerate(my_list, start=1000): # if we need index to start at some number
print(f"Item no. {i} is {el}")
#
list_2d = list(enumerate(my_list))
print(list_2d)
#
numbers = list(range(10)) # range is not a list (it was in Python 2.7); it produces values on demand
print(numbers)
#
# print(len(my_list))
# for i in range(len(my_list)): # this way is not encouraged, this is too C like, no need for this style
# print(f"Item no. {i} is {my_list[i]}")
drinks = ["water", "juice", "coffee", "tea", "milk", "beer"]
# idioms
for drink in drinks: # list in plural item in singular
print(drink)
#
# List slicing - we can use it to get a part of the list
print(my_list[:3]) # so we only print the first 3 elements from the list
print(my_list[-2:]) # last two
print(my_list[1:4]) # from the second to the fourth, fifth is not included
print( my_list[1:-1]) # from the second to the last but one
print(my_list[::2]) # jumping over every 2nd one
my_numbers = list(range(100,200,10)) # 100 to 190 in steps of 10; also shows how to create a list from another sequence-like object
print(my_numbers)
print(numbers)
print(numbers[::2]) # evens starting with 0, since we jump to every 2nd one
print(numbers[1::2]) # so odd numbers here
print(my_numbers[::2]) # even starting with 0, 2, 4
print(my_numbers[1::2]) # all odd indexed numbers, index 1, 3, 5, 7
# # # # print(my_list[1::2]) # start with 2nd element and then take every 2nd element
# # print(my_list[-1], my_list[len(my_list)-1]) # last element, we use the short syntax
print(my_numbers[::-1])
print(my_list[::-1])
print(numbers[::-1])
print(reversed(numbers)) # this we would use when we do not need the list completely for looping
# # why would you use such a construction
# # because you do not want to create a new list in memory, you want to use the original
for n in reversed(numbers): # this is more efficient than numbers[::-1] because we do not create a new list in memory
print(n)
# print(list(reversed(numbers)))
my_reversed_numbers = my_numbers[::-1]
print(my_reversed_numbers)
# # # # print(reversed(my_list)) # returns an iterator - so not a list but sort of prepared to list collection
# # # # print(list(reversed(my_list))) # so we need to cast it to list
# # # # print(my_list[::-1]) # so same as above when used on a list
# # empty_list = [] # more common
# also_empty_list = list()
# print(empty_list, also_empty_list)
food = "kartupelis"
print(food)
food_chars = list(food) # so type casing just like str, int, float, bool etc
# list will work with any sequence type - and str is a sequence type
print(food_chars)
print("OLD and TIRED", food_chars[5])
food_chars[5] = "m" # so replacing the 6th elemen - in this case a letter p with m
print("NEW and FRESH", food_chars[5])
print(food_chars)
maybe_food = str(food_chars) # not quite what we want, but it is a string
print(maybe_food) # just a string of what printing a list would look like
# so maybe_food is a string, but it is not a list anymore
food_again = "".join(food_chars) # "" shows what we are putting between each character, in this case nothing
print(food_again)
food_again_with_space = " ".join(food_chars) # "" shows what we are putting between each character
print(food_again_with_space)
food_again_with_smile = "**😁**".join(food_chars) # "" shows what we are putting between each character
print(food_again_with_smile)
small_list = ["Valdis", "likes", "beer"]
separator = "==="
new_text = separator.join(small_list)
print(new_text)
# num_string = "||".join(numbers) # we will need to convert numbers to theri str representation
# print(num_string)
# # # print(list("kartupelis")) # can create a list out of string
print("kartupelis".split("p")) # i could split string by something
sentence = "A quick brown fox jumped over a sleeping dog"
print(sentence) # string
words = sentence.split(" ") # we split by some character in this case whitespace
print(words) # list with words
#
# sentence_with_exclams = ".!.".join(words)
# print(sentence_with_exclams)
# # # # # # # # # how to check for existance in list
print(my_list)
print("3.65 is in my list?", 3.65 in my_list)
# print(66 in my_list)
print("Valdis" in my_list)
print(my_list[2]) # Valdis
print("al" in "Valdis", "al" in my_list[2])
print("al" in my_list) # this is false,because in needs a exact match, to get partial we need to go deeper
# # # # # # # # # # # iterate over items
# print("*"*20)
# # for it in my_list:
# # print(it)
# #
needle = "al" # what we want to find in our list
for item in my_list:
print("Checking ", item)
if type(item) == str and needle in item: # not all types have in operator
print(f"Found {needle=} in {item=}") # python 3.8 and up, good for debuggin
print(f"Found needle={needle} in item={item}") # for python 3.7
# #
# # # # # # # # # # #
# # # # # # # my_list.append()
my_list.append("<NAME>") # adds "<NAME>" at the end of my_list
my_list.append("<NAME>") # IN PLACE methods, means we modify the list
print(my_list)
# #
# # # # # # # # # example how to filter something
find_list = [] # so we have an empty list in beginning
needle = "al"
for item in my_list: # i can reuse item in the loop
# if needle in item: will not work because we have non strings in list
if type(item) == str and needle in item:
print(f"Found {needle=} in {item=}")
find_list.append(item)
print(f"{needle=} found in {find_list=}")
# # # # # # # # # ps the above could be done simpler with list comprehension
# #
# # # # # # # # # # # out of place meaning find_list stays the same
new_list = my_list + ["Kalējs", "Audējs"] # OUT OF PLACE addition, my_list is not modified
print(len(new_list), len(my_list))
print(my_list)
print(new_list)
# #
new_list += ["Malējs", "Salīgais"] # shorthand for new_list = new_list + [new items ] so flattened
print(new_list)
new_list.append(["Svarīgais", "Mazais"]) #notice append added a list a s nested
print(new_list) # notice that we have a list in the list
print(new_list[-1])
print(new_list[-1][-1], new_list[-1][1]) # in this case for size 2 1 and -1 give same results
new_list.extend(["Fantastiskais", "Lapsa"]) # very similar to += IN PLACE
print(new_list)
# #
# # # print(f"{str(my_list)}") # not quite what we want
# # # # # # how to convert all values to str
str_list = []
for item in my_list:
str_list.append(str(item)) # so if item is already string nothing will happen
print(str_list)
# #
number_str_list = []
for num in numbers:
number_str_list.append(str(num))
print(numbers) # list of integers
print(number_str_list) # a list of strings
#
number_string = ",".join(number_str_list)
print(number_string)
# # i can go in reverse as well
numbers_deconstructed = number_string.split(",")
print(numbers_deconstructed)
#
# my_numbers = []
# for it in numbers_deconstructed:
# my_numbers.append(int(it))
# print(my_numbers)
#
#
#
# # # # # # # # # # list comprehensions make it even short
# print(my_list)
str_list_2 = [str(item) for item in my_list] # so i go through each item and make a new list with string versions of all items
print(str_list_2)
# #
square_list = []
for n in range(10):
square_list.append(n**2)
print(square_list)
# list comprehension example of square_list
squares = [n**2 for n in my_numbers] # a new list with squares of original numbers
print(squares)
# # weird_squares = [n*n or 9000 for n in my_numbers] # we could utilize or for 0
# # print(wierd_squares)
#
# list comprehension can serve as a filter
just_numbers = [1,5,2,2,5,7,9,1,5,11,10]
odd_numbers = [n for n in just_numbers if n%2 == 1]
print(odd_numbers)
#
print(numbers)
odd_squares = [n*n for n in numbers if n%2 == 1]
print(odd_squares)
#
# # same idea as above
# odd_squares_also = []
# for n in my_numbers:
# if n%2 == 1:
# odd_squares_also.append(n*n)
# # advantage of long approach is that here we can do more stuff,print etc
# print(odd_squares_also)
#
print(str_list)
print(str_list_2)
# #
print("Lists have equal values inside?",str_list == str_list_2) # check if lists contain equal values
print("Lists are physically same?", str_list is str_list_2) # check if our variables reference the same list
str_list_3 = str_list # str_list_3 is another name for the same list, NOT A COPY!
print(str_list == str_list_3, str_list is str_list_3)
str_list_copy = str_list.copy() # create a new list with same values
print(str_list == str_list_copy, str_list is str_list_copy)
print(id(str_list))
print(id(str_list_3))
print(id(str_list_copy))
# #
print(needle)
# # # # # # # need needle of course
# # # # so i can add if as filter to list comprehension
beer_list = [item for item in str_list if needle in item]
print(beer_list)
beer_list = beer_list[1:] #get rid of Valdis (first element with index 0) in my beer list
print(beer_list)
# #
beer_list += ["<NAME>"] # we create a list on demand - list literal beer_list = beer_list + ["<NAME>"]
# # # # # similar to beer_list.append("<NAME>")
# print(beer_list)
# #
# # # # squares = [num*num for num in range(10)] # so we come up with num on the spot
# # # # print(squares)
# # # squares_matrix = [[num, "squared", num*num] for num in range(10)]
# # # print(squares_matrix) # so list of lists (2d array basically)
# # # print(squares_matrix[9][2], squares_matrix[-1][-1])
# #
# beer_list += ["Malējs"] # same as new_list = new_list + ["Malējs"]
# # # # # # new_list
# #
print(beer_list[-1])
print(beer_list)
last_beer = beer_list[-1]
print(last_beer)
print(beer_list)
# beer_list = beer_list[:-1] #so i get rid of last element
# print(last_beer, beer_list)
beer_list.append("Malējs")
print(beer_list)
last_beer = beer_list.pop() # also IN PLACE meaning i destroyed the last value
print(last_beer, beer_list)
beer_list.reverse() # so i reverse the list IN PLACE
print(beer_list)
beer_list.reverse() # so i reverse the list IN PLACE
print(beer_list)
# #
# # # # # print(f"We took out {last_beer}")
# # # # # print(beer_list)
# # # # # beer_list.append(last_beer)
# # # # # print(beer_list)
# #
beer_count = 0
for el in beer_list:
if "alus" in el:
# if "alus" == el: # so count will be for exact matches
beer_count += 1
print(beer_count)
# #
# # # # # so above count can be done with count method
print(beer_list.count("alus")) # only exact matches
print(beer_list.index("alus")) # will be 0 since we start counting with 0
# # # print(beer_list.find("Mālenīetis")) # find does not exist for lists, unlike string
beer_list.extend(["Labietis", "Mālpils alus"]) # again in place similar to +=
print(beer_list)
print(beer_list.index("Mālpils alus"))
# beer_with_zh = [el for el in beer_list if "ža" in el]
# print(beer_with_zh)
# # # # print(len(beer_with_zh))
# # # # beer_in_description = [el for el in beer_list if "alus" in el]
# # # # print(beer_in_description)
# # # # # has_alus_count = len([el for el in beer_list if "alus" in el])
# # # # # print(has_alus_count)
# #
beer_list.insert(2, "Cēsu sula") # so it will insert BEFORE index 2 (meaning before 3rd element)
print(beer_list)
beer_list.insert(5, "Cēsu sula") # in general we want append instead of insert for speed
print(beer_list)
beer_list.remove("Cēsu sula") # removes first occurance IN PLACE
print(beer_list)
# # we could keep removing, but easier is to use a list comprehension to make a new list
clean_beers = [el for el in beer_list if el != "Cēsu sula"]
print(clean_beers)
#
# while "Cēsu sula" in beer_list.copy(): # careful with looping and changing element size
# print("found Cēsu sula")
# beer_list.remove("Cēsu sula")
# # print(beer_list)
# #
# # # # # # # # # beer_list.remove("Cēsu sula") # again in place first match
# # # # # # # # # print(beer_list)
# # # # # # # # # beer_list.remove("alus")
# # # # # # # # # print(beer_list)
# # # # # # # # # beer_list.remove("alus")
# # # # # # # # # print(beer_list)
# #
# # beer_list.reverse() # in place reversal
# # print(beer_list)
new_beer_list = beer_list[::-1] # so i save the reversed list but keep the original
print(new_beer_list)
# #
# # # # # # # # # # so if we have comparable data types inside (so same types)
new_beer_list.sort() # in place sort, modifies existing
print(new_beer_list)
# num_list = [1,2,3,0, -5.5, 2.7, True, False, 0.5, 0] # we can compare int, float and bool
# print(num_list)
# print(num_list.sort()) # returns None! because IN PLACE
# print(num_list)
# #
# # sorted_by_len = sorted(new_beer_list, key=len) # out of place meaning returns new list
# # print(sorted_by_len)
# # # # # sorted_by_len_rev = sorted(new_beer_list, key=len, reverse=True) # out of place meaning returns new list
# # # # # print(sorted_by_len_rev)
# # # # # print( min(beer_list), max(beer_list)) # by alphabet
# #
numbers = [1, 4, -5, 3.16, 10, 9000, 5]
print(min(numbers),max(numbers), sum(numbers), sum(numbers)/len(numbers))
my_sorted_numbers = sorted(numbers) # OUT OF PLACE sort we need to save it in new variable
print(my_sorted_numbers)
# # avg = round(sum(numbers)/len(numbers), 2)
# # print(avg)
# #
# saved_sort_asc = sorted(numbers) # out of place does not modify numbers
# print(saved_sort_asc)
# # # # # # # # # # sorted(numbers, reverse=True) # out of place does not modify numbers
# # # # # print(numbers)
# # # # # print(numbers.sort()) # in place meaning it modifies the numbers
# # # # # print(numbers)
# # # # # # # # # # numbers.remove(9000) # will remove in place first 9000 found in the list
# # # # # # # # # # numbers
# # # # # # # # # # min(numbers), max(numbers)
# # # print(sum(my_numbers), min(my_numbers), max(my_numbers))
# # # # # # # # # our own sum
# # # total = 0
# # # for n in my_numbers:
# # # total += n
# # # # useful if we want to do more stuff with individual elements in list
# # # print(total)
# #
# # # # sentence = "Quick brown fox jumped over a sleeping dog"
# # # # words = sentence.split() # default split is by whitespace convert into a list of words
# # # # print(words)
# # # # words[2] = "bear" # i can a modify a list
# # # # print(words)
# # # # # # # # # # so str(words) will not work exactly so we need something else
# # # # print(str(words)) # not what we want)
# # # # new_sentence = " ".join(words) # we will lose any double or triple whitespace
# # # # print(new_sentence)
# # # # compressed_sent = "".join(words) # all words together
# # # # print(compressed_sent)
# # # # funky_sentence = "*:*".join(words) # we will lose any double or triple whitespace
# # # # print(funky_sentence)
# #
# # # # # # # # # # we can create a list of letters
# # # # food = "kartupelis"
# # # # letters = list(food) # list with all letters
# # # # print(letters)
# # # # letters[5] = "m"
# # # # new_word = "".join(letters) # we join by nothing so no spaces in the new word
# # # # print(new_word)
# #
# # print(words)
# # new_list = []
# # for word in words:
# # new_list.append(word.capitalize())
# # print(new_list)
# # # # # # # # # # # list comprehension same as above
# # new_list_2 = [w.capitalize() for w in words]
# # print(new_list_2)
# # # # filtered_list = [w for w in words if w.startswith("b")]
# # # # print(filtered_list)
# # # # filtered_list = [w.upper() for w in words if w.startswith("b")]
# # # # print(filtered_list)
# # # # # # # # # # filtered_list_2 = [w for w in words if w[0] == "b"]
# # # # # # # # # # filtered_list_2
# # # # # # # # # # filtered_list_3 = [w.upper() for w in words if w[0] == "b"]
# # # # # # # # # filtered_list_3
# #
# # # # print("Hello")
# #
# # # # # # # # # numbers = list(range(10)) # we cast to list our range object
# # # # # # # # # print(numbers)
# # # # squares = []
# # # # for n in range(10): # could also use range(10)
# # # # squares.append(n*n)
# # # # print(squares)
# # # # squares_2 = [n*n for n in range(10)] # list comprehension of the above
# # # # print(squares_2)
# # # # even_squares = [n*n for n in range(10) if n % 2 == 0]
# # # # print(even_squares)
# #
# # # # # # # # # print("Whew we need a beer now don't we ?")
# #
# # # # # # # # # # food
# # # # # print(food)
# # # # # char_codes = [ord(c) for c in food]
# # # # # print(char_codes)
# # # # # char_codes_list = [[f"Char:{c}", ord(c)] for c in food]
# # # # # print(char_codes_list)
# # # # # # # # # # print(char_codes_list[0])
# # # # # # # # # # print(char_codes_list[0][0])
# # # # # # # # # # print(char_codes_list[-1])
# # # # # # # # # # print(char_codes_list[-1][-1])
# # # # # # # # so list of lists of characters
# # # # # # # chars = [[c for c in list(word)] for word in sentence.split()]
# # # # # # # print(chars)
#
| 4.625 | 5 |
attic/iterables/almost_aritprog_v0.py | matteoshen/example-code | 5,651 | 12768485 | """
Arithmetic progression class
>>> ap = ArithmeticProgression(1, .5, 3)
>>> list(ap)
[1.0, 1.5, 2.0, 2.5]
"""
from collections import abc
class ArithmeticProgression:
def __init__(self, begin, step, end):
self.begin = begin
self.step = step
self.end = end
def __iter__(self):
return ArithmeticProgressionIterator(self)
class ArithmeticProgressionIterator(abc.Iterator):
def __init__(self, arithmetic_progression):
self._ap = arithmetic_progression
self._index = 0
def __next__(self):
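        # Coerce begin to the type of begin + step, so an int begin with a
        # float step yields floats for the whole progression.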
first = type(self._ap.begin + self._ap.step)(self._ap.begin)
result = first + self._ap.step * self._index
if result < self._ap.end:
self._index += 1
return result
else:
raise StopIteration
| 3.6875 | 4 |
examples/all.py | yukkun007/mmbooks | 0 | 12768486 | <gh_stars>0
from typing import List
from moz_books import Calil, Google, OpenDB, Rakuten, SearchParams
from moz_books.interface.i_service import IService
from moz_books.log import get_logger
LOGGER = get_logger(__name__)
def search(params: SearchParams):
services: List[IService] = [Rakuten(), Google(), OpenDB(), Calil()]
for service in services:
books = service.search_books(params)
for book in books:
LOGGER.info(book)
def main() -> None:
# search(SearchParams(title="5秒後に意外な"))
# search(SearchParams(author="桃戸ハル"))
search(SearchParams(isbn="9784052046209")) # 5秒後に意外な結末ミノタウロスの青い迷宮
# search(SearchParams(isbn="9784532280208")) # カンブリア宮殿村上龍の質問術
if __name__ == "__main__":
main()
| 2.390625 | 2 |
libcity/executor/map_matching_executor.py | moghadas76/test_bigcity | 221 | 12768487 | from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
class MapMatchingExecutor(AbstractTraditionExecutor):
def __init__(self, config, model):
self.model = model
self.config = config
self.evaluator = get_evaluator(config)
self.exp_id = self.config.get('exp_id', None)
self.cache_dir = './libcity/cache/{}/model_cache'.format(self.exp_id)
self.evaluate_res_dir = './libcity/cache/{}/evaluate_cache'.format(self.exp_id)
self._logger = getLogger()
def evaluate(self, test_data):
"""
        Run the model on the test data.
Args:
test_data
"""
result = self.model.run(test_data)
batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}
self.evaluator.collect(batch)
self.evaluator.save_result(self.evaluate_res_dir)
def train(self, train_dataloader, eval_dataloader):
"""
        Traditional models do not need training, so this is a no-op.
Args:
train_dataloader(torch.Dataloader): Dataloader
eval_dataloader(torch.Dataloader): Dataloader
"""
pass # do nothing
| 2.484375 | 2 |
snoop/data/celery.py | liquidinvestigations/hoover-snoop2 | 0 | 12768488 | <gh_stars>0
"""Configuration for Celery.
Logging and Settings for Celery are all handled here.
"""
import logging
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snoop.defaultsettings")
app = Celery('snoop.data')
app.conf.update(
worker_log_format="[%(asctime)s: %(name)s %(levelname)s] %(message)s",
)
app.config_from_object('django.conf:settings', namespace='CELERY')
app.conf.task_queue_max_priority = 10
app.conf.task_default_priority = 5
app.autodiscover_tasks()
# from pprint import pprint
# pprint(logging.Logger.manager.loggerDict)
logging.getLogger('celery').setLevel(logging.INFO)
| 2 | 2 |
copilot-read-email.py | ghoelzer-rht/python-copilot-experiments | 0 | 12768489 | # Create program to read email from Gmail account and perform a sentiment analysis on the email.
import imaplib
import email
import email.message
import email.utils
import re
import datetime
import sys
import os
# import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.backends.backend_pdf import PdfPages
from email.parser import Parser
from email.header import decode_header
from email.utils import parsedate_tz, mktime_tz, parsedate
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
from email.encoders import encode_base64
from email.utils import make_msgid
from email.utils import formatdate
from email.utils import formataddr
from email.utils import getaddresses
from email.utils import parseaddr
def main():
# Create a connection to the Gmail server
print("\n*** Starting Email Sentiment Analyzer *** \n")
print("\n* Connecting to Gmail * \n")
mail = imaplib.IMAP4_SSL('imap.gmail.com')
mail.login('<EMAIL>', 'ppyaqlzrpmpmyiez')
mail.select('inbox')
result, data = mail.uid('search', None, "ALL")
inbox_item_list = data[0].split()
latest_email_uid = inbox_item_list[-1]
result, data = mail.uid('fetch', latest_email_uid, '(RFC822)')
raw_email = data[0][1]
email_message = email.message_from_string(raw_email.decode('utf-8'))
email_subject = email_message['subject']
email_from = email_message['from']
email_date = email_message['date']
# email_body = get_body(email_message)
email_body_plain = email_message.get_payload()
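    # Note: get_payload() returns a list of sub-parts for multipart messages;
    # a fuller implementation would walk the parts to find the text/plain body.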
# Define the date format
date_format = '%a, %d %b %Y %H:%M:%S %z'
date_format_2 = '%d/%b/%Y'
# Parse the email date
email_date_parsed = datetime.datetime.strptime(email_date, date_format)
email_date_parsed = email_date_parsed.strftime(date_format_2)
# Print parsed email subject and date
print('Email subject: ' + email_subject)
print('Email date: ' + email_date_parsed)
# Evaluate the sentiment of the email
# Create a dataframe to store the data
# Connect to AWS S3
# =============================================================================
if __name__ == "__main__":
main()
| 2.953125 | 3 |
Two_Pointers/Tests.py | daniel-zeiler/potential-happiness | 0 | 12768490 | <reponame>daniel-zeiler/potential-happiness<filename>Two_Pointers/Tests.py
import unittest
from typing import List
import Two_Pointers.Solutions as two_pointer
import Two_Pointers.Solutions_Two as two_pointer_two
import Linked_List.Tests as linked_list
def print_list(head: linked_list.ListNode):
result = []
while head:
result.append(head.val)
head = head.next
print(result)
class SolutionsTest(unittest.TestCase, linked_list.CustomAssertion):
def test_max_area(self):
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
output = 49
self.assertEqual(output, two_pointer_two.maxArea(height))
height = [1, 1]
output = 1
self.assertEqual(output, two_pointer_two.maxArea(height))
height = [4, 3, 2, 1, 4]
output = 16
self.assertEqual(output, two_pointer_two.maxArea(height))
height = [1, 2, 1]
output = 2
self.assertEqual(output, two_pointer_two.maxArea(height))
def test_remove_nth_from_end(self):
input = linked_list.list_builder([1, 2, 3, 4, 5])
n = 2
output = linked_list.list_builder([1, 2, 3, 5])
self.assert_compare_lists(output, two_pointer_two.removeNthFromEnd(input, n))
input = linked_list.list_builder([1, 2])
n = 1
output = linked_list.list_builder([1])
self.assert_compare_lists(output, two_pointer_two.removeNthFromEnd(input, n))
input = linked_list.list_builder([1])
n = 1
output = linked_list.list_builder([])
self.assert_compare_lists(output, two_pointer_two.removeNthFromEnd(input, n))
def test_remove_duplicates(self):
nums = [1, 1, 2]
output = 2
self.assertEqual(output, two_pointer_two.removeDuplicates(nums))
self.assertListEqual([1, 2], nums[:2])
nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
output = 5
self.assertEqual(output, two_pointer_two.removeDuplicates(nums))
self.assertListEqual([0, 1, 2, 3, 4], nums[:5])
def test_remove_elements(self):
nums = [3, 2, 2, 3]
val = 3
output = 2
output_2 = [2, 2]
self.assertEqual(output, two_pointer.removeElement(nums, val))
self.assertCountEqual(output_2, nums[:2])
nums = [0, 1, 2, 2, 3, 0, 4, 2]
val = 2
output = 5
output_2 = [0, 1, 4, 0, 3]
self.assertEqual(output, two_pointer.removeElement(nums, val))
self.assertCountEqual(output_2, nums[:5])
def test_sort_color(self):
nums = [2, 0, 2, 1, 1, 0]
output = [0, 0, 1, 1, 2, 2]
two_pointer_two.sortColors(nums)
self.assertListEqual(output, nums)
nums = [2, 0, 1]
output = [0, 1, 2]
two_pointer_two.sortColors(nums)
self.assertListEqual(output, nums)
nums = [0]
output = [0]
two_pointer_two.sortColors(nums)
self.assertListEqual(output, nums)
nums = [1]
output = [1]
two_pointer_two.sortColors(nums)
self.assertListEqual(output, nums)
def test_is_palindrome(self):
s = "A man, a plan, a canal: Panama"
output = True
self.assertEqual(output, two_pointer.isPalindrome(s))
s = "race a car"
output = False
self.assertEqual(output, two_pointer.isPalindrome(s))
s = " "
output = True
self.assertEqual(output, two_pointer.isPalindrome(s))
def test_move_zeros(self):
nums = [0, 1, 0, 3, 12]
output = [1, 3, 12, 0, 0]
two_pointer.moveZeroes(nums)
self.assertListEqual(output, nums)
def test_reverse_string(self):
s = ["h", "e", "l", "l", "o"]
output = ["o", "l", "l", "e", "h"]
two_pointer.reverseString(s)
self.assertListEqual(output, s)
s = ["H", "a", "n", "n", "a", "h"]
output = ["h", "a", "n", "n", "a", "H"]
two_pointer.reverseString(s)
self.assertListEqual(output, s)
def test_intersection(self):
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
output = [2]
self.assertListEqual(output, two_pointer_two.intersection(nums1, nums2))
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
output = [4, 9]
self.assertCountEqual(output, two_pointer_two.intersection(nums1, nums2))
def test_is_subsequence(self):
s = "abc"
t = "ahbgdc"
output = True
self.assertEqual(output, two_pointer_two.isSubsequence(s, t))
s = "axc"
t = "ahbgdc"
output = False
self.assertEqual(output, two_pointer_two.isSubsequence(s, t))
def test_reverse(self):
s = "Let's take LeetCode contest"
output = "s'teL ekat edoCteeL tsetnoc"
self.assertEqual(output, two_pointer_two.reverseWords(s))
s = "<NAME>"
output = "doG gniD"
self.assertEqual(output, two_pointer_two.reverseWords(s))
def test_valid_palindrome(self):
s = "aba"
self.assertEqual(True, two_pointer.validPalindrome(s))
s = "abca"
self.assertEqual(True, two_pointer.validPalindrome(s))
s = "abc"
self.assertEqual(False, two_pointer.validPalindrome(s))
def test_partition_labels(self):
s = "ababcbacadefegdehijhklij"
output = [9, 7, 8]
self.assertListEqual(output, two_pointer_two.partitionLabels(s))
s = "eccbbbbdec"
output = [10]
self.assertListEqual(output, two_pointer_two.partitionLabels(s))
def test_sort_array_by_parity(self):
nums = [3, 1, 2, 4]
output = [2, 4, 3, 1]
self.assertListEqual(output, two_pointer.sortArrayByParity(nums))
def test_interval_intersection(self):
firstList = [[0, 2], [5, 10], [13, 23], [24, 25]]
secondList = [[1, 5], [8, 12], [15, 24], [25, 26]]
output = [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
self.assertListEqual(output, two_pointer.intervalIntersection(firstList, secondList))
firstList = [[1, 3], [5, 9]]
secondList = []
output = []
self.assertListEqual(output, two_pointer.intervalIntersection(firstList, secondList))
firstList = []
secondList = [[4, 8], [10, 12]]
output = []
self.assertListEqual(output, two_pointer.intervalIntersection(firstList, secondList))
firstList = [[1, 7]]
secondList = [[3, 10]]
output = [[3, 7]]
self.assertListEqual(output, two_pointer.intervalIntersection(firstList, secondList))
def test_num_of_sub_arrays(self):
arr = [2, 2, 2, 2, 5, 5, 5, 8]
k = 3
threshold = 4
output = 3
self.assertEqual(output, two_pointer.numOfSubarraysTwo(arr, k, threshold))
self.assertEqual(output, two_pointer.numOfSubarrays(arr, k, threshold))
def test_min_pair_sum(self):
nums = [3, 5, 2, 3]
output = 7
self.assertEqual(output, two_pointer.minPairSum(nums))
nums = [3, 5, 4, 2, 4, 6]
output = 8
self.assertEqual(output, two_pointer.minPairSum(nums))
if __name__ == '__main__':
unittest.main()
| 3.765625 | 4 |
euler/60_1.py | stauntonknight/algorithm | 0 | 12768491 | <reponame>stauntonknight/algorithm
limit = 10000
primes = primelist(limit)
s1 = dict()
s2 = set()
s3 = set()
s4 = set()
for p in primes:
s1[p] = set()
for s in s1:
if isprime(int(str(p) + str(s)), primes) and isprime(int(str(s) + str(p)), primes):
s1[s].add(p)
s1[p].add(s)
s2.add((s, p))
for s in s2:
if p in s1[s[0]] and p in s1[s[1]]: s3.add((s[0], s[1], p))
for s in s3:
if p in s1[s[0]] and p in s1[s[1]] and p in s1[s[2]]: s4.add((s[0], s[1], s[2], p))
for s in s4:
        if p in s1[s[0]] and p in s1[s[1]] and p in s1[s[2]] and p in s1[s[3]]: print(p, s)
| 2.859375 | 3 |
marrow/util/context/__init__.py | isprime/marrow.util | 0 | 12768492 | <filename>marrow/util/context/__init__.py
# encoding: utf-8
import os
from contextlib import contextmanager
__all__ = ['cd', 'path']
@contextmanager
def cd(path, on=os):
"""Change the current working directory within this context.
Preserves the previous working directory and can be applied to remote
connections that offer @getcwd@ and @chdir@ methods using the @on@
argument.
"""
original = on.getcwd()
on.chdir(path)
yield
on.chdir(original)
@contextmanager
def path(append=None, prepend=None, replace=None, on=os):
"""Update the PATH environment variable.
Can append, prepend, or replace the path. Each of these expects a string
or a list of strings (for multiple path elements) and can operate on remote
connections that offer an @environ@ attribute using the @on@ argument.
"""
original = on.environ['PATH']
if replace and (append or prepend):
raise ValueError("You can not combine append or prepend with replace.")
if replace:
        if not isinstance(replace, list):
            replace = [replace]
on.environ['PATH'] = ':'.join(replace)
else:
if append:
            if not isinstance(append, list):
                append = [append]
append.insert(0, on.environ['PATH'])
on.environ['PATH'] = ':'.join(append)
if prepend:
            if not isinstance(prepend, list):
                prepend = [prepend]
prepend.append(on.environ['PATH'])
on.environ['PATH'] = ':'.join(prepend)
yield
on.environ['PATH'] = original
@contextmanager
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
        if value is None:
del on.environ[key]
continue
on.environ[key] = value
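
# Illustrative usage (a sketch; paths and values are examples):
#
#     with cd('/tmp'):
#         ...          # cwd is /tmp here, restored afterwards
#     with path(prepend='/usr/local/bin'):
#         ...          # PATH temporarily starts with /usr/local/bin
#     with environ(LANG='C'):
#         ...          # LANG is 'C' here, restored afterwards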
| 2.5625 | 3 |
bot.py | Rishiraj0100/world-chat | 14 | 12768493 | <filename>bot.py
"""
MIT License
Copyright (c) 2021 deadshot
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
import asyncpg
import traceback
from utils.cache import cache
from discord.ext import commands
from config import *
from discord.flags import MemberCacheFlags
async def get_prefix(bot, message):
    if not message.guild:
        prefix = 'w!'
    else:
        prefix = bot.prefixes.get(message.guild.id) or "w!"  # just in case the prefix isn't cached
return commands.when_mentioned_or(prefix)(bot, message)
intents = discord.Intents.default()
intents.members=True
bot = commands.Bot(command_prefix=get_prefix,
chunk_guilds_at_startup=False,
member_cache_flags=MemberCacheFlags.from_intents(intents),
case_insensitive=True, intents=intents)
bot.color = 0xecd3a1
for ext in extensions:
    try:
        bot.load_extension(ext)
        print(f'[EXTENSION] {ext} was loaded successfully!')
    except Exception as exc:
        # use a distinct name so the loop variable (the extension) isn't shadowed
        tb = traceback.format_exception(type(exc), exc, exc.__traceback__)
        tbe = "".join(tb)
        print(f'[WARNING] Could not load extension {ext}: {tbe}')
async def create_db_pool():
bot.db = await asyncpg.create_pool(**SQL_INFO)
print("----------------\nConnected to Database.")
QUERIES = open('database/migrate.sql', 'r').read()
await bot.db.execute(QUERIES)
print("----------------\nDatabase checkup successfull.")
await cache(bot)
bot.loop.create_task(create_db_pool())
bot.run(token)
| 1.9375 | 2 |
setup.py | M-o-a-T/ping3 | 0 | 12768494 | import setuptools
with open('README.md') as f:
long_desc = f.read()
setuptools.setup(
name='asyncping3',
use_scm_version={"version_scheme": "guess-next-dev", "local_scheme": "dirty-tag"},
setup_requires=["setuptools_scm"],
description='A pure python3 version of ICMP ping implementation using raw socket.',
long_description=long_desc,
long_description_content_type='text/markdown',
url='https://github.com/M-o-a-T/asyncping3',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Topic :: System :: Networking',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
keywords='python3 ping icmp socket tool',
packages=["asyncping3"],
python_requires='>=3',
install_requires=["anyio >= 3"],
extras_require={
'dev': ['build', 'twine', 'pycodestyle'],
},
package_data={},
data_files=[],
entry_points={
'console_scripts': ['pping=asyncping3._main:main'],
},
)
| 1.382813 | 1 |
setup.py | neilalbrock/flask-elasticutils | 1 | 12768495 | <reponame>neilalbrock/flask-elasticutils
import os
from setuptools import setup
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
return open(path).read()
setup(
name='Flask-ElasticUtils',
version='0.1.7',
url='https://github.com/neilalbrock/flask-elasticutils/',
license='BSD',
author='<NAME> - Atomised',
author_email='<EMAIL>',
description='ElasticUtils for Flask',
long_description=_read('README.rst'),
py_modules=['flask_elasticutils'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'elasticutils>=0.9',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 1.34375 | 1 |
automl_in_action/3_adanet/1_simple_boston.py | wdxtub/deep-learning-note | 37 | 12768496 | <filename>automl_in_action/3_adanet/1_simple_boston.py
import functools
import os
import adanet
import tensorflow as tf
# 固定种子
RANDOM_SEED = 42
LOG_DIR = 'models'
'''
In this example, we will solve a regression task known as the Boston Housing dataset to
predict the price of suburban houses in Boston, MA in the 1970s. There are 13 numerical features,
the labels are in thousands of dollars, and there are only 506 examples.
'''
(x_train, y_train), (x_test, y_test) = (tf.keras.datasets.boston_housing.load_data())
print('Model Inputs: %s \n' % x_train[0])
print('Model Output (house price): $%s' % (y_train[0] * 1000))
FEATURES_KEY = "x"
def input_fn(partition, training, batch_size):
"""Generate an input function for the Estimator."""
def _input_fn():
if partition == "train":
dataset = tf.data.Dataset.from_tensor_slices(({
FEATURES_KEY: tf.log1p(x_train)
}, tf.log1p(y_train)))
else:
dataset = tf.data.Dataset.from_tensor_slices(({
FEATURES_KEY: tf.log1p(x_test)
}, tf.log1p(y_test)))
if training:
dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _input_fn
_NUM_LAYERS_KEY = "num_layers"
class _SimpleDNNBuilder(adanet.subnetwork.Builder):
"""Builds a DNN subnetwork for AdaNet"""
def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights, seed):
"""Initializes a `_DNNBuilder`.
Args:
optimizer: An `Optimizer` instance for training both the subnetwork and
the mixture weights.
layer_size: The number of nodes to output at each hidden layer.
num_layers: The number of hidden layers.
learn_mixture_weights: Whether to solve a learning problem to find the
best mixture weights, or use their default value according to the
mixture weight type. When `False`, the subnetworks will return a no_op
for the mixture weight train op.
seed: A random seed.
Returns:
An instance of `_SimpleDNNBuilder`.
"""
self._optimizer = optimizer
self._layer_size = layer_size
self._num_layers = num_layers
self._learn_mixture_weights = learn_mixture_weights
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
input_layer = tf.to_float(features[FEATURES_KEY])
kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
last_layer = input_layer
for _ in range(self._num_layers):
last_layer = tf.layers.dense(
last_layer,
units=self._layer_size,
activation=tf.nn.relu,
kernel_initializer=kernel_initializer)
        logits = tf.layers.dense(
            last_layer,
            units=logits_dimension,
            kernel_initializer=kernel_initializer)
        persisted_tensors = {_NUM_LAYERS_KEY: tf.constant(self._num_layers)}
        return adanet.Subnetwork(
            last_layer=last_layer,
            logits=logits,
            complexity=self._measure_complexity(),
            persisted_tensors=persisted_tensors)
def _measure_complexity(self):
"""Approximates Rademacher complexity as the square-root of the depth."""
return tf.sqrt(tf.to_float(self._num_layers))
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
"""See `adanet.subnetwork.Builder`."""
return self._optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
"""See `adanet.subnetwork.Builder`."""
if not self._learn_mixture_weights:
return tf.no_op()
return self._optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
"""See `adanet.subnetwork.Builder`."""
if self._num_layers == 0:
# A DNN with no hidden layers is a linear model
return "linear"
return "{}_layer_dnn".format(self._num_layers)
class SimpleDNNGenerator(adanet.subnetwork.Generator):
"""Generates a two DNN subnetworks at each iteration.
The first DNN has an identical shape to the most recently added subnetwork
in `previous_ensemble`. The second has the same shape plus one more dense
layer on top. This is similar to the adaptive network presented in Figure 2 of
[Cortes et al. ICML 2017](https://arxiv.org/abs/1607.01097), without the
connections to hidden layers of networks from previous iterations.
"""
def __init__(self,
optimizer,
layer_size=64,
learn_mixture_weights=False,
seed=None):
"""Initializes a DNN `Generator`.
Args:
optimizer: An `Optimizer` instance for training both the subnetwork and
the mixture weights.
layer_size: Number of nodes in each hidden layer of the subnetwork
candidates. Note that this parameter is ignored in a DNN with no hidden
layers.
learn_mixture_weights: Whether to solve a learning problem to find the
best mixture weights, or use their default value according to the
mixture weight type. When `False`, the subnetworks will return a no_op
for the mixture weight train op.
seed: A random seed.
Returns:
An instance of `Generator`.
"""
self._seed = seed
self._dnn_builder_fn = functools.partial(
_SimpleDNNBuilder,
optimizer=optimizer,
layer_size=layer_size,
learn_mixture_weights=learn_mixture_weights)
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""See `adanet.subnetwork.Generator`."""
num_layers = 0
seed = self._seed
if previous_ensemble:
num_layers = tf.contrib.util.constant_value(
previous_ensemble.weighted_subnetworks[-1].subnetwork.persisted_tensors[_NUM_LAYERS_KEY])
if seed is not None:
seed += iteration_number
return [
self._dnn_builder_fn(num_layers=num_layers, seed=seed),
self._dnn_builder_fn(num_layers=num_layers+1, seed=seed)
]
"""
Train and evaluate
Next we create an adanet.Estimator using the SimpleDNNGenerator we just defined.
In this section we will show the effects of two hyperparameters: learning mixture weights
and complexity regularization.
On the righthand side you will be able to play with the hyperparameters of this model. Until you reach
the end of this section, we ask that you not change them.
At first we will not learn the mixture weights, using their default initial value. Here they will
be scalars initialized to $1/N$ where $N$ is the number of subnetworks in the ensemble, effectively
creating a uniform average ensemble.
"""
#@title AdaNet parameters
LEARNING_RATE = 0.001 #@param {type:"number"}
TRAIN_STEPS = 60000 #@param {type:"integer"}
BATCH_SIZE = 32 #@param {type:"integer"}
LEARN_MIXTURE_WEIGHTS = False #@param {type:"boolean"}
ADANET_LAMBDA = 0 #@param {type:"number"}
ADANET_ITERATIONS = 3 #@param {type:"integer"}
def train_and_evaluate(experiment_name, learn_mixture_weights=LEARN_MIXTURE_WEIGHTS,
adanet_lambda=ADANET_LAMBDA):
"""Trains an `adanet.Estimator` to predict housing prices."""
model_dir = os.path.join(LOG_DIR, experiment_name)
estimator = adanet.Estimator(
# Since we are predicting housing prices, we'll use a regression
# head that optimizes for MSE.
head=tf.contrib.estimator.regression_head(
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE),
# Define the generator, which defines our search space of subnetworks
# to train as candidates to add to the final AdaNet model
subnetwork_generator=SimpleDNNGenerator(
optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
learn_mixture_weights=learn_mixture_weights,
seed=RANDOM_SEED),
        # Lambda is the strength of complexity regularization. A larger
# value will penalize more complex subnetworks.
adanet_lambda=adanet_lambda,
# The number of train steps per iteration.
max_iteration_steps=TRAIN_STEPS // ADANET_ITERATIONS,
# The evaluator will evaluate the model on the full training set to
# compute the overall AdaNet loss (train loss + complexity
# regularization) to select the best candidate to include in the
# final AdaNet model.
evaluator=adanet.Evaluator(
input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE)),
# Configuration for Estimators
config=tf.estimator.RunConfig(
save_summary_steps=5000,
save_checkpoints_steps=5000,
tf_random_seed=RANDOM_SEED,
model_dir=model_dir))
    # Train and evaluate using the tf.estimator tooling.
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
max_steps=TRAIN_STEPS)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
steps=None,
start_delay_secs=1,
throttle_secs=30,
)
return tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def ensemble_architecture(result):
"""Extracts the ensemble architecture from evaluation results."""
architecture = result["architecture/adanet/ensembles"]
# The architecture is a serialized Summary proto for TensorBoard.
summary_proto = tf.summary.Summary.FromString(architecture)
return summary_proto.value[0].tensor.string_val[0]
if __name__ == "__main__":
results, _ = train_and_evaluate("uniform_average_ensemble_baseline")
print("Loss:", results["average_loss"])
print("Architecture:", ensemble_architecture(results)) | 3.28125 | 3 |
Basics/exercises/online_dictionary_problem.py | AMZEnterprise/Python_Course_Jadi | 0 | 12768497 | words_count = int(input())
words_dict = {}
def add_word(word,definition):
words_dict[word] = definition
def translate_sentence(words_list):
    # translate each known word; keep unknown words unchanged
    return " ".join(words_dict.get(word, word) for word in words_list)
for i in range(words_count + 1):
text = input()
words = text.split(" ")
if len(words) == 2:
add_word(words[0] ,words[1])
else:
print(translate_sentence(words))
| 3.9375 | 4 |
classNotes/control_structures2,3/Selection3/control_structures.py | minefarmer/Think_like_a_programmer | 0 | 12768498 | ''' Control Structures
A statement used to control the flow of execution in a program is called a control structure.
Types of control structures
1. Sequence ******************************************************
In a sequential structure the statements are executed in the same order in which they are specified in the program. The control flows from one statement to the next in a logical sequence, and every statement is executed exactly once.
It means that no statement is skipped and no statement is executed more than once.
Statement # 1
Statement # 2
Statement # 3
Statement # 4
An example
Start
|
input base
|
input height
|
calculate area
Area = 1/2 * base * height
|
Display Area
|
End
2. Selection ******************************************************
A selection structure selects a statement or set of statements to execute on the basis of a condition.
In this structure, a statement or set of statements is executed when the condition is True, and ignored when the condition is False.
An example
Suppose a program that inputs the temperature and then displays a message on the screen.
If the temperature is greater than 35, it displays the message "Hot day".
When the temperature is between 25 and 35, it displays the message "Pleasant day".
If it is less than 25, it displays the message "Cool day". (A Python version of this program follows the flowchart below.)
Flowchart of simple selection.
              Condition
               T or F
          T /         \ F
   (if True)           (if False)
Statement #1           Statement #2
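
For example, the temperature program described above can be written in Python as
(assuming the 25-35 range is inclusive for "Pleasant day"):

temperature = float(input("Enter the temperature: "))
if temperature > 35:
    print("Hot day")
elif temperature >= 25:
    print("Pleasant day")
else:
    print("Cool day")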
3. Repetition
A repetition structure executes a statement or set of statements repeatedly.
It is also known as iterations or loops.
An example.
Suppose we want to display the message "Hello World" on the screen one thousand times!
It would take a huge amount of time to write that code and it would take more space as well (lengthy, awkward-looking code).
It is very easy to perform this task using loops or the repetition structure, as the short example below shows.
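
In Python, the repetition structure makes this a two-line program:

for i in range(1000):
    print("Hello World")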
Flowchart of Repetition
  +----------------+
  |                |
  |            Condition ---F---> End of loop
  |             T or F
  |                | T
  |                v
  +----------- Statement
4. Relational or Comparison Operators.
Less than, greater than, or equal to.
Sometimes we need to do a lot of comparisons in order to see whether a specific command should be executed.
The conditional statements are used to specify conditions in programs.
A relational operator compares two values. It produces a result of True or False.
The relational operators are sometimes called the conditional operators as they test conditions that are true or false.
RELATIONAL OR COMPARISON OPERATORS
> Greater than operator returns true if the value on the left side of > is greater than the value on the right side. Otherwise, it returns false.
>>> # greater than operator (>)
>>> 10 > 3
True
>>> 10 > 13
False
< Less than operator returns true if the value on the left side of < is less than the value on the right side. Otherwise it returns false.
>>> # Less Than operator (<)
>>> 3 < 7
True
>>> 10 < 7
False
== Equals operator returns true if the values on both sides of == are equal. Otherwise it returns false. Note: the assignment operator (=) assigns the value on the right-hand side of an expression to the variable on the left-hand side, while the equality comparison operator (==) compares the two values on its left and right-hand sides.
>>> 10 == 10
True
>>> 10 == 11
False
>= Greater than or equal to operator returns true if the value on the left side of >= is greater than or equal to the value on the right side. Otherwise returns false.
>>> # greater than or equal to operator (>=)
>>> 10 >= 9
True
>>> 10 >= 10
True
>>> 10 >= 11
False
<= Less than or equal to operator returns true if the value on the left side of <= is less than or equal to the value on the right side. Otherwise returns false.
>>> # less than or equal to operator (<=)
>>> 10 <= 10
True
>>> 10 <= 11
True
>>> 10 <= 9
False
!= The not equal to operator. Returns true if the value on the left side of != is not equal to the value on the right. Otherwise returns false.
>>> # not equal to operator
>>> 10 != 10
False
>>> 10 == 10
True
>>> 3 != 5
True
'''
| 3.265625 | 3 |
bii_webapp/apps/accounts/views.py | ISA-tools/bii-webapp | 2 | 12768499 | <filename>bii_webapp/apps/accounts/views.py
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect,render
from forms import ProfileForm,UserForm
from models import UserProfile
@login_required
def profile(request):
user = request.user
profile,created=UserProfile.objects.get_or_create(user=user)
if request.method=='POST':
profileform = ProfileForm(request.POST,instance=profile)
userform = UserForm(request.POST,instance=user)
if profileform.is_valid() and userform.is_valid():
profileform.save()
userform.save()
return redirect('bii_webapp.apps.browse.views.browse')
else:
profileform=ProfileForm(instance=profile)
userform=UserForm(instance=user)
    return render(request,'profiles/profile.html',{'profileform':profileform,'userform':userform})
| 2.109375 | 2 |
Python/033 SearchInRotatedArrayI.py | Fiona08/leetcode | 0 | 12768500 | #33
# Time: O(logn)
# Space: O(1)
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# You are given a target value to search. If found in the array return its index, otherwise return -1.
#
# You may assume no duplicate exists in the array.
class binarySearchSol():
def searchInRotatedArrayI(self,nums,target):
left,right=0,len(nums)-1
while left<=right:
mid=(left+right)//2
if nums[mid]==target:
return mid
elif (nums[mid]>nums[left] and nums[left]<=target<nums[mid]) or \
(nums[mid]<nums[left] and not (nums[mid]<target<=nums[right])):
right=mid-1
else:
left=mid+1
return -1
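
# Example (illustrative):
#     sol = binarySearchSol()
#     sol.searchInRotatedArrayI([4, 5, 6, 7, 0, 1, 2], 0)  # -> 4
#     sol.searchInRotatedArrayI([4, 5, 6, 7, 0, 1, 2], 3)  # -> -1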
| 3.734375 | 4 |
xlmhg/result.py | flo-compbio/xlmhg | 15 | 12768501 | # Copyright (c) 2016-2019 <NAME>
#
# This file is part of XL-mHG.
"""Contains the `mHGResult` class."""
import sys
import hashlib
import logging
import numpy as np
try:
# This is a duct-tape fix for the Google App Engine, on which importing
# the C extension fails.
from . import mhg_cython
except ImportError:
print('Warning (xlmhg): Failed to import "mhg_cython" C extension.',
file=sys.stderr)
from . import mhg as mhg_cython
logger = logging.getLogger(__name__)
class mHGResult(object):
"""The result of an XL-mHG test.
This class is used by the `get_xlmhg_test_result` function to represent the
result of an XL-mHG test.
Parameters
----------
N: int
See :attr:`N` attribute.
indices
See :attr:`indices` attribute.
X: int
See :attr:`X` attribute.
L: int
        See :attr:`L` attribute.
stat: float
See :attr:`stat` attribute.
cutoff: int
See :attr:`cutoff` attribute.
pval: float
See :attr:`pval` attribute.
pval_thresh: float, optional
See :attr:`pval_thresh` attribute.
escore_pval_thresh: float, optional
See :attr:`escore_pval_thresh` attribute.
escore_tol: float, optional
See :attr:`escore_tol` attribute.
Attributes
----------
N: int
The length of the ranked list (i.e., the number of elements in it).
indices: `numpy.ndarray` with ``ndim=1`` and ``dtype=np.uint16``.
A sorted (!) list of indices of all the 1's in the ranked list.
X: int
The XL-mHG X parameter.
L: int
The XL-mHG L parameter.
stat: float
The XL-mHG test statistic.
cutoff: int
The XL-mHG cutoff.
pval: float
The XL-mHG p-value.
pval_thresh: float or None
The user-specified significance (p-value) threshold for this test.
escore_pval_thresh: float or None
The user-specified p-value threshold used in the E-score calculation.
escore_tol: float or None
The floating point tolerance used in the E-score calculation.
"""
def __init__(self, N, indices, X, L, stat, cutoff, pval,
pval_thresh=None, escore_pval_thresh=None, escore_tol=None):
assert isinstance(N, int)
assert isinstance(indices, np.ndarray) and indices.ndim == 1 and \
np.issubdtype(indices.dtype, np.uint16) and \
indices.flags.c_contiguous
assert isinstance(X, int)
assert isinstance(L, int)
assert isinstance(stat, float)
assert isinstance(cutoff, int)
assert isinstance(pval, float)
if pval_thresh is not None:
assert isinstance(pval_thresh, float)
if escore_pval_thresh is not None:
assert isinstance(escore_pval_thresh, float)
if escore_tol is not None:
assert isinstance(escore_tol, float)
self.indices = indices
self.N = N
self.X = X
self.L = L
self.stat = stat
self.cutoff = cutoff
self.pval = pval
self.pval_thresh = pval_thresh
self.escore_pval_thresh = escore_pval_thresh
self.escore_tol = escore_tol
def __repr__(self):
return '<%s object (N=%d, K=%d, pval=%.1e, hash="%s")>' \
% (self.__class__.__name__,
self.N, self.K, self.pval, self.hash)
def __str__(self):
return '<%s object (N=%d, K=%d, X=%d, L=%d, pval=%.1e)>' \
% (self.__class__.__name__,
self.N, self.K, self.X, self.L, self.pval)
def __eq__(self, other):
if self is other:
return True
elif type(self) == type(other):
return self.hash == other.hash
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
@property
def v(self):
"""(property) Returns the list as a `numpy.ndarray`
(with dtype ``np.uint8``).
"""
v = np.zeros(self.N, dtype=np.uint8)
v[self.indices] = 1
return v
@property
def K(self):
"""(property) Returns the number of 1's in the list."""
return self.indices.size
@property
def k(self):
"""(property) Returns the number of 1's above the XL-mHG cutoff."""
return int(np.sum(self.indices < self.cutoff))
@property
def hash(self):
"""(property) Returns a unique hash value for the result."""
data_str = ';'.join(
[str(repr(var)) for var in
[self.N, self.K, self.X, self.L,
self.stat, self.cutoff, self.pval,
self.pval_thresh, self.escore_pval_thresh]])
data_str += ';'
data = data_str.encode('UTF-8') + self.indices.tobytes()
return str(hashlib.md5(data).hexdigest())
@property
def fold_enrichment(self):
"""(property) Returns the fold enrichment at the XL-mHG cutoff."""
return self.k / (self.K*(self.cutoff/float(self.N)))
@property
def escore(self):
"""(property) Returns the E-score associated with the result."""
hg_pval_thresh = self.escore_pval_thresh or self.pval
escore_tol = self.escore_tol or mhg_cython.get_default_tol()
es = mhg_cython.get_xlmhg_escore(
self.indices, self.N, self.K, self.X, self.L,
hg_pval_thresh, escore_tol)
return es | 2.203125 | 2 |
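
# Illustrative only: mHGResult objects are normally produced by the package's
# XL-mHG test function rather than constructed directly, e.g. (hypothetical
# values for N and indices):
#
#     result = get_xlmhg_test_result(N, indices)
#     print(result.pval, result.fold_enrichment, result.escore)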
src/sphinx_c_autodoc/napoleon/__init__.py | speedyleion/sphinx-c-doc | 7 | 12768502 | """
Extend napoleon to provide a `Members` section for C structs and unions
similar to the `Attributes` section in python objects.
"""
from functools import partial
from typing import Optional, Any, List, Union, Dict, Callable
from sphinx.config import Config
from sphinx.ext.autodoc import Options
from sphinx.application import Sphinx
from sphinx.ext.napoleon import GoogleDocstring
# pylint: disable=too-few-public-methods
class CAutoDocString(GoogleDocstring):
"""
A docstring that can handle documenting some extra c sections, in
particular, `members` sections of structs and unions and `enumerators`
sections of enums.
"""
def __init__(
self,
docstring: Union[str, List[str]],
config: Optional[Config] = None,
app: Optional[Sphinx] = None,
what: str = "",
name: str = "",
obj: Optional[Any] = None,
options: Optional[Options] = None,
) -> None:
if not hasattr(self, "_sections"):
self._sections = self.get_default_sections()
super().__init__(docstring, config, app, what, name, obj, options)
def get_default_sections(self) -> Dict[str, Callable]:
"""
Creates the dictionary that should be used in :attr:`_sections` for
this instance. If one wants to extend this class simply do::
class MyDocString(CAutoDocString):
def get_default_sections(self) -> Dict[str, Callable]:
sections = super().get_default_sections()
sections["my_custom_section"] = self._some_method
Returns:
Dict[str, Callable]: The dictionary of sections to methods that
should be used :attr:`_sections`.
"""
default_sections: Dict[str, Callable] = {
"args": self._parse_parameters_section,
"arguments": self._parse_parameters_section,
"attention": partial(self._parse_admonition, "attention"),
"attributes": self._parse_attributes_section,
"caution": partial(self._parse_admonition, "caution"),
"danger": partial(self._parse_admonition, "danger"),
"enumerators": partial(self._parse_nested_section, "enumerator"),
"error": partial(self._parse_admonition, "error"),
"example": self._parse_examples_section,
"examples": self._parse_examples_section,
"hint": partial(self._parse_admonition, "hint"),
"important": partial(self._parse_admonition, "important"),
"members": partial(self._parse_nested_section, "member"),
"note": partial(self._parse_admonition, "note"),
"notes": self._parse_notes_section,
"parameters": self._parse_parameters_section,
"return": self._parse_returns_section,
"returns": self._parse_returns_section,
"references": self._parse_references_section,
"see also": self._parse_see_also_section,
"tip": partial(self._parse_admonition, "tip"),
"todo": partial(self._parse_admonition, "todo"),
"warning": partial(self._parse_admonition, "warning"),
"warnings": partial(self._parse_admonition, "warning"),
"warns": self._parse_warns_section,
"yield": self._parse_yields_section,
"yields": self._parse_yields_section,
}
return default_sections
# pylint: disable=unused-argument
def _parse_nested_section(self, nested_title: str, section: str) -> List[str]:
"""
Parse a members section of a comment.
The members section is only expected to be seen in processing of C
files. Each item will be formatted using the ``.. c:member:: <name>``
syntax.
Args:
section (str): The name of the parsed section. Unused.
nested_title (str): The name to give to the nested items.
Returns:
List[str]: The list of lines from `section` converted to the
appropriate reST.
"""
# Place a blank line prior to the section this ensures there is a
# newline prior to the first `.. c:member::` section and thus it
# doesn't get treated as a sentence in the same paragraph
lines = [""]
# Type should be unused, it's not normal in c to do `var (type)` it's
# usually `type var`
for name, _, desc in self._consume_fields():
lines.extend([f".. c:{nested_title}:: {name}", ""])
fields = self._format_field("", "", desc)
lines.extend(self._indent(fields, 3))
lines.append("")
return lines
def process_autodoc_docstring(
app: Any,
what: str,
name: str,
obj: Any,
options: Optional[Options],
lines: List[str],
) -> None:
"""
Call back for autodoc's ``autodoc-process-docstring`` event.
Args:
app (:class:`Sphinx`): The Sphinx application object
what (str): The type of the object which the comment belongs to. One
of "cmodule", "cmember", "ctype", "cfunction", "cstruct".
name (str): The fully qualified name of the object. For C files this
may be a little polluted as it will be
``my_file.c.some_item.some_items_member``.
obj (any): The object itself
options (dict): The options given to the directive.
lines (List[str]): The lines of the comment. This is modified in place.
"""
docstring = CAutoDocString(lines, app.config, app, what, name, obj, options)
result_lines = docstring.lines()
lines[:] = result_lines[:]
def setup(app: Sphinx) -> None:
"""
Extend sphinx to assist sphinx_c_autodocs to allow Google style
docstrings for C constructs.
Args:
app (:class:`Sphinx`): The Sphinx application object
"""
app.setup_extension("sphinx.ext.napoleon")
app.connect("autodoc-process-docstring", process_autodoc_docstring)
| 2.390625 | 2 |
BitTorrent-5.2.2/BTL/likewin32api.py | jpabb7/p2pScrapper | 4 | 12768503 | import ctypes
DWORD = ctypes.c_ulong
MAX_PATH = ctypes.c_int(260)
MAX_PATH_NULL = int(MAX_PATH.value) + 1
def decode(s):
if isinstance(s, unicode):
return s
return s.decode('mbcs')
def GetModuleFileName(handle):
r = 0
if hasattr(ctypes.windll.kernel32, "GetModuleFileNameW"):
name = ctypes.create_unicode_buffer(MAX_PATH_NULL)
r = ctypes.windll.kernel32.GetModuleFileNameW(handle, name, MAX_PATH_NULL)
if r == 0:
name = ctypes.create_string_buffer(MAX_PATH_NULL)
ctypes.windll.kernel32.GetModuleFileNameA(handle, name, MAX_PATH_NULL)
return decode(name.value)
def GetTempPath():
r = 0
if hasattr(ctypes.windll.kernel32, "GetTempPathW"):
name = ctypes.create_unicode_buffer(MAX_PATH_NULL)
r = ctypes.windll.kernel32.GetTempPathW(MAX_PATH_NULL, name)
if r == 0:
name = ctypes.create_string_buffer(MAX_PATH_NULL)
ctypes.windll.kernel32.GetTempPathA(MAX_PATH_NULL, name)
return decode(name.value)
def ShellExecute(hwnd, operation, file, parameters, directory, showCmd):
if hasattr(ctypes.windll.shell32, 'ShellExecuteW'):
SW = ctypes.windll.shell32.ShellExecuteW
operation = decode(operation)
file = decode(file)
parameters = decode(parameters)
directory = decode(directory)
else:
SW = ctypes.windll.shell32.ShellExecuteA
return SW(hwnd, operation, file, parameters, directory, showCmd)
def GetVolumeInformation(rootPathName):
volumeSerialNumber = DWORD()
maximumComponentLength = DWORD()
fileSystemFlags = DWORD()
if hasattr(ctypes.windll.kernel32, "GetVolumeInformationW"):
rootPathName = decode(rootPathName)
volumeNameBuffer = ctypes.create_unicode_buffer(MAX_PATH_NULL)
fileSystemNameBuffer = ctypes.create_unicode_buffer(MAX_PATH_NULL)
GVI = ctypes.windll.kernel32.GetVolumeInformationW
else:
volumeNameBuffer = ctypes.create_string_buffer(MAX_PATH_NULL)
fileSystemNameBuffer = ctypes.create_string_buffer(MAX_PATH_NULL)
GVI = ctypes.windll.kernel32.GetVolumeInformationA
GVI(rootPathName, volumeNameBuffer, MAX_PATH_NULL,
ctypes.byref(volumeSerialNumber), ctypes.byref(maximumComponentLength),
ctypes.byref(fileSystemFlags), fileSystemNameBuffer, MAX_PATH_NULL)
return (volumeNameBuffer.value, volumeSerialNumber.value,
maximumComponentLength.value, fileSystemFlags.value,
fileSystemNameBuffer.value)
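
# Example (Windows only; illustrative):
#     name, serial, max_component_len, flags, fs_name = GetVolumeInformation(u"C:\\")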
CloseHandle = ctypes.windll.kernel32.CloseHandle
GetLastError = ctypes.windll.kernel32.GetLastError
GetCurrentProcessId = ctypes.windll.kernel32.GetCurrentProcessId
OpenProcess = ctypes.windll.kernel32.OpenProcess
TerminateProcess = ctypes.windll.kernel32.TerminateProcess
| 2.15625 | 2 |
fetalnav/transforms/itk_transforms.py | ntoussaint/fetalnav | 24 | 12768504 | <gh_stars>10-100
# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
"""
Relevant Transforms of 2D/3D Biomedical Images using itk (sitk) images.
Transforms
1. ITK image to another ITK image
2. ITK image to pytorch tensors
3. Pytorch tensors to ITK images
"""
# Authors:
# <NAME> <<EMAIL>>
# King's College London, UK
# Department of Computing, Imperial College London, UK
# <NAME> <<EMAIL>>
# King's College London, UK
import SimpleITK as sitk
import math
import torch
import numpy as np
class ToNumpy(object):
"""Convert an itkImage to a ``numpy.ndarray``.
    Converts an itkImage (W x H x D) or numpy.ndarray (D x H x W).
    Note: itkImage ordering is different from that of numpy and pytorch.
"""
def __init__(self, outputtype=None):
self.outputtype = outputtype
def __call__(self, *inputs):
"""
Arguments
---------
*inputs : itkImages in SimpleITK format
Images to be converted to numpy.
Returns
-------
Numpy nd arrays
"""
outputs = []
for idx, _input in enumerate(inputs):
outputs.append(self._tonumpy(_input,self.outputtype))
return outputs if idx > 0 else outputs[0]
def _tonumpy(self, input, outputtype):
ret = None
if isinstance(input, sitk.SimpleITK.Image):
# Extract the numpy nparray from the ITK image
            narray = sitk.GetArrayFromImage(input)
# The image is now stored as (y,x), transpose it
ret = np.transpose(narray, [1,2,0])
        elif isinstance(input, np.ndarray):
# if the input is already numpy, assume it is in the right order
ret = input
# overwrite output type if requested
if self.outputtype is not None:
ret = ret.astype(outputtype)
return ret
class ToTensor(object):
"""Convert an itkImage or ``numpy.ndarray`` to tensor.
Converts a itkImage (W x H x D) or numpy.ndarray (D x H X W) to a
torch.FloatTensor of shape (D X H X W).
i.e. itkImage ordering is different than of numpy and pytorch.
"""
def __call__(self, *inputs):
"""
Arguments
---------
*inputs : itkImages or numpy.ndarrays
Images to be converted to Tensor.
Returns
-------
Tensors
"""
outputs = []
for idx, _input in enumerate(inputs):
_input_is_numpy = False
if isinstance(_input, sitk.SimpleITK.Image):
# Get numpy array (is a deep copy!)
_input = sitk.GetArrayFromImage(_input)
_input_is_numpy = True
#print(f'input or converted numpy array type: {_input.dtype}')
_input = torch.from_numpy(_input.astype(np.double))
#_input = torch.from_numpy(_input)
if _input_is_numpy:
                _input = _input.permute(2, 1, 0)  # reorder axes from (z, y, x) to (x, y, z)
# float for backward compatibility ?
outputs.append(_input.float())
return outputs if idx > 0 else outputs[0]
class ToITKImage(object):
def __init__(self, ref_img=None, itk_infos=None):
"""
Converts one or more torch tensors or numpy ndarrays of shape D x H x W to itk image
of shape W x H x D
Takes metadata from ref_img or itk_infos if given otherwise uses default itk metadata.
Arguments
---------
ref_img : itkImage
Reference image from which to take all meta information.
Supports only one ref image.
itk_infos : dictionary or a sequence (list or tuple) of dictionaries
each dictionary with following keys:
origin, spacing and direction
"""
_l = [x for x in [ref_img, itk_infos] if x is not None]
assert len(_l) <= 1, 'At most one of ref_img, itk_infos can be not none'
self.ref_img = ref_img
self.itk_infos = itk_infos
def __call__(self, *inputs):
outputs = []
if not isinstance(self.itk_infos, (list, tuple)):
self.itk_infos = (self.itk_infos,)*len(inputs)
assert len(inputs) == len(self.itk_infos), 'num of inputs and itk_infos do not match'
for idx, _input in enumerate(inputs):
output_curr = self._toITK_image(_input, self.ref_img, self.itk_infos[idx])
outputs.append(output_curr)
return outputs if idx > 0 else outputs[0]
def _toITK_image(self, tensor, ref_img=None, itk_info=None):
"""
Arguments
---------
tensor : Tensor or numpy.ndarray
Tensor/array to be converted to ITK image.
The ordering of input tensor is in z,y,x which will be converted
to x,y,z.
ref_img : itkImage
Copy origin, spacing and direction from this image
itk_info : dictionary with origin, spacing and direction as keys
Overwrite info take from ref_img if not None
Returns
-------
ITK image
"""
if not isinstance(tensor, np.ndarray):
tensor=tensor.permute(2,1,0)
tensor = (tensor.cpu()).numpy()
else:
tensor = tensor.transpose(2,1,0) #numpy version of permute!
itk_img = sitk.GetImageFromArray(tensor)
if ref_img is not None:
itk_img.CopyInformation(ref_img)
if itk_info is not None:
#print(f'itk_info: {itk_info}')
itk_img.SetDirection(itk_info['direction'])
itk_img.SetOrigin(itk_info['origin'])
itk_img.SetSpacing(itk_info['spacing'])
return itk_img
class Resample(object):
def __init__(self, new_spacing=None, new_size=None, interp = 'linear'):
"""
Resample an ITK image to either:
a desired voxel spacing in mm given by [spXmm, spYmm, spZmm] or,
a desired size [x, y, z]
Arguments
---------
`new_spacing` : tuple or list (e.g [1.,1.,1.])
New spacing in mm. If None, must provide `new_size`
`new_size` : tuple or list of ints (e.g. [100, 100, 100])
If None, must provide `new_spacing`
`interp` : string or list/tuple of string
possible values from this set: {'linear', 'nearest', 'bspline'}
Different types of interpolation can be provided for each input,
e.g. for two inputs, `interp=['linear','nearest']
"""
if new_spacing is None:
assert new_size is not None, "new_spacing or new_size must be provided"
assert len(new_size) == 3, "new_size must be of length 3 (x, y, z)"
self.set_spacing = False
else:
assert new_size is None, "cannot provide both new_spacing and new_size"
assert len(new_spacing) == 3, "new_spacing must be of length 3"
self.set_spacing = True
self.new_spacing = new_spacing
self.new_size = new_size
self.interp = interp
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
outputs = []
for idx, _input in enumerate(inputs):
assert isinstance(_input, sitk.SimpleITK.Image), 'input not an image!'
in_size = _input.GetSize()
in_spacing = _input.GetSpacing()
if self.set_spacing:
out_spacing = self.new_spacing
out_size = [int(math.ceil(in_size[0]*(in_spacing[0]/out_spacing[0]))),
int(math.ceil(in_size[1]*(in_spacing[1]/out_spacing[1]))),
int(math.ceil(in_size[2]*(in_spacing[2]/out_spacing[2])))]
else:
out_size = self.new_size
out_spacing = [in_spacing[0]*in_size[0]/out_size[0],
in_spacing[1]*in_size[1]/out_size[1],
in_spacing[2]*in_size[2]/out_size[2]]
outputs.append(self._resample(
_input, out_spacing, out_size, interp[idx]))
return outputs if idx > 0 else outputs[0]
def _resample(self, img, out_spacing, out_size, interp):
if interp == 'linear':
interp_func = sitk.sitkLinear
elif interp == 'nearest':
interp_func = sitk.sitkNearestNeighbor
elif interp == 'bspline':
interp_func = sitk.sitkBSpline
else:
assert False, "only linear, nearest and bspline interpolation supported"
resampled_img = sitk.Resample(img, out_size, sitk.Transform(), interp_func,
img.GetOrigin(), out_spacing, img.GetDirection(),
0.0, img.GetPixelIDValue())
return resampled_img
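
# Illustrative usage (a sketch; assumes SimpleITK and torch are installed and
# 'image.nii' is an existing volume):
#     img = sitk.ReadImage('image.nii')
#     resampled = Resample(new_spacing=[1.0, 1.0, 1.0])(img)
#     tensor = ToTensor()(resampled)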
| 2.984375 | 3 |
games/detection_game.py | ieee-uh-makers/pi-workshop | 0 | 12768505 | <reponame>ieee-uh-makers/pi-workshop
import time
from typing import List
import random
class Region(object):
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
def __repr__(self):
return "X: %d Y: %d W: %d H: %d" % (self.x, self.y, self.w, self.h)
class DetectionGame(object):
def __init__(self, level: int=1):
self.level = level
self.started = False
self.levels = {1: {"win_start": 1, "win_end": 4},
2: {"win_start": 2, "win_end": 4},
3: {"win_start": 2, "win_end": 5},
4: {"win_start": 2, "win_end": 4}}
self.win_window = self.levels[level]['win_end'] - self.levels[level]['win_start']
self.counter = 0
def start(self):
self.started = True
self.start_time = time.time()
def detections(self) -> List[Region]:
time.sleep(0.1)
tss = time.time() - self.start_time
if tss > self.levels[self.level]['win_end']:
print("You missed it!")
if self.level == 1:
return self._detections_level1(tss)
elif self.level == 2:
return self._detections_level2(tss)
elif self.level == 3:
return self._detections_level3(tss)
elif self.level == 4:
return self._detections_level4(tss)
def _detections_level1(self, tss):
if tss >= self.levels[1]['win_start'] and tss <= self.levels[1]['win_end']:
coeff = tss/self.win_window
r = Region(coeff * 500 + int(random.random() * 5), coeff * 500 + int(random.random() * 5)
, 100 + int(random.random() * 100),
100 + int(random.random() * 100))
return [r]
return []
def _detections_level2(self, tss):
if tss >= self.levels[2]['win_start'] and tss <= self.levels[2]['win_end']:
coeff = tss/self.win_window
r = Region(coeff * 500 + int(random.random() * 5), coeff * 500 + int(random.random() * 5)
, 100 + int(random.random() * 100),
100 + int(random.random() * 100))
return [r]
else:
if self.counter == 5:
coeff = tss/self.win_window
r = Region(coeff*500+int(random.random() * 5), coeff*500+int(random.random() * 5)
, 100+int(random.random() * 100),
100+int(random.random() * 100))
self.counter = 0
return [r]
else:
self.counter += 1
return []
def _detections_level3(self, tss):
if tss >= self.levels[3]['win_start'] and tss <= self.levels[3]['win_end']:
if self.counter == 1:
coeff = tss/self.win_window
r = Region(coeff*500+int(random.random() * 5), coeff*500+int(random.random() * 5)
, 100+int(random.random() * 100),
100+int(random.random() * 100))
self.counter = 0
return [r]
else:
self.counter += 1
else:
if self.counter == 5:
coeff = tss/self.win_window
r = Region(coeff*500+int(random.random() * 5), coeff*500+int(random.random() * 5)
, 100+int(random.random() * 100),
100+int(random.random() * 100))
self.counter = 0
return [r]
else:
self.counter += 1
return []
def _detections_level4(self, tss):
rects = []
for i in range(0, int(random.random()*4)):
w = int(random.random() * 100)
h = int(random.random() * 100)
x = int(random.random() * 540)
y = int(random.random() * 380)
rects.append(Region(x, y, w, h))
if tss >= self.levels[4]['win_start'] and tss <= self.levels[4]['win_end']:
if self.counter == 1:
coeff = tss/self.win_window
r = Region(coeff*500+int(random.random() * 5), coeff*500+int(random.random() * 5)
, 100+int(random.random() * 100),
100+int(random.random() * 100))
self.counter = 0
rects.append(r)
else:
self.counter += 1
return rects
def alarm(self) -> bool:
tss = time.time() - self.start_time
if tss >= self.levels[self.level]['win_start'] and tss <= self.levels[self.level]['win_end']:
print("Success!")
return True
else:
print("False positive. Try again.")
return False
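
# Example round (illustrative):
#     game = DetectionGame(level=1)
#     game.start()
#     regions = game.detections()  # poll until regions appear
#     hit = game.alarm()           # True if raised inside the win window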
| 3.234375 | 3 |
attic/readjsonQ4.py | CitizenScienceInAstronomyWorkshop/pyIBCC | 11 | 12768506 | # created 2013 Feb 23 by MES
# reads in all Q4 simulation json files in directory and outputs to Q4sim_positions.txt
# the start x and end x position of tne injected transit(s) if present
# not all Q4 simulations jsons will have a planet transit in it so if there is no planet transit
# present in a given json file nothing is outputted ofr it
import json
import glob
fjsons=glob.glob("APH4*.json")
output=open("Q4sim_positions.txt", "w")
for fs in fjsons:
print fs
f=open(fs,'r')
r=f.read()
f.close()
if r[0]=='l':
r=r[17:].rstrip('\n;)')
    try:
        data = json.loads(r)
    except ValueError:
        # skip this file: 'data' is undefined if the parse failed
        print 'error reading json file'
        print r[:100] + '...' + r[-100:]
        continue
    print data[0]['tr']
startx=-1.0
endx=-1.0
for i in range(len(data)):
if ((startx < 0) and (data[i]['tr']==1)): # start of a transit
startx=data[i]['x']
print data[i]
if ((startx >= 0 ) and (data[i]['tr']==1)): # keep updating endpoint
print data[i]
endx=data[i]['x']
if (endx >=0 and data[i]['tr']==0): # found the endpoint
# end of transit
print data[i]
print fs, startx, endx
output.write(fs+" "+str(startx)+" "+str(endx)+"\n")
endx=-1.0
startx=-1.0
if(endx>0):
print fs, startx, endx
output.write(fs+" "+str(startx)+" "+str(endx)+"\n")
output.close()
| 2.703125 | 3 |
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Reshape.py | samysweb/dnnv | 5 | 12768507 | import numpy as np
import pytest
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
def test_Reshape():
original_shape = [0, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([3, 4, 0], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape, allowzero=True)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
op = Reshape(
Input((0, 3, 4), np.dtype(np.float32)),
Input((3,), np.dtype(np.int64)),
allowzero=True,
)
tf_op = TensorflowConverter().visit(op)
result = tf_op(data, new_shape).numpy()
assert np.allclose(result, y)
def test_Reshape_reordered_all_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([4, 2, 3], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_reordered_last_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 4, 3], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_reduced_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 12], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_extended_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 3, 2, 2], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_one_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([24], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_negative_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, -1, 2], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_negative_extended_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([-1, 2, 3, 4], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_zero_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 0, 4, 1], dtype=np.int64)
y = np.reshape(data, [2, 3, 4, 1])
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_zero_and_negative_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 0, 1, -1], dtype=np.int64)
y = np.reshape(data, [2, 3, 1, -1])
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
| 2.328125 | 2 |
autosub3/__init__.py | henry40408/autosub | 10 | 12768508 | <gh_stars>1-10
#!/usr/bin/env python3
"""
Usage:
autosub3.py -h | --help
autosub3.py --list-formats
autosub3.py --list-languages
autosub3.py [options] <source>
Options:
-h --help Show this screen
-q --quiet Do NOT show progress bar
-C --concurrency=<concurrency> Number of concurrent API requests to make [default: 10]
-o --output=<output> Output path for subtitles (by default, subtitles are saved in the same directory and
name as the source path)
-F --format=<format> Destination subtitle format [default: srt]
-S --src-language=<language> Language spoken in source file [default: en]
--debug-audio Extract regions of audio file for debugging
--list-formats List all available subtitle formats
--list-languages List all available source languages
"""
import audioop
import contextlib
import json
import math
import multiprocessing
import os
import sys
import tempfile
import wave
from json import JSONDecodeError
from typing import List
import docopt
import ffmpeg
import requests
from progressbar import Percentage, Bar, ETA
from autosub3.constants import LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL
from autosub3.formatters import FORMATTERS, BaseFormatter
from autosub3.optional_progressbar import OptionalProgressBar
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'en'
def percentile(arr: List, percent: float):
if not arr:
raise RuntimeError('array cannot be empty')
arr = sorted(arr)
k = (len(arr) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return arr[int(k)]
d0 = arr[int(f)] * (c - k)
d1 = arr[int(c)] * (k - f)
return d0 + d1
class FLACConverter(object):
def __init__(self, source_path, include_before=0.25, include_after=0.25):
self.source_path = source_path
self.include_before = include_before
self.include_after = include_after
def __call__(self, region):
try:
start, end = region
start = max(0, start - self.include_before)
end += self.include_after
temp = tempfile.NamedTemporaryFile(suffix='.flac')
stream = ffmpeg.input(self.source_path)
stream = ffmpeg.output(stream, temp.name, ss=start, t=end - start, loglevel='error')
ffmpeg.run(stream, overwrite_output=True)
return temp.read()
except KeyboardInterrupt:
return
class SpeechRecognizer(object):
def __init__(self, language='en', rate=44100, retries=3, api_key=GOOGLE_SPEECH_API_KEY):
self.language = language
self.rate = rate
self.api_key = api_key
self.retries = retries
def __call__(self, data):
try:
for i in range(self.retries):
url = GOOGLE_SPEECH_API_URL.format(lang=self.language, key=self.api_key)
headers = {'Content-Type': 'audio/x-flac; rate=%d' % self.rate}
try:
resp = requests.post(url, data=data, headers=headers)
except requests.exceptions.ConnectionError:
continue
for line in resp.content.decode().split('\n'):
try:
line = json.loads(line)
line = line['result'][0]['alternative'][0]['transcript']
return line[:1].upper() + line[1:]
except (IndexError, JSONDecodeError, KeyError):
# no result
continue
except KeyboardInterrupt:
return
def extract_audio(filename, channels=1, rate=16000):
temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
if not os.path.isfile(filename):
raise RuntimeError('The given file does not exist: {0}'.format(filename))
stream = ffmpeg.input(filename)
stream = ffmpeg.output(stream, temp.name, ac=channels, ar=rate, loglevel='error')
ffmpeg.run(stream, overwrite_output=True)
return temp.name, rate
def find_speech_regions(filename, frame_width=4096, min_region_size=0.5, max_region_size=6):
reader = wave.open(filename)
sample_width = reader.getsampwidth()
rate = reader.getframerate()
n_channels = reader.getnchannels()
chunk_duration = float(frame_width) / rate
n_chunks = int(math.ceil(reader.getnframes() * 1.0 / frame_width))
energies = []
for i in range(n_chunks):
chunk = reader.readframes(frame_width)
energies.append(audioop.rms(chunk, sample_width * n_channels))
threshold = percentile(energies, 0.2)
elapsed_time = 0
regions = []
region_start = None
for energy in energies:
is_silence = energy <= threshold
max_exceeded = region_start and elapsed_time - region_start >= max_region_size
if (max_exceeded or is_silence) and region_start:
if elapsed_time - region_start >= min_region_size:
regions.append((region_start, elapsed_time))
region_start = None
elif (not region_start) and (not is_silence):
region_start = elapsed_time
elapsed_time += chunk_duration
    # close a region that is still open at the end of the file
    if region_start and elapsed_time - region_start >= min_region_size:
        regions.append((region_start, elapsed_time))
    return regions
def output_speech_regions(source_path):
audio_filename, audio_rate = extract_audio(source_path)
converter = FLACConverter(source_path=audio_filename)
regions = find_speech_regions(audio_filename)
if not os.path.exists('regions'):
os.mkdir('regions')
for index, region in enumerate(regions):
region_audio = converter(region=region)
with open('regions/{index}.flac'.format(index=index), 'wb') as region_file:
region_file.write(region_audio)
def main():
    with open('VERSION', 'r') as version_file:
        version = version_file.read()
args = docopt.docopt(__doc__, version=version)
if args['--debug-audio']:
output_speech_regions(args['<source>'])
return 0
if args['--list-formats']:
for subtitle_format in FORMATTERS.keys():
print('{format}'.format(format=subtitle_format))
return 0
if args['--list-languages']:
for code, language in sorted(LANGUAGE_CODES.items()):
print('{code}\t{language}'.format(code=code, language=language))
return 0
if args['--format'] not in FORMATTERS.keys():
print(
'Subtitle format not supported. '
'Run with --list-formats to see all supported formats.'
)
return 1
if args['--src-language'] not in LANGUAGE_CODES.keys():
print(
'Source language not supported. '
'Run with --list-languages to see all supported languages.'
)
return 1
verbose = not args['--quiet']
try:
generate_subtitles(args['<source>'],
concurrency=int(args['--concurrency']),
src_language=args['--src-language'],
subtitle_file_format=args['--format'],
output=args['--output'],
verbose=verbose)
except KeyboardInterrupt:
return 1
return 0
def generate_subtitles(source_path, *,
concurrency=DEFAULT_CONCURRENCY,
src_language=DEFAULT_SRC_LANGUAGE,
subtitle_file_format=DEFAULT_SUBTITLE_FORMAT,
output=None,
verbose=False) -> str:
audio_filename, audio_rate = extract_audio(source_path)
regions = find_speech_regions(audio_filename)
pool = multiprocessing.Pool(concurrency)
converter = FLACConverter(source_path=audio_filename)
recognizer = SpeechRecognizer(language=src_language,
rate=audio_rate,
api_key=GOOGLE_SPEECH_API_KEY)
transcripts = []
if regions:
widgets = ['Converting speech regions to FLAC files: ', Percentage(), ' ', Bar(), ' ', ETA()]
p_bar = OptionalProgressBar(verbose=verbose, widgets=widgets, maxval=len(regions))
try:
p_bar.start()
extracted_regions = []
for i, extracted_region in enumerate(pool.imap(converter, regions)):
extracted_regions.append(extracted_region)
p_bar.update(i)
p_bar.finish()
widgets = ['Performing speech recognition: ', Percentage(), ' ', Bar(), ' ', ETA()]
p_bar = OptionalProgressBar(verbose=verbose, widgets=widgets, maxval=len(regions)).start()
for i, transcript in enumerate(pool.imap(recognizer, extracted_regions)):
transcripts.append(transcript)
p_bar.update(i)
p_bar.finish()
except KeyboardInterrupt:
p_bar.finish()
pool.terminate()
pool.join()
print('Cancelling transcription')
raise
timed_subtitles = [(r, t) for r, t in zip(regions, transcripts) if t]
formatter: BaseFormatter = FORMATTERS.get(subtitle_file_format)()
formatted_subtitles = formatter.generate(timed_subtitles)
with smart_open(output) as f:
f.write(formatted_subtitles)
os.remove(audio_filename)
if output:
print('Subtitles file created at {subtitle_file_path}'.format(subtitle_file_path=output))
return formatted_subtitles
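
# Illustrative call (a sketch; file names are examples):
#     generate_subtitles('movie.mp4', src_language='en',
#                        subtitle_file_format='srt', output='movie.srt')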
# credit: https://stackoverflow.com/a/17603000
@contextlib.contextmanager
def smart_open(filename=None):
if filename and filename != '-':
fh = open(filename, 'w')
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
if __name__ == '__main__':
sys.exit(main())
| 2.453125 | 2 |
tayph/ccf.py | bmorris3/tayph | 0 | 12768509 | <reponame>bmorris3/tayph
__all__ = [
"xcor",
"clean_ccf",
"filter_ccf",
"shift_ccf",
"construct_KpVsys"
]
def xcor(list_of_wls,list_of_orders,wlm,fxm,drv,RVrange,plot=False,list_of_errors=None):
"""
This routine takes a combined dataset (in the form of lists of wl spaces,
spectral orders and possible a matching list of errors on those spectal orders),
as well as a template (wlm,fxm) to cross-correlate with, and the cross-correlation
parameters (drv,RVrange). The code takes on the order of ~10 minutes for an entire
HARPS dataset, which appears to be superior to my old IDL pipe.
The CCF used is the Geneva-style weighted average; not the Pearson CCF. Therefore
it measures true 'average' planet lines, with flux on the y-axis of the CCF.
The template must therefore be (something close to) a binary mask, with values
inside spectral lines (the CCF is scale-invariant so their overall scaling
doesn't matter),
It returns the RV axis and the resulting CCF in a tuple.
Thanks to <NAME> (bmorris3), this code now implements a clever numpy broadcasting trick to
instantly apply and interpolate the wavelength shifts of the model template onto
the data grid in 2 dimensions. The matrix multiplication operator (originally
recommended to me by Matteo Brogi) allowed this 2D template matrix to be multiplied
with a 2D spectral order. np.hstack() is used to concatenate all orders end to end,
effectively making a giant single spectral order (with NaNs in between due to masking).
All these steps have eliminated ALL the forloops from the equation, and effectuated a
speed gain of a factor between 2,000 and 3,000. The time to do cross correlations is now
typically measured in 100s of milliseconds rather than minutes.
This way of calculation does impose some strict rules on NaNs, though. To keep things fast,
NaNs are now used to set the interpolated template matrix to zero wherever there are NaNs in the data.
These NaNs are found by looking at the first spectrum in the stack, with the assumption that
every NaN is in an all-NaN column. In the standard cross-correlation work-flow, isolated
NaNs are interpolated over (healed), after all.
The places where there are NaN columns in the data are therefore set to 0 in the template matrix.
The NaN values themselves are then set to to an arbitrary value, since they will never
weigh into the average by construction.
Parameters
----------
list_of_wls : list
List of wavelength axes of the data.
list_of_orders : list
List of corresponding 2D orders.
list_of_errors : list
Optional, list of corresponding 2D error matrices.
wlm : np.ndarray
Wavelength axis of the template.
fxm : np.ndarray
Weight-axis of the template.
drv : int,float
The velocity step onto which the CCF is computed. Typically ~1 km/s.
RVrange : int,float
The velocity range in the positive and negative direction over which to
evaluate the CCF. Typically >100 km/s.
plot : bool
Set to True for diagnostic plotting.
Returns
-------
RV : np.ndarray
The radial velocity grid over which the CCF is evaluated.
CCF : np.ndarray
The weighted average flux in the spectrum as a function of radial velocity.
CCF_E : np.ndarray
Optional. The error on each CCF point propagated from the error on the spectral values.
Tsums : np.ndarray
The sum of the template for each velocity step. Used for normalising the CCFs.
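    Examples
    --------
    A minimal, illustrative call on synthetic inputs (the wavelength range,
    template line and velocity grid below are arbitrary assumptions made for
    demonstration only):
        >>> import numpy as np
        >>> wl = np.linspace(500.0, 501.0, 1000)
        >>> orders = [np.ones((3, 1000))]  # 3 exposures, 1 spectral order
        >>> wlm = np.linspace(499.0, 502.0, 5000)
        >>> fxm = np.exp(-0.5 * ((wlm - 500.5) / 0.01)**2)  # one template line
        >>> rv, ccf, tsums = xcor([wl], orders, wlm, fxm, 1.0, 100.0)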
"""
import tayph.functions as fun
import astropy.constants as const
import tayph.util as ut
from tayph.vartests import typetest,dimtest,postest,nantest
import numpy as np
import scipy.interpolate
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import sys
import pdb
#===FIRST ALL SORTS OF TESTS ON THE INPUT===
if len(list_of_wls) != len(list_of_orders):
raise ValueError(f'In xcor(): List of wls and list of orders have different length ({len(list_of_wls)} & {len(list_of_orders)}).')
dimtest(wlm,[len(fxm)],'wlm in ccf.xcor()')
typetest(wlm,np.ndarray,'wlm in ccf.xcor')
typetest(fxm,np.ndarray,'fxm in ccf.xcor')
typetest(drv,[int,float],'drv in ccf.xcor')
    typetest(RVrange,[int,float],'RVrange in ccf.xcor()')
postest(RVrange,'RVrange in ccf.xcor()')
postest(drv,'drv in ccf.xcor()')
nantest(wlm,'fxm in ccf.xcor()')
nantest(fxm,'fxm in ccf.xcor()')
nantest(drv,'drv in ccf.xcor()')
nantest(RVrange,'RVrange in ccf.xcor()')
drv = float(drv)
N=len(list_of_wls)#Number of orders.
    if np.ndim(list_of_orders[0]) == 1:
n_exp=1
else:
n_exp=len(list_of_orders[0][:,0])#Number of exposures.
#===Then check that all orders indeed have n_exp exposures===
for i in range(N):
if len(list_of_orders[i][:,0]) != n_exp:
raise ValueError(f'In xcor(): Not all orders have {n_exp} exposures.')
#===END OF TESTS. NOW DEFINE CONSTANTS===
c=const.c.to('km/s').value#In km/s
    RV=fun.findgen(2.0*RVrange/drv+1)*drv-RVrange#Define the velocity grid, centered on zero.
beta=1.0-RV/c#The doppler factor with which each wavelength is to be shifted.
n_rv = len(RV)
#===STACK THE ORDERS IN MASSIVE CONCATENATIONS===
stack_of_orders = np.hstack(list_of_orders)
stack_of_wls = np.concatenate(list_of_wls)
if list_of_errors is not None:
stack_of_errors = np.hstack(list_of_errors)#Stack them horizontally
#Check that the number of NaNs is the same in the orders as in the errors on the orders;
#and that they are in the same place; meaning that if I add the errors to the orders, the number of
#NaNs does not increase (NaN+value=NaN).
        if (np.sum(np.isnan(stack_of_orders)) != np.sum(np.isnan(stack_of_errors+stack_of_orders))) or (np.sum(np.isnan(stack_of_orders)) != np.sum(np.isnan(stack_of_errors))):
            raise ValueError(f"in CCF: The NaNs in list_of_orders and list_of_errors do not match in number or position ({np.sum(np.isnan(stack_of_orders))} vs {np.sum(np.isnan(stack_of_errors))})")
#===HERE IS THE JUICY BIT===
shifted_wavelengths = stack_of_wls * beta[:, np.newaxis]#2D broadcast of wl_data, each row shifted by beta[i].
T = scipy.interpolate.interp1d(wlm,fxm, bounds_error=False, fill_value=0)(shifted_wavelengths)#...making this a 2D thing.
T[:,np.isnan(stack_of_orders[0])] = 0.0#All NaNs are assumed to be in all-NaN columns. If that is not true, the below nantest will fail.
T_sums = np.sum(T,axis = 1)
#We check whether there are isolated NaNs:
n_nans = np.sum(np.isnan(stack_of_orders),axis=0)#This is the total number of NaNs in each column.
n_nans[n_nans==len(stack_of_orders)]=0#Whenever the number of NaNs equals the length of a column, set the flag to zero.
if np.max(n_nans)>0:#If there are any columns which still have NaNs in them, we need to crash.
raise ValueError(f"in CCF: Not all NaN values are purely in columns. There are still isolated NaNs. Remove those.")
stack_of_orders[np.isnan(stack_of_orders)] = 47e20#Set NaNs to arbitrarily high values.
    CCF = stack_of_orders @ T.T/T_sums#Here is the entire cross-correlation, over all orders and velocity steps, with no for-loops.
CCF_E = CCF*0.0
#If the errors were provided, we do the same to those:
if list_of_errors is not None:
stack_of_errors[np.isnan(stack_of_errors)] = 42e20#we have already tested that these NaNs are in the same place.
CCF_E = stack_of_errors**2 @ (T.T/T_sums)**2#This has been mathematically proven.
#===THAT'S ALL. TEST INTEGRITY AND RETURN THE RESULT===
nantest(CCF,'CCF in ccf.xcor()')#If anything went wrong with NaNs in the data, these tests will fail because the matrix operation @ is non NaN-friendly.
nantest(CCF_E,'CCF_E in ccf.xcor()')
    if list_of_errors is not None:
return(RV,CCF,np.sqrt(CCF_E),T_sums)
return(RV,CCF,T_sums)
def clean_ccf(rv,ccf,ccf_e,dp):
"""
This routine normalizes the CCF fluxes and subtracts the average out of
transit CCF, using the transit lightcurve as a mask.
Parameters
----------
rv : np.ndarray
The radial velocity axis
ccf : np.ndarray
The CCF with second dimension matching the length of rv.
ccf_e : np.ndarray
The error on ccf.
dp : str or path-like
The datapath of the present dataset, to establish which exposures in ccf
are in or out of transit.
Returns
-------
ccf_n : np.ndarray
The transit-lightcurve normalised CCF.
ccf_ne : np.ndarray
The error on ccf_n
    ccf_nn : np.ndarray
        The CCF relative to the out-of-transit time-averaged CCF, if sufficient
        (>25% of the time-series) out-of-transit exposures were available.
        Otherwise, the average over the entire time-series is used.
    ccf_nne : np.ndarray
        The error on ccf_nn.
"""
import numpy as np
import tayph.functions as fun
import tayph.util as ut
from matplotlib import pyplot as plt
import pdb
import math
import tayph.system_parameters as sp
import tayph.operations as ops
import astropy.io.fits as fits
import sys
import copy
from tayph.vartests import typetest,dimtest,nantest
typetest(rv,np.ndarray,'rv in clean_ccf()')
typetest(ccf,np.ndarray,'ccf in clean_ccf')
typetest(ccf_e,np.ndarray,'ccf_e in clean_ccf')
dp=ut.check_path(dp)
dimtest(ccf,[0,len(rv)])
dimtest(ccf_e,[0,len(rv)])
nantest(rv,'rv in clean_ccf()')
nantest(ccf,'ccf in clean_ccf()')
nantest(ccf_e,'ccf_e in clean_ccf()')
#ADD PARAMGET DV HERE.
transit=sp.transit(dp)
# transitblock = fun.rebinreform(transit,len(rv))
Nrv = int(math.floor(len(rv)))
baseline_ccf = np.hstack((ccf[:,0:int(0.25*Nrv)],ccf[:,int(0.75*Nrv):]))
baseline_ccf_e= np.hstack((ccf_e[:,0:int(0.25*Nrv)],ccf_e[:,int(0.75*Nrv):]))
baseline_rv = np.hstack((rv[0:int(0.25*Nrv)],rv[int(0.75*Nrv):]))
meanflux=np.median(baseline_ccf,axis=1)#Normalize the baseline flux, but away from the signal of the planet.
meanflux_e=1.0/len(baseline_rv)*np.sqrt(np.nansum(baseline_ccf_e**2.0,axis=1))#1/N times sum of squares.
#I validated that this is approximately equal to ccf_e/sqrt(N).
meanblock=fun.rebinreform(meanflux,len(rv))
meanblock_e=fun.rebinreform(meanflux_e,len(rv))
ccf_n = ccf/meanblock.T
ccf_ne = np.abs(ccf_n) * np.sqrt((ccf_e/ccf)**2.0 + (meanblock_e.T/meanblock.T)**2.0)#R=X/Z -> dR = R*sqrt( (dX/X)^2+(dZ/Z)^2 )
#I validated that this is essentially equal to ccf_e/meanblock.T; as expected because the error on the mean spectrum is small compared to ccf_e.
if np.sum(transit==1) == 0:
print('------WARNING in Cleaning: The data contains only in-transit exposures.')
print('------The mean ccf is taken over the entire time-series.')
meanccf=np.nanmean(ccf_n,axis=0)
meanccf_e=1.0/len(transit)*np.sqrt(np.nansum(ccf_ne**2.0,axis=0))#I validated that this is approximately equal
        #to ccf_ne/sqrt(N), where N is the number of exposures averaged over.
elif np.sum(transit==1) <= 0.25*len(transit):
print('------WARNING in Cleaning: The data contains very few (<25%) out of transit exposures.')
print('------The mean ccf is taken over the entire time-series.')
meanccf=np.nanmean(ccf_n,axis=0)
meanccf_e=1.0/len(transit)*np.sqrt(np.nansum(ccf_ne**2.0,axis=0))#I validated that this is approximately equal
        #to ccf_ne/sqrt(N), where N is the number of exposures averaged over.
else:
meanccf=np.nanmean(ccf_n[transit == 1.0,:],axis=0)
meanccf_e=1.0/np.sum(transit==1)*np.sqrt(np.nansum(ccf_ne[transit == 1.0,:]**2.0,axis=0))#I validated that this is approximately equal
        #to ccf_ne/sqrt(N), where N is the number of out-of-transit exposures.
if np.min(transit) == 1.0:
print('------WARNING in Cleaning: The data is not predicted to contain in-transit exposures.')
print('------If you expect to be dealing with transit-data, please check the ephemeris at %s.'%dp)
sys.exit()
meanblock2=fun.rebinreform(meanccf,len(meanflux))
meanblock2_e=fun.rebinreform(meanccf_e,len(meanflux))
ccf_nn = ccf_n/meanblock2#MAY NEED TO DO SUBTRACTION INSTEAD TOGETHER W. NORMALIZATION OF LIGHTCURVE. SEE ABOVE.
ccf_nne = np.abs(ccf_n/meanblock2)*np.sqrt((ccf_ne/ccf_n)**2.0 + (meanblock2_e/meanblock2)**2.0)
#I validated that this error is almost equal to ccf_ne/meanccf
#ONLY WORKS IF LIGHTCURVE MODEL IS ACCURATE, i.e. if Euler observations are available.
print("---> WARNING IN CLEANING.CLEAN_CCF(): NEED TO ADD A FUNCTION THAT YOU CAN NORMALIZE BY THE LIGHTCURVE AND SUBTRACT INSTEAD OF DIVISION!")
return(ccf_n,ccf_ne,ccf_nn-1.0,ccf_nne)
def filter_ccf(rv,ccf,v_width):
"""
Performs a high-pass filter on a CCF.
"""
import copy
import tayph.operations as ops
ccf_f = copy.deepcopy(ccf)
wiggles = copy.deepcopy(ccf)*0.0
dv = rv[1]-rv[0]#Assumes that the RV axis is constant.
w = v_width / dv
for i,ccf_row in enumerate(ccf):
wiggle = ops.smooth(ccf_row,w,mode='gaussian')
wiggles[i] = wiggle
ccf_f[i] = ccf_row-wiggle
return(ccf_f,wiggles)
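# Illustrative sketch of filter_ccf on a synthetic CCF (the velocity grid,
# trend shape and noise level are assumptions made for demonstration only):
#
#     rv = np.arange(-200.0, 201.0, 1.0)
#     trend = np.sin(rv / 80.0)[np.newaxis, :]          # one broad wiggle
#     ccf = trend + 0.01 * np.random.randn(1, len(rv))  # 1 exposure
#     ccf_hp, wiggles = filter_ccf(rv, ccf, v_width=60.0)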
def shift_ccf(RV,CCF,drv):
"""
This shifts the rows of a CCF based on velocities provided in drv.
    TODO: Replace the ad-hoc input checks below with the test functions in tayph.vartests.
"""
import tayph.functions as fun
import tayph.util as ut
import numpy as np
#import matplotlib.pyplot as plt
import scipy.interpolate
import pdb
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import sys
import scipy.ndimage.interpolation as scint
    if np.ndim(CCF) == 1:
print("ERROR in shift_ccf: CCF should be a 2D block.")
sys.exit()
else:
n_exp=len(CCF[:,0])#Number of exposures.
n_rv=len(CCF[0,:])
if len(RV) != n_rv:
print('ERROR in shift_ccf: RV does not have the same length as the base size of the CCF block.')
sys.exit()
if len(drv) != n_exp:
print('ERROR in shift_ccf: drv does not have the same height as the CCF block.')
sys.exit()
dv = RV[1]-RV[0]
CCF_new=CCF*0.0
for i in range(n_exp):
#C_i=scipy.interpolate.interp1d(RV,CCF[i],fill_value=(0.0,0.0))
#CCF_new[i,:] = C_i(RV-drv[i]*2.0)
CCF_new[i,:] = scint.shift(CCF[i],drv[i]/dv,mode='nearest',order=1)
return(CCF_new)
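# Illustrative sketch of shift_ccf (the velocity grid and per-exposure shifts
# below are arbitrary assumptions):
#
#     rv = np.arange(-100.0, 101.0, 1.0)
#     ccf = np.random.randn(3, len(rv))   # 3 exposures
#     drv = np.array([-5.0, 0.0, 5.0])    # shift of each row, in km/s
#     ccf_aligned = shift_ccf(rv, ccf, drv)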
def construct_KpVsys(rv,ccf,ccf_e,dp,kprange=[0,300],dkp=1.0):
"""The name says it all. Do good tests."""
import tayph.functions as fun
import tayph.operations as ops
import numpy as np
import tayph.system_parameters as sp
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import tayph.util as ut
import sys
import pdb
Kp = fun.findgen((kprange[1]-kprange[0])/dkp+1)*dkp+kprange[0]
n_exp = np.shape(ccf)[0]
KpVsys = np.zeros((len(Kp),len(rv)))
KpVsys_e = np.zeros((len(Kp),len(rv)))
transit = sp.transit(dp)-1.0
transit /= np.nansum(transit)
transitblock = fun.rebinreform(transit,len(rv)).T
j = 0
ccfs = []
for i in Kp:
dRV = sp.RV(dp,vorb=i)*(-1.0)
ccf_shifted = shift_ccf(rv,ccf,dRV)
ccf_e_shifted = shift_ccf(rv,ccf_e,dRV)
ccfs.append(ccf_shifted)
KpVsys[j,:] = np.nansum(transitblock * ccf_shifted,axis=0)
KpVsys_e[j,:] = (np.nansum((transitblock*ccf_e_shifted)**2.0,axis=0))**0.5
# plt.plot(rv,KpVsys[j,:])
# plt.fill_between(rv, KpVsys[j,:]-KpVsys_e[j,:], KpVsys[j,:]+KpVsys_e[j,:],alpha=0.5)
# plt.show()
# pdb.set_trace()
j+=1
ut.statusbar(i,Kp)
return(Kp,KpVsys,KpVsys_e)
# CCF_total = np.zeros((n_exp,n_rv))
#
# Tsums = fun.findgen(n_rv)*0.0*float('NaN')
# T_i = scipy.interpolate.interp1d(wlm,fxm, bounds_error=False, fill_value=0)
# t1=ut.start()
#
# for i,order in enumerate(list_of_orders):
# CCF = np.zeros((n_exp,n_rv))*float('NaN')
# shifted_wavelengths = list_of_wls[i] * beta[:, np.newaxis]#2D broadcast of wl_data, each row shifted by beta[i].
# T=T_i(shifted_wavelengths)
# masked_order = np.ma.masked_array(order,np.isnan(order))
#
# for j,spectrum in enumerate(masked_order):
# x = np.repeat(spectrum[:, np.newaxis],n_rv, axis=1).T
# CCF[j] = np.ma.average(x,weights=T, axis=1)
#
# CCF_total+=CCF
# ut.statusbar(i,len(list_of_orders))
#
# ut.end(t1)
# ut.save_stack('test_ccf_compared.fits',[fits.getdata('test_ccf.fits'),CCF])
# pdb.set_trace()
#
# #===Define the output CCF array.
# CCF = np.zeros((n_exp,len(shift)))#*float('NaN')
# CCF_E = CCF*0.0
# Tsums = fun.findgen(len(shift))*0.0*float('NaN')
# #===Then comes the big forloop.
# #The outer loop is the shifts. For each, we loop through the orders.
#
#
# counter = 0
# for i in range(len(shift)):
# T_sum = 0.0
# wlms=wlm*shift[i]
# for j in range(N):
# wl=list_of_wls[j]
# order=list_of_orders[j]
#
# T_i=scipy.interpolate.interp1d(wlms[(wlms >= np.min(wl)-10.0) & (wlms <= np.max(wl)+10.0)],fxm[(wlms >= np.min(wl)-10.0) & (wlms <= np.max(wl)+10.0)],bounds_error=False,fill_value=0.0)
# T = T_i(wl)
# T_matrix=fun.rebinreform(T,n_exp)
# CCF[:,i]+=np.nansum(T_matrix*order,1)
# if list_of_errors != None:
# sigma=list_of_errors[j]
# CCF_E[:,i]+=np.nansum((T_matrix*sigma)**2.0,1)#CHANGE THIS WITH PYTHON @ OPERATOR AS PER HOW MATTEO CODES THIS. Matrix multiplication is 20x faster than normal multiplication + summing.
# T_sum+=np.sum(np.abs(T))
#
#
# CCF[:,i] /= T_sum
# CCF_E[:,i] /= T_sum**2.0
# Tsums[i] = T_sum
# T_sum = 0.0
# counter += 1
# ut.statusbar(i,shift)
# nantest(CCF,'CCF in ccf.xcor()')
# nantest(CCF_E,'CCF_E in ccf.xcor()')
#
#
# if plot == True:
# fig, (a0,a1,a2) = plt.subplots(3,1,gridspec_kw = {'height_ratios':[1,1,1]},figsize=(10,7))
# a02 = a0.twinx()
# for i in range(N):
# meanspec=np.nanmean(list_of_orders[i],axis=0)
# meanwl=list_of_wls[i]
# T_i=scipy.interpolate.interp1d(wlm[(wlm >= np.min(meanwl)-0.0) & (wlm <= np.max(meanwl)+0.0)],fxm[(wlm >= np.min(meanwl)-0.0) & (wlm <= np.max(meanwl)+0.0)],fill_value='extrapolate')
# T = T_i(meanwl)
# # meanspec-=min(meanspec)
# # meanspec/=np.max(meanspec)
# # T_plot-=np.min(T)
# # T_plot/=np.median(T_plot)
# a02.plot(meanwl,T,color='orange',alpha=0.3)
# a0.plot(meanwl,meanspec,alpha=0.5)
# a1.plot(RV,Tsums,'.')
# if list_of_errors != None:
# a2.errorbar(RV,np.mean(CCF,axis=0),fmt='.',yerr=np.mean(np.sqrt(CCF_E),axis=0)/np.sqrt(n_exp),zorder=3)
# else:
# a2.plot(RV,np.mean(CCF,axis=0),'.')
# a0.set_title('t-averaged data and template')
# a1.set_title('Sum(T)')
# a2.set_title('t-averaged CCF')
# a0.tick_params(axis='both',labelsize='5')
# a02.tick_params(axis='both',labelsize='5')
# a1.tick_params(axis='both',labelsize='5')
# a2.tick_params(axis='both',labelsize='5')
# fig.tight_layout()
# plt.show()
# # a0.set_xlim((674.5,675.5))
# # a1.set_xlim((674.5,675.5))
# if list_of_errors != None:
# return(RV,CCF,np.sqrt(CCF_E),Tsums)
# return(RV,CCF,Tsums)
| 2.34375 | 2 |
platform/core/tests/test_encryptor/test_service.py | hackerwins/polyaxon | 0 | 12768510 | <filename>platform/core/tests/test_encryptor/test_service.py<gh_stars>0
import pytest
from django.test import override_settings
import conf
import encryptor
from encryptor.manager import EncryptionManager
from options.registry.core import ENCRYPTION_KEY, ENCRYPTION_SECRET
from tests.base.case import BaseTest
@pytest.mark.encryption_mark
class TestEncryptor(BaseTest):
def test_default_encryption(self):
assert conf.get(ENCRYPTION_KEY) is None
assert conf.get(ENCRYPTION_SECRET) is None
assert encryptor.encrypt('foo') == 'foo'
assert encryptor.decrypt('foo') == 'foo'
    @override_settings(ENCRYPTION_KEY='my_key',
                       ENCRYPTION_SECRET='<KEY>')
def test_secret_encryption(self):
assert conf.get(ENCRYPTION_KEY) == 'my_key'
        assert conf.get(ENCRYPTION_SECRET) == '<KEY>'
encryptor.validate()
encryptor.setup()
value = 'foo'
encrypted_value = encryptor.encrypt(value)
assert encrypted_value.startswith('{}my_key$'.format(EncryptionManager.MARKER))
assert encryptor.decrypt(encrypted_value) == 'foo'
| 2.21875 | 2 |
thresh/__main__.py | 06f7b1afdb2a4801b0dbde6635f227b7/thresh | 0 | 12768511 | <gh_stars>0
import sys
from . import main
main(sys.argv[1:])
| 1.140625 | 1 |
superradiance_plot.py | matthewjstott/AlpPy | 0 | 12768512 | #!/usr/bin/python
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
from matplotlib.patches import Ellipse
plt.rcParams["figure.facecolor"] = 'w'
plt.rcParams["axes.facecolor"] = 'w'
plt.rcParams["savefig.facecolor"] = 'w'
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
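# Illustrative usage of plot_cov_ellipse (the covariance values below are
# arbitrary assumptions): draws a 2-sigma ellipse at the origin on the
# current axes.
#
#     cov = np.array([[2.0, 0.3], [0.3, 0.5]])
#     plot_cov_ellipse(cov, pos=(0.0, 0.0), nstd=2, alpha=0.3)
#     plt.xlim(-5, 5); plt.ylim(-3, 3); plt.show()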
def conf_legend():
conf=[99,95,68]
concolor=['orangered','red','maroon']
Ep_handle={}
Ep_label={}
for i in range(0,3):
Ep_handle[i]=[]
Ep_label[i]=[]
Ep_handle[i] = [mpatches.Patch(color=concolor[i], alpha=0.6, linewidth=0)]
Ep_label[i] = [u'${0}\%$ CL'.format(conf[i])]
handles2=[]
labels2=[]
for i in range(0,3):
handles2.extend(Ep_handle[i])
labels2.extend(Ep_label[i])
legend22 = plt.legend(handles2,labels2,loc='center right',bbox_to_anchor = [0.9325, 0.61],
ncol=2,prop={'size':12},numpoints=1)
pyplot.gca().add_artist(legend22)
plt.legend(loc='center right',bbox_to_anchor = [0.99, 0.27])
def quantum_levels_legend(colours,l):
p_handle={}
p_label={}
for i in range(0,5):
p_handle[i]=[]
p_label[i]=[]
p_handle[i] = [mpatches.Patch(color=colours[i], alpha=1.0, linewidth=1.5)]
p_label[i] = [u'$l=m={0}$'.format(l[i])]
plt.text(13.11, 0.34, r'$\mu_{\rm ax}=10^{-11}eV$', fontsize=15,bbox={'facecolor':'white', 'alpha':1.0, 'pad':12})
#handle, label = ax.get_legend_handles_labels()
handles=[]
labels=[]
for i in range(0,5):
handles.extend(p_handle[i])
labels.extend(p_label[i])
legend2 = plt.legend(handles,labels,loc='lower right',
ncol=2,prop={'size':12},numpoints=1)
pyplot.gca().add_artist(legend2)
def regge_plane_plot(x1,y1,colours,sr_spins,sr_masses,sr_spin_up,sr_spin_low,sr_mass_up,sr_mass_low):
fig, ax = plt.subplots(figsize=(10,6))
for i in range(4,-1,-1):
ax.fill_between(x1[i], y1[i], 1,facecolor=colours[i],linewidth=2.0,zorder=2)
labels=(r'$\rm Continuum\ Fit \ Black$'
'\n'
r'$\rm Hole \ Data$')
ax.errorbar(sr_masses, sr_spins, yerr=[sr_spin_up,sr_spin_low], xerr=[sr_mass_up,sr_mass_low], fmt='o',color='k',label=labels)
plt.legend(loc='lower right',prop={'size':12})
plt.xlabel(r'$\rm Black \ Hole \ Mass \ \left(\rm{M_{\rm BH}} \ / M_{\odot} \right)$', ha='center', va='center',size=20,labelpad=15)
plt.ylabel(r'$\rm Black \ Hole \ Spin \ \left( a_{*}\right)$',size=21)
plt.ylim(0,1)
plt.xlim(0,x1[4].max())
def regge_region_plot(fx,fy,blackholes,rt,xtem,ytem,dytem,dxtem,example_mass,example_spin,example_spin_error,example_mass_error,error_ellipse,bmhu):
plt.plot(fx,fy,linestyle='-',color='black')
plt.fill_between(fx, fy,1, color='deepskyblue',alpha=0.3)
plt.xlim(fx.min(),fx.max())
if blackholes == True:
for i in range(len(ytem)):
plt.errorbar(xtem[i], ytem[i], yerr=dytem[i], xerr=dxtem[i], fmt='o',color='k')
plt.errorbar(example_mass,example_spin,yerr=example_spin_error,xerr=example_mass_error, fmt='o',color='k')
if error_ellipse==True:
for i in range (len(example_mass_error)):
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=3, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=2, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=1, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plt.xlabel(r'${\rm M_{BH}} \left( M_{\odot} \right)$', ha='center', va='center',size=20,labelpad=15)
plt.ylabel(r'$ a_{*}$',size=21)
plt.ylim(0,1)
plt.xlim(0,70)
def intersection_plot(nx,ny,indx,indx2):
    plt.plot(nx[4][indx2[3]], ny[4][indx2[3]], 'ro')  # indx2 indexes both axes at the intersection
plt.plot(nx[0][0:indx[0]],ny[0][0:indx[0]])
plt.plot(nx[1][indx2[0]:indx[1]],ny[1][indx2[0]:indx[1]])
plt.plot(nx[2][indx2[1]:indx[2]],ny[2][indx2[1]:indx[2]])
plt.plot(nx[3][indx2[2]:indx[3]],ny[3][indx2[2]:indx[3]])
plt.plot(nx[4][indx2[3]:-1],ny[4][indx2[3]:-1])
def superradiance_rates_plot(alpha,rates):
for i in range(0,5):
plt.plot(alpha,rates[i]*2,linewidth=2)
plt.yscale('log')
plt.xlabel(r'$\mu_{\rm ax} r_g$', size=24,labelpad=4.15)
plt.ylabel(r'$ \log_{10}(M_{\rm BH} \ IM(\omega))$',size=21,labelpad=2)
plt.xlim(0,2.55)
plt.ylim(10**-16.5,10**-6.5)
| 2.265625 | 2 |
tests/test_compose/test_write/test_tag/test_div.py | schireson/htmxl | 2 | 12768513 | from tests.utils import WriteTests
class WriteDiv(WriteTests):
fixture_dir = "tests/fixtures/templates/tags/div"
class TestAdjacentDivs(WriteDiv):
template_file = "adjacent_divs.html.jinja2"
expected_result_file = "adjacent_divs.xlsx"
class TestContainedDivs(WriteDiv):
template_file = "contained_divs.html.jinja2"
expected_result_file = "contained_divs.xlsx"
class TestNoContent(WriteDiv):
template_file = "div_no_content.html.jinja2"
expected_result_file = "div_no_content.xlsx"
| 2.125 | 2 |
tests/test_lsf_config.py | NBMueller/lsf | 0 | 12768514 | from io import StringIO
from tests.src.lsf_config import Config
class TestBool:
def test_empty_returns_false(self):
config = Config({})
assert not config
def test_non_empty_returns_true(self):
config = Config({1: 1})
assert config
class TestDefaultConstructor:
def test_no_options_given_returns_empty(self):
config = Config()
assert not config
class TestContains:
def test_item_not_in_config(self):
stream = StringIO("key: 'foo'")
item = "bar"
config = Config.from_stream(stream)
assert item not in config
def test_item_in_config(self):
stream = StringIO("key: 'foo'")
item = "key"
config = Config.from_stream(stream)
assert item in config
def test_only_keys_are_tested_for_membership(self):
stream = StringIO("key: 'foo'")
item = "foo"
config = Config.from_stream(stream)
assert item not in config
class TestConcatenateParams:
def test_str_returns_str(self):
params = "-q queue"
actual = Config.concatenate_params(params)
expected = params
assert actual == expected
def test_empty_list_returns_empty_str(self):
params = []
actual = Config.concatenate_params(params)
expected = ""
assert actual == expected
def test_list_returns_str(self):
params = ["-q queue", "-P project"]
actual = Config.concatenate_params(params)
expected = "-q queue -P project"
assert actual == expected
class TestGet:
def test_get_empty_returns_default(self):
stream = StringIO("")
config = Config.from_stream(stream)
key = "key"
actual = config.get(key)
assert actual is None
def test_get_key_in_yaml(self):
stream = StringIO("key: 'foo'")
key = "key"
config = Config.from_stream(stream)
actual = config.get(key)
expected = "foo"
assert actual == expected
def test_get_key_not_in_yaml_returns_default(self):
stream = StringIO("key: 'foo'")
key = "bar"
default = "default"
config = Config.from_stream(stream)
actual = config.get(key, default)
expected = default
assert actual == expected
class TestDefaultParams:
def test_no_default_returns_empty(self):
stream = StringIO("key: 'foo'")
config = Config.from_stream(stream)
actual = config.default_params()
expected = ""
assert actual == expected
def test_default_present_returns_params(self):
stream = StringIO("__default__: '-q foo'")
config = Config.from_stream(stream)
actual = config.default_params()
expected = "-q foo"
assert actual == expected
def test_default_present_params_are_list_returns_params(self):
stream = StringIO("__default__:\n - '-q foo'\n - '-P project'")
config = Config.from_stream(stream)
actual = config.default_params()
expected = "-q foo -P project"
assert actual == expected
def test_default_present_without_underscores_returns_empty(self):
stream = StringIO("default:\n - '-q foo'\n - '-P project'")
config = Config.from_stream(stream)
actual = config.default_params()
expected = ""
assert actual == expected
class TestParamsForRule:
def test_no_default_or_rule_returns_empty(self):
stream = StringIO("key: 'foo'")
config = Config.from_stream(stream)
rulename = "a"
actual = config.params_for_rule(rulename)
expected = ""
assert actual == expected
def test_default_present_but_not_rule_returns_default_params(self):
stream = StringIO("__default__: '-q foo'")
config = Config.from_stream(stream)
rulename = "a"
actual = config.params_for_rule(rulename)
expected = "-q foo"
assert actual == expected
def test_rule_and_default_present_returns_default_and_rule_params(self):
stream = StringIO("__default__: '-q foo'\nrule:\n - '-P project'\n")
config = Config.from_stream(stream)
rulename = "rule"
actual = config.params_for_rule(rulename)
expected = "-q foo -P project"
assert actual == expected
def test_rule_present_but_not_default_returns_rule_params(self):
stream = StringIO("rule:\n - '-P project'\n - '-q bar'")
config = Config.from_stream(stream)
rulename = "rule"
actual = config.params_for_rule(rulename)
expected = "-P project -q bar"
assert actual == expected
def test_rule_and_default_have_same_params_rule_params_take_precedent(self):
stream = StringIO(
"__default__: '-q foo'\nrule:\n - '-P project'\n - '-q bar'"
)
config = Config.from_stream(stream)
rulename = "rule"
actual = config.params_for_rule(rulename)
expected = "-q bar -P project"
assert actual == expected
def test_args_to_dict():
args = '-W 0:01 -W 0:02 -J "test name"'
actual = Config.args_to_dict(args)
expected = {"-W": "0:02", "-J": "test name"}
assert actual == expected
| 2.515625 | 3 |
models/MF/IALS.py | yigitozgumus/PolimiRecSys2018 | 0 | 12768515 | import numpy as np
from base.RecommenderUtils import check_matrix
from base.BaseRecommender import RecommenderSystem
from tqdm import tqdm
class IALS_numpy(RecommenderSystem):
'''
    implicit-feedback Alternating Least Squares model (or Weighted Regularized Matrix Factorization)
    Reference: Collaborative Filtering for Implicit Feedback Datasets (Hu et al., 2008)
    Factorization model for implicit feedback.
    First, splits the feedback matrix R element-wise into a Preference matrix P and a Confidence matrix C.
    Then computes their decomposition into the dot product of two matrices X and Y of latent factors.
    X represents the user latent factors, Y the item latent factors.
    The model is learned by minimizing the following regularized least-squares objective function via Alternating Least Squares
\operatornamewithlimits{argmin}\limits_{x*,y*}\frac{1}{2}\sum_{i,j}{c_{ij}(p_{ij}-x_i^T y_j) + \lambda(\sum_{i}{||x_i||^2} + \sum_{j}{||y_j||^2})}
'''
# TODO: Add support for multiple confidence scaling functions (e.g. linear and log scaling)
def __init__(self,
num_factors=50,
reg=0.011,
iters=30,
scaling='log',
alpha=40,
epsilon=1.0,
init_mean=0.0,
init_std=0.1,
rnd_seed=42):
super(IALS_numpy, self).__init__()
assert scaling in ['linear', 'log'], 'Unsupported scaling: {}'.format(scaling)
self.num_factors = num_factors
self.reg = reg
self.iters = iters
self.scaling = scaling
self.alpha = alpha
self.epsilon = epsilon
self.init_mean = init_mean
self.init_std = init_std
self.rnd_seed = rnd_seed
self.parameters = "num_factors={}, reg={}, iters={}, scaling={}, alpha={}, episilon={}, init_mean={}, " \
"init_std={}, rnd_seed={}".format(
self.num_factors, self.reg, self.iters, self.scaling, self.alpha, self.epsilon, self.init_mean,
self.init_std, self.rnd_seed)
def __str__(self):
return "WRMF-iALS Implementation"
def _linear_scaling(self, R):
C = R.copy().tocsr()
C.data *= self.alpha
C.data += 1.0
return C
def _log_scaling(self, R):
C = R.copy().tocsr()
C.data = 1.0 + self.alpha * np.log(1.0 + C.data / self.epsilon)
return C
def fit(self, R):
self.dataset = R
# compute the confidence matrix
if self.scaling == 'linear':
C = self._linear_scaling(R)
else:
C = self._log_scaling(R)
Ct = C.T.tocsr()
M, N = R.shape
# set the seed
np.random.seed(self.rnd_seed)
# initialize the latent factors
self.X = np.random.normal(self.init_mean, self.init_std, size=(M, self.num_factors))
self.Y = np.random.normal(self.init_mean, self.init_std, size=(N, self.num_factors))
for it in tqdm(range(self.iters)):
self.X = self._lsq_solver_fast(C, self.X, self.Y, self.reg)
self.Y = self._lsq_solver_fast(Ct, self.Y, self.X, self.reg)
def recommend(self, playlist_id, n=None, exclude_seen=True,export= False):
scores = np.dot(self.X[playlist_id], self.Y.T)
ranking = scores.argsort()[::-1]
# rank items
if exclude_seen:
ranking = self._filter_seen(playlist_id, ranking)
        if not export:
            return ranking[:n]
        else:
            return str(ranking[:n]).strip("[]")
def _lsq_solver(self, C, X, Y, reg):
# precompute YtY
rows, factors = X.shape
YtY = np.dot(Y.T, Y)
for i in range(rows):
# accumulate YtCiY + reg*I in A
A = YtY + reg * np.eye(factors)
# accumulate Yt*Ci*p(i) in b
b = np.zeros(factors)
for j, cij in self._nonzeros(C, i):
vj = Y[j]
A += (cij - 1.0) * np.outer(vj, vj)
b += cij * vj
X[i] = np.linalg.solve(A, b)
return X
def _lsq_solver_fast(self, C, X, Y, reg):
# precompute YtY
rows, factors = X.shape
YtY = np.dot(Y.T, Y)
for i in range(rows):
# accumulate YtCiY + reg*I in A
A = YtY + reg * np.eye(factors)
start, end = C.indptr[i], C.indptr[i + 1]
j = C.indices[start:end] # indices of the non-zeros in Ci
ci = C.data[start:end] # non-zeros in Ci
Yj = Y[j] # only the factors with non-zero confidence
# compute Yt(Ci-I)Y
aux = np.dot(Yj.T, np.diag(ci - 1.0))
A += np.dot(aux, Yj)
# compute YtCi
b = np.dot(Yj.T, ci)
X[i] = np.linalg.solve(A, b)
return X
def _nonzeros(self, R, row):
for i in range(R.indptr[row], R.indptr[row + 1]):
yield (R.indices[i], R.data[i])
def _get_user_ratings(self, playlist_id):
self.dataset = check_matrix(self.dataset, "csr")
return self.dataset[playlist_id]
def _get_item_ratings(self, track_id):
self.dataset = check_matrix(self.dataset, "csc")
return self.dataset[:, track_id]
def _filter_seen(self, playlist_id, ranking):
user_profile = self._get_user_ratings(playlist_id)
seen = user_profile.indices
unseen_mask = np.in1d(ranking, seen, assume_unique=True, invert=True)
return ranking[unseen_mask]
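# Illustrative usage sketch (assumes `urm` is a scipy.sparse playlist-track
# interaction matrix convertible to CSR; parameter values are the defaults
# defined above):
#
#     recommender = IALS_numpy(num_factors=50, reg=0.011, iters=30)
#     recommender.fit(urm)
#     top_tracks = recommender.recommend(playlist_id=0, n=10)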
| 2.578125 | 3 |
django_crud/views.py | Francislley/crudDjango170118 | 0 | 12768516 | from copy import copy
import uuid
from pprint import pprint
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.template.defaultfilters import striptags
from django.conf import settings
from django.utils.http import urlquote, urlquote_plus
from django.views.generic import ListView, DeleteView, CreateView, UpdateView
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, BaseListView
from django.views.generic.edit import ProcessFormView, FormView, ModelFormMixin
from mongoforms.forms import MongoFormMetaClass, MongoForm
import yaml
try:
import json
except ImportError:
import simplejson as json
registered_cruds = set()
DEFAULT_FORMAT = getattr(settings, 'CRUD_DEFAULT_FORMAT', "yaml")
class MongoSingleObjectMixin(object):
def get_queryset(self):
try:
return SingleObjectMixin.get_queryset(self)
except AttributeError:
return self.model.objects()
def get_form_class(self):
try:
self.model._meta.app_label
except AttributeError:
class Meta:
document = self.model
            # If the document defines a primary key field, expose all fields on
            # the generated form; otherwise leave Meta.fields unset.
            fields = getattr(self.model, '_fields', {})
            if any(value.primary_key for value in fields.values()):
                Meta.fields = fields.keys()
if self.form_class is not None:
return self.form_class
else:
class_name = self.model.__name__ + 'Form'
return MongoFormMetaClass(class_name, (MongoForm,), {'Meta': Meta})
def get_template_names(self):
try:
return SingleObjectTemplateResponseMixin.get_template_names(self)
except AttributeError:
return [self.template_name]
def get_model_name(self):
return self.model.__name__.lower()
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
try:
return SingleObjectMixin.get_context_object_name(self, obj)
except AttributeError:
return "object"
def get_context_data(self, **kwargs):
context = kwargs
context_object_name = self.get_context_object_name(self.object)
context['model_verbose_name_plural'] = self.model.__name__+"s"
context['model_verbose_name'] = self.model.__name__
context['model_name'] = model_name = self.model.__name__.lower()
context["uuid"] = uuid.uuid4()
context["crud_urls"] = {
"cruds":reverse("cruds_list"),
"create":reverse("crud_%s_create"%model_name),
"load":reverse("crud_%s_load"%model_name, args=[DEFAULT_FORMAT]),
"dump":reverse("crud_%s_dump"%model_name, args=[DEFAULT_FORMAT]),
"index":reverse("crud_%s_list"%model_name),
}
if context_object_name:
context[context_object_name] = self.object
return context
class MongoMultipleObjectsMixin(object):
def get_queryset(self):
try:
return MultipleObjectMixin.get_queryset(self)
except AttributeError:
return self.model.objects()
class CRUDDumpView(MongoMultipleObjectsMixin, BaseListView):
def get(self, request, **kwargs):
self.object_list = self.get_queryset()
dumps = [obj.dump() for obj in self.object_list]
if kwargs['format']=="yaml":
yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
return HttpResponse(yaml.dump(dumps), mimetype='application/yaml')
elif kwargs['format']=="json":
return HttpResponse(json.dumps(dumps, ensure_ascii=False), mimetype='application/json')
else:
raise NotImplementedError
class FormUpload(forms.Form):
"""
Form for uploading file
"""
file = forms.FileField()
    erase_existent = forms.BooleanField(initial=True, required=False)  # required=False so an unchecked box still validates
class CRUDLoadView(MongoMultipleObjectsMixin, FormView):
model=None
template_name = "crud_load.html"
def get_form_class(self):
return FormUpload
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form, format=kwargs["format"]))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
if form.cleaned_data["erase_existent"]:
self.model.drop_collection()
d = form.cleaned_data["file"].read()
if kwargs["format"]=="json":
data = json.loads(d)
elif kwargs["format"]=="yaml":
data = yaml.load(d)
else:
raise NotImplementedError
for objd in data:
newobj = self.model.from_data(objd)
newobj.save()
return self.form_valid(form)
else:
return self.form_invalid(form, format=kwargs["format"])
def form_invalid(self, form, format):
return self.render_to_response(self.get_context_data(form=form, format=format))
def get_context_data(self, **kwargs):
context = super(CRUDLoadView, self).get_context_data(**kwargs)
context["format"] = kwargs["format"]
context['model_verbose_name_plural'] = self.model.__name__+"s"
context['model_verbose_name'] = self.model.__name__
context['model_name'] = model_name = self.model.__name__.lower()
return context
def get_success_url(self):
return reverse("crud_%s_list"%self.model.__name__.lower())
class CRUDListView(MongoMultipleObjectsMixin, ListView):
template_name = "crud_list.html"
def get_context_data(self, **kwargs):
context = super(CRUDListView, self).get_context_data(**kwargs)
        # TODO: unify the following duplicated context-building code across all views. The ability
        # to customize verbose_name and verbose_name_plural for models is also needed. Maybe create
        # a subclass of Document for crud, since mongoengine does not support meta options in its
        # documents.
context['model_verbose_name_plural'] = self.model.__name__+"s"
context['model_verbose_name'] = self.model.__name__
context['model_name'] = model_name = self.model.__name__.lower()
context['crud_object_list'] = []
context["crud_urls"] = {
"cruds":reverse("cruds_list"),
"create":reverse("crud_%s_create"%model_name),
"load":reverse("crud_%s_load"%model_name, args=[DEFAULT_FORMAT]),
"dump":reverse("crud_%s_dump"%model_name, args=[DEFAULT_FORMAT]),
}
for obj in context['object_list']:
crud_obj = {}
crud_obj["object"] = obj
crud_obj["name"] = unicode(obj)
crud_obj["url_edit"] = reverse("crud_%s_update"%model_name, kwargs={"pk":urlquote_plus(obj.pk)})
crud_obj["url_delete"] = reverse("crud_%s_delete"%model_name, kwargs={"pk":urlquote_plus(obj.pk)})
context['crud_object_list'].append(crud_obj)
context["url_create"] = reverse("crud_%s_create"%model_name)
return context
class CRUDDeleteView(MongoSingleObjectMixin, DeleteView):
template_name = "crud_delete.html"
def get_success_url(self):
return reverse("crud_%s_list"%self.get_model_name())
class CRUDUpdateView(MongoSingleObjectMixin, UpdateView):
template_name = "crud_update.html"
def get_success_url(self):
return reverse("crud_%s_update"%self.get_model_name(), kwargs={"pk":urlquote_plus(self.object.pk)})
class AjaxForm(object):
def errors_as_json(self, strip_tags=False):
error_summary = {}
errors = {}
for error in self.errors.iteritems():
errors.update({error[0]: unicode(striptags(error[1])\
if strip_tags else error[1])})
error_summary.update({'errors': errors})
return error_summary
class AjaxMixin(object):
def form_valid(self, form):
ModelFormMixin.form_valid(self, form)
return HttpResponse(json.dumps({'success':True, 'object':form.instance.dump(), "object_pk":unicode(form.instance.pk)}, ensure_ascii=False),
mimetype='application/json')
def form_invalid(self, form):
return HttpResponse(json.dumps(form.errors_as_json(), ensure_ascii=False),
mimetype='application/json')
def get_form_class(self):
try:
#fixme: detect mongo model
self.model._meta.app_label
except AttributeError:
# The inner Meta class fails if model = model is used for some reason.
tmp_model = self.model
# TODO: we should be able to construct a ModelForm without creating
# and passing in a temporary inner class.
class Meta:
document = tmp_model
if self.form_class is not None:
class_name = self.model.__name__ + 'CrudAjaxForm'
return MongoFormMetaClass(class_name, (AjaxForm, self.form_class,), {'Meta': self.form_class.Meta})
else:
class_name = self.model.__name__ + 'CrudAjaxForm'
return MongoFormMetaClass(class_name, (AjaxForm, MongoForm,), {'Meta': Meta})
class CRUDCreateView(MongoSingleObjectMixin, CreateView):
template_name = "crud_create.html"
def get_success_url(self):
return reverse("crud_%s_update"%self.get_model_name(), kwargs={"pk":urlquote_plus(self.object.pk)})
class CRUDCreateAjaxView(AjaxMixin, CRUDCreateView):
template_name = "crud_create_ajax.html"
def get_context_data(self, **kwargs):
context = super(CRUDCreateAjaxView, self).get_context_data(**kwargs)
context["url_self"] = reverse("crud_%s_create.ajax"%self.model.__name__.lower())
return context
class CRUDUpdateAjaxView(AjaxMixin, CRUDUpdateView):
template_name = "crud_update_ajax.html"
def get_context_data(self, **kwargs):
context = super(CRUDUpdateAjaxView, self).get_context_data(**kwargs)
context["url_self"] = reverse("crud_%s_update.ajax"%self.model.__name__.lower(), kwargs={"pk":urlquote_plus(self.object.pk)})
return context
def cruds_list_view(request):
cruds = []
for model_name in copy(registered_cruds):
cruds.append({ "url_list":reverse("crud_%s_list"%model_name),
"url_create":reverse("crud_%s_create"%model_name),
"name":model_name.capitalize()})
return render_to_response("cruds_list.html", {"cruds": cruds})
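# Illustrative URL wiring for these views (hypothetical urls.py; the document
# class `MyDoc` and the exact patterns are assumptions -- the reverse() calls
# above expect names of the form crud_<model>_list, crud_<model>_create, ...):
#
#     from django.conf.urls import url
#     urlpatterns = [
#         url(r'^cruds/$', cruds_list_view, name='cruds_list'),
#         url(r'^mydoc/$', CRUDListView.as_view(model=MyDoc), name='crud_mydoc_list'),
#         url(r'^mydoc/create/$', CRUDCreateView.as_view(model=MyDoc), name='crud_mydoc_create'),
#     ]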
| 1.914063 | 2 |
shap/plots/_bar.py | YotamElor/shap | 2 | 12768517 | <reponame>YotamElor/shap<gh_stars>1-10
import warnings
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
from ._labels import labels
from ..utils import format_value
from . import colors
import numpy as np
# TODO: improve the bar chart to look better like the waterfall plot (gray feature values, and a "the rest..." feature at the bottom)
def bar(shap_values, max_display=10, show=True):
""" Create a bar plot of a set of SHAP values.
If a single sample is passed then we plot the SHAP values as a bar chart. If an
Explanation with many samples is passed then we plot the mean absolute value for
each feature column as a bar chart.
Parameters
----------
shap_values : shap.Explanation
A single row of a SHAP Explanation object (i.e. shap_values[0]) or a multi-row Explanation
object that we want to summarize.
max_display : int
The maximum number of bars to display.
show : bool
If show is set to False then we don't call the matplotlib.pyplot.show() function. This allows
further customization of the plot by the caller after the bar() function is finished.
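    Examples
    --------
    An illustrative sketch (`model` and `X` are assumed to be a fitted model
    and its feature matrix; shap.Explainer is used here as a generic entry
    point):
        >>> explainer = shap.Explainer(model, X)
        >>> shap_values = explainer(X)
        >>> bar(shap_values[0])  # one sample: signed SHAP values
        >>> bar(shap_values)     # many samples: mean absolute SHAP values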
"""
    assert str(type(shap_values)).endswith("Explanation'>"), "The shap_values parameter must be a shap.Explanation object!"
features = shap_values.data
feature_names = shap_values.feature_names
shap_values = shap_values.values
# doing a global bar plot
if len(shap_values.shape) == 2:
shap_values = np.abs(shap_values).mean(0)
features = None
# unwrap pandas series
if str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index)
features = features.values
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])
if max_display is None:
max_display = len(feature_names)
num_features = min(max_display, len(shap_values))
feature_order = np.argsort(-np.abs(shap_values))
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds), 0, -1)
# see how many individual (vs. grouped at the end) features we are plotting
if num_features < len(shap_values):
shap_values[feature_order[num_features-1]] = np.sum([shap_values[feature_order[i]] for i in range(num_features-1, len(shap_values))])
yticklabels = []
for i in feature_inds:
if features is not None:
yticklabels.append(format_value(features[i], "%0.03f") + " = " + feature_names[i])
else:
yticklabels.append(feature_names[i])
if num_features < len(shap_values):
yticklabels[-1] = "%d other features" % (len(shap_values) - num_features + 1)
row_height = 0.5
pl.gcf().set_size_inches(8, num_features * row_height + 1.5)
#pl.axvline(0, 0, 1, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
negative_values_present = np.sum(shap_values[feature_order[:num_features]] < 0) > 0
if negative_values_present:
pl.axvline(0, 0, 1, color="#000000", linestyle="-", linewidth=1, zorder=1)
pl.barh(
y_pos, shap_values[feature_inds],
0.7, align='center',
color=[colors.blue_rgb if shap_values[feature_inds[i]] <= 0 or features is None else colors.red_rgb for i in range(len(y_pos))]
)
pl.yticks(list(y_pos) + list(y_pos), yticklabels + [l.split('=')[-1] for l in yticklabels], fontsize=13)
xlen = pl.xlim()[1] - pl.xlim()[0]
fig = pl.gcf()
ax = pl.gca()
xticks = ax.get_xticks()
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
bbox_to_xscale = xlen/width
for i in range(len(y_pos)):
ind = feature_order[i]
if shap_values[ind] < 0:
txt_obj = pl.text(
shap_values[ind] - (5/72)*bbox_to_xscale, y_pos[i], format_value(shap_values[ind], '%+0.02f'),
horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,
fontsize=12
)
else:
txt_obj = pl.text(
shap_values[ind] + (5/72)*bbox_to_xscale, y_pos[i], format_value(shap_values[ind], '%+0.02f'),
horizontalalignment='left', verticalalignment='center', color=colors.blue_rgb if features is None else colors.red_rgb,
fontsize=12
)
if features is not None:
features = list(features)
# try and round off any trailing zeros after the decimal point in the feature values
for i in range(len(features)):
try:
if round(features[i]) == features[i]:
features[i] = int(features[i])
except TypeError:
pass # features[i] must not be a number
#pl.gca().set_yticklabels(yticklabels)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
if negative_values_present:
pl.gca().spines['left'].set_visible(False)
pl.gca().tick_params('x', labelsize=11)
xmin,xmax = pl.gca().get_xlim()
if negative_values_present:
pl.gca().set_xlim(xmin - (xmax-xmin)*0.05, xmax + (xmax-xmin)*0.05)
else:
pl.gca().set_xlim(xmin, xmax + (xmax-xmin)*0.05)
if features is None:
pl.xlabel(labels["GLOBAL_VALUE"], fontsize=13)
else:
pl.xlabel(labels["VALUE"], fontsize=13)
# color the y tick labels that have the feature values as gray
# (these fall behind the black ones with just the feature name)
tick_labels = pl.gca().yaxis.get_majorticklabels()
for i in range(num_features):
tick_labels[i].set_color("#999999")
if show:
pl.show()
def bar_legacy(shap_values, features=None, feature_names=None, max_display=None, show=True):
# unwrap pandas series
if str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index)
features = features.values
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])
if max_display is None:
max_display = 7
else:
max_display = min(len(feature_names), max_display)
feature_order = np.argsort(-np.abs(shap_values))
#
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds), 0, -1)
pl.barh(
y_pos, shap_values[feature_inds],
0.7, align='center',
color=[colors.red_rgb if shap_values[feature_inds[i]] > 0 else colors.blue_rgb for i in range(len(y_pos))]
)
pl.yticks(y_pos, fontsize=13)
if features is not None:
features = list(features)
# try and round off any trailing zeros after the decimal point in the feature values
for i in range(len(features)):
try:
if round(features[i]) == features[i]:
features[i] = int(features[i])
except TypeError:
pass # features[i] must not be a number
yticklabels = []
for i in feature_inds:
if features is not None:
yticklabels.append(feature_names[i] + " = " + str(features[i]))
else:
yticklabels.append(feature_names[i])
pl.gca().set_yticklabels(yticklabels)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
#pl.gca().spines['left'].set_visible(False)
pl.xlabel("SHAP value (impact on model output)")
if show:
        pl.show()
| 3.390625 | 3 |
exercicio3.py | FelipeFerreiraSS/trabalho-logica-de-programacao-python-ads | 0 | 12768518 | <filename>exercicio3.py
# Number of students to be registered
numeroAlunos = int(input('Quantos alunos quer cadastrar? '))
notasAlunos = {}
todosAlunos = []
# Loop once for each student to be registered
for i in range(numeroAlunos):
    notasAlunos['aluno'] = input('Qual o nome do aluno? ') # Get the student's name
    n1 = float(input('Qual a 1° nota? ')) # Get the grades
n2 = float(input('Qual a 2° nota? '))
n3 = float(input('Qual a 3° nota? '))
n4 = float(input('Qual a 4° nota? '))
    notasAlunos['notas'] = n1, n2, n3, n4 # Store all the grades
    media = (n1 + n2 + n3 + n4) / 4 # Compute the average
    # Approved if the average is at least 7, otherwise failed
    if media >= 7:
        notasAlunos['status'] = 'Aprovado'
    else:
        notasAlunos['status'] = 'Reprovado'
    # Copy the data and append it to the todosAlunos list
todosAlunos.append(notasAlunos.copy())
# Print the results
print('_' * 30)
print('Notas dos alunos:')
print('_' * 30)
# Two loops: one over the list of students, one over each student's dictionary
for e in todosAlunos:
for i,j in e.items():
print('{} = {}'.format(i, j))
print('_' * 30)
| 4 | 4 |
tests/adapters/test_array.py | zackw/pyamf | 14 | 12768519 | <filename>tests/adapters/test_array.py
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for the L{array} L{miniamf.adapters._array} module.
@since: 0.5
"""
from __future__ import absolute_import
import array
import unittest
import miniamf
class ArrayTestCase(unittest.TestCase):
"""
"""
def setUp(self):
self.orig = [ord('f'), ord('o'), ord('o')]
self.obj = array.array('b')
self.obj.append(ord('f'))
self.obj.append(ord('o'))
self.obj.append(ord('o'))
def encdec(self, encoding):
return next(miniamf.decode(
miniamf.encode(self.obj, encoding=encoding),
encoding=encoding))
def test_amf0(self):
self.assertEqual(self.encdec(miniamf.AMF0), self.orig)
def test_amf3(self):
self.assertEqual(self.encdec(miniamf.AMF3), self.orig)
| 2.40625 | 2 |
tests/v2/test_auth_api.py | emdeechege/Questionaire-API | 0 | 12768520 | import os,unittest, json
from app import create_app
from instance.config import app_config
from app.connect import QuestionerDB
app = create_app(app_config['testing'])
class UserTestCases(unittest.TestCase):
""" Base test class """
def setUp(self):
""" Defining test variables """
self.app = create_app(app_config['testing'])
self.client = self.app.test_client()
self.app_context = self.app
self.app.testing = True
self.user = {
"firstname": "StandUps",
"lastname": "Sky",
"othername": "Tea",
"email": "<EMAIL>",
"phone_number": "123456789",
"is_admin": "True",
"username": "Scupper",
"password": "<PASSWORD>"
}
self.user1 = {
"firstname": "StandUps"
}
self.user2 = {
"firstname": "Tom",
"lastname": "Hunter",
"othername": "Caps",
"email": "<EMAIL>",
"phone_number": "123498rttt",
"is_admin": "False",
"username": "Awesome",
"password": "<PASSWORD>"
}
self.user3 = {
"firstname": "Truthy",
"lastname": "Stoway",
"othername": "Birth",
"email": "t<EMAIL>",
"phone_number": "1234534",
"is_admin": "True",
"username": "Scupperdf",
"password": "<PASSWORD>"
}
self.user6 = {
"firstname": " ",
"lastname": "Stoway",
"othername": "Birth",
"email": "<EMAIL>",
"phone_number": "1234534",
"is_admin": "True",
"username": "Scupperdf",
"password": "<PASSWORD>"
}
self.login = {
"username": "Scupper",
"password": "<PASSWORD>"
}
self.login1 = {
"username": "Champ",
"password": "<PASSWORD>"
}
self.login2 = {
"username": "",
"password": "<PASSWORD>"
}
self.login3 = {
"username": "Scuppersds",
"password": ""
}
self.login4 = {
"username": "Kijana",
"password": "<PASSWORD>"
}
    def tear_down(self):
        """Destroys objects created during the test run (unittest actually calls tearDown below)."""
        QuestionerDB.drop_tables()
def test_user_signup(self):
""" Test signup user """
check = self.client.post(
"/api/v2/signup", data=json.dumps(self.user), content_type="application/json")
result = json.loads(check.data.decode())
self.assertEqual(check.status_code, 201)
self.assertEqual(result[1].get("status"), 201)
self.assertIn("<EMAIL>", result[0].get('email'))
def test_validate_phone_number(self):
"""test phone number"""
response = self.client.post(
'/api/v2/signup', data=json.dumps(self.user2), content_type="application/json")
result = json.loads(response.data)
self.assertTrue(result["message"],
"Please input valid phone number")
self.assertTrue(response.status_code, 400)
def test_validate_email(self):
""" validate email"""
response = self.client.post(
'/api/v2/signup', data=json.dumps(self.user3), content_type="application/json")
result = json.loads(response.data)
self.assertEqual(result["message"],
"Invalid email")
self.assertEqual(response.status_code, 400)
# def test_username_exists(self):
# """username exists"""
# response = self.client.post(
# '/api/v2/signup', data=json.dumps(self.user), content_type="application/json")
# result = json.loads(response.data)
# # import pdb; pdb.set_trace()
# self.assertEqual(result[1]["message"], "Username exists")
# self.assertEqual(response.status_code, 400)
#
# def test_user_login(self):
# """ Test login user """
# check_login = self.client.post(
# "/api/v2/login", data=json.dumps(self.login), content_type="application/json")
# result = json.loads(check_login.data.decode())
#
# self.assertEqual(result["status"], 200)
# self.assertEqual(result["message"], "User logged in successfully")
#
def test_user_exists(self):
response1 = self.client.post(
"/api/v2/login", data=json.dumps(self.login1), content_type="application/json")
result1 = json.loads(response1.data.decode())
self.assertEqual(response1.status_code, 404)
self.assertEqual(result1["status"], 404)
self.assertEqual(result1["message"], "User does not exist")
def test_username_required(self):
"""username test"""
response2 = self.client.post(
"/api/v2/login", data=json.dumps(self.login2), content_type="application/json")
result2 = json.loads(response2.data.decode())
self.assertEqual(response2.status_code, 400)
self.assertEqual(result2["status"], 400)
self.assertEqual(result2["message"], "Username is required")
def test_password_required(self):
"""password required"""
response3 = self.client.post(
"/api/v2/login", data=json.dumps(self.login3), content_type="application/json")
result3 = json.loads(response3.data.decode())
self.assertEqual(response3.status_code, 400)
self.assertEqual(result3["status"], 400)
self.assertEqual(result3["message"], "Password is required")
def tearDown(self):
"""Method to destroy test database tables"""
QuestionerDB.drop_tables()
if __name__ == "__main__":
unittest.main()
| 3.0625 | 3 |
pythontutorials/books/CrackingCodes/Ch08/PracticeQuestions/Question3.py | JoseALermaIII/python-tutorials | 2 | 12768521 | """Chapter 8 Practice Question 3
Draw the complete truth tables for the and, or, and not operators.
"""
def notTruthTable() -> None:
"""Not truth table.
Prints a truth table for the not operator.
Returns:
None. Only prints out a table.
"""
print(" _________________________\n",
"|not A | Evaluates to:|\n",
"|_________|______________|\n",
"|not False| True |\n",
"|not True | False |\n",
"|_________|______________|\n")
return None
def andTruthTable() -> None:
"""And truth table.
Prints a truth table for the and operator.
Returns:
None. Only prints out a table.
"""
print(" _______________________________\n",
"|A and B | Evaluates to:|\n",
"|_______________|______________|\n",
"|False and False| False |\n",
"|False and True | False |\n",
"|True and False | False |\n",
"|True and True | True |\n",
"|_______________|______________|\n")
return None
def orTruthTable() -> None:
"""Or truth table.
Prints a truth table for the or operator.
Returns:
None. Only prints out a table.
"""
print(" ______________________________\n",
"|A or B | Evaluates to:|\n",
"|______________|______________|\n",
"|False or False| False |\n",
"|False or True | True |\n",
"|True or False | True |\n",
"|True or True | True |\n",
"|______________|______________|\n")
return None
def main():
notTruthTable()
andTruthTable()
orTruthTable()
# If Question3.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| 4.4375 | 4 |
applications/pytorch/conformer/tests/convolution.py | payoto/graphcore_examples | 260 | 12768522 | <filename>applications/pytorch/conformer/tests/convolution.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from src.layers.layer_norm import LayerNorm
class ConvolutionModule_cpu(ConvolutionModule):
"""ConvolutionModule in Conformer model.
Args:
        channels_ (int): The number of channels of the conv layers.
        kernel_size_ (int): Kernel size of the conv layers.
"""
def __init__(self, channels_, kernel_size_, activation_=nn.ReLU(), bias=True):
"""Construct an ConvolutionModule object."""
super(ConvolutionModule_cpu, self).__init__(channels=channels_, kernel_size=kernel_size_, activation=activation_)
        # kernel_size should be an odd number for 'SAME' padding
assert (kernel_size_ - 1) % 2 == 0
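        # For instance, kernel_size_ = 31 gives padding = (31 - 1) // 2 = 15,
        # so the depthwise convolution preserves the input sequence length.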
self.pointwise_conv1 = nn.Conv1d(
channels_,
2 * channels_,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.depthwise_conv = nn.Conv1d(
1 * channels_,
1 * channels_,
kernel_size_,
stride=1,
padding=(kernel_size_ - 1) // 2,
groups=channels_,
bias=bias,
)
# Replace the original batch_norm with layer_norm
self.norm = LayerNorm(1 * channels_, -2)
self.pointwise_conv2 = nn.Conv1d(
1 * channels_,
channels_,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.activation = activation_
| 2.40625 | 2 |
vocoder_inference.py | jireh-father/Real-Time-Voice-Cloning | 0 | 12768523 | <filename>vocoder_inference.py
from utils.argutils import print_args
from vocoder import inference as vocoder
import numpy as np
import librosa
import argparse
import torch
import traceback
if __name__ == '__main__':
## Info & args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-v", "--voc_model_fpath", type=str,
default="vocoder/saved_models/pretrained/pretrained.pt",
help="Path to a saved vocoder")
parser.add_argument("--low_mem", action="store_true", help= \
"If True, the memory used by the synthesizer will be freed after each use. Adds large "
"overhead but allows to save some GPU memory for lower-end GPUs.")
parser.add_argument("--no_sound", action="store_true", help= \
"If True, audio won't be played.")
args = parser.parse_args()
print_args(args, parser)
if not args.no_sound:
import sounddevice as sd
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
## Load the models one by one.
print("Preparing the encoder, the synthesizer and the vocoder...")
vocoder.load_model(args.voc_model_fpath)
    ## Interactive inference
    # If you're unfamiliar with digital audio, know that it is encoded as an array of floats
    # (or sometimes integers, but mostly floats in this project) ranging from -1 to 1.
    # The sampling rate is the number of values (samples) recorded per second; the vocoder
    # used here writes its output at 22050 Hz.
    print("Ready for interactive vocoder inference.")
num_generated = 0
while True:
try:
            # Get the filepath of a saved mel spectrogram
            message = "Enter the filepath of a mel spectrogram saved with torch.save " \
                      "(e.g. a .pt file):\n"
            mel_path = input(message)
            ## Loading the spectrogram
            # Load the mel spectrogram tensor directly from the filepath:
melspec = torch.load(mel_path)
# Synthesizing the waveform is fairly straightforward. Remember that the longer the
# spectrogram, the more time-efficient the vocoder.
generated_wav = vocoder.infer_waveform(melspec.unsqueeze(0), normalize=True, from_numpy=False)
## Post-generation
# There's a bug with sounddevice that makes the audio cut one second earlier, so we
# pad it.
generated_wav = np.pad(generated_wav, (0, 22050), mode="constant")
# Save it on the disk
fpath = "vocoder_output_%02d.wav" % num_generated
print(generated_wav.dtype)
librosa.output.write_wav(fpath, generated_wav.astype(np.float32), 22050)
num_generated += 1
print("\nSaved output as %s\n\n" % fpath)
except Exception as e:
print(traceback.print_exc())
print("Caught exception: %s" % repr(e))
print("Restarting\n")
| 2.359375 | 2 |
src/M_star.py | Eashwar-S/Multi-agent-Pathplanning | 0 | 12768524 | import numpy as np
import heapq
import math
import time
import pygame
class Node:
def __init__(self, state=None, cost=float('inf'), costToCome=float('inf'), parent=None, collision=None):
self.state = state
self.parent = parent
self.cost = cost
self.costToCome = costToCome
self.collision = collision
class CoupledPlannerNode:
def __init__(self, state=None, collision=None, parent=None, f_score=float('inf'), cost_to_come=float('inf')):
self.state = state
self.parent = parent
self.collision = collision
self.f_score = f_score
self.cost_to_come = cost_to_come
class CoupledNode:
def __init__(self, state=None, collision=None, parent=None, f_score=None, cost_to_go=None, cost_to_come=None):
self.state = state
self.parent = parent
self.collision = collision
self.f_score = f_score
self.cost_to_go = cost_to_go
self.cost_to_come = cost_to_come
def pointInValidWorkspace(point, res, radiusClearance, scale):
x, y = point
# --------------------------------------------------------------------------------
# Checking whether point inside obstacles
# --------------------------------------------------------------------------------
X = np.float32([8, 12.5, 12.5, 8]) * scale / res
Y = np.float32([9, 9, 9.5, 9.5]) * scale / res
ptInRectangle = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10, 10.5, 10.5, 10]) * scale / res
Y = np.float32([7, 7, 11.5, 11.5]) * scale / res
ptInRectangle1 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([4, 4.25, 4.25, 4]) * scale / res
Y = np.float32([8, 8, 10.5, 10.5]) * scale / res
ptInRectangle2 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([1.5, 3, 3, 1.5]) * scale / res
Y = np.float32([9, 9, 9.25, 9.25]) * scale / res
ptInRectangle3 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([16, 16.25, 16.25, 16]) * scale / res
Y = np.float32([8, 8, 10.5, 10.5]) * scale / res
ptInRectangle4 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([17, 18.5, 18.5, 17]) * scale / res
Y = np.float32([9, 9, 9.25, 9.25]) * scale / res
ptInRectangle5 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([9, 11.5, 11.5, 9]) * scale / res
Y = np.float32([3, 3, 3.25, 3.25]) * scale / res
ptInRectangle6 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10.15, 10.40, 10.40, 10.15]) * scale / res
Y = np.float32([0.8, 0.8, 2.3, 2.3]) * scale / res
ptInRectangle7 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([9, 11.5, 11.5, 9]) * scale / res
Y = np.float32([15, 15, 15.25, 15.25]) * scale / res
ptInRectangle8 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10.15, 10.40, 10.40, 10.15]) * scale / res
Y = np.float32([16, 16, 17.5, 17.5]) * scale / res
ptInRectangle9 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
if ptInRectangle or ptInRectangle1 or ptInRectangle2 or ptInRectangle3 or ptInRectangle4 or \
ptInRectangle5 or ptInRectangle6 or ptInRectangle7 or ptInRectangle8 or ptInRectangle9:
return False
return True
# checks whether next action is near an obstacle or ill defined
def isSafe(newState, scale, r=1, radiusClearance=0):
col = math.floor(800 / r)
row = math.floor(800 / r)
newState = list(newState)
if not isinstance(newState[0], list):
if newState[0] < 0 or newState[0] > col or newState[1] < 0 or newState[1] > row:
return False
return pointInValidWorkspace(newState[0:2], r, radiusClearance, scale)
else:
check = True
for i in range(len(newState)):
check = check or newState[i][0] < 0 or newState[i][0] > col or newState[i][1] < 0 or newState[i][1] > row
if check:
check = pointInValidWorkspace(newState[i][0:2], r, radiusClearance, scale)
else:
return False
return check
# returns the solution path by following parent links back to the root node
def printPath(node):
l = []
current = node
while current:
l.append(current.state)
current = current.parent
return l
def normalize(startPosition, startOrientation, threshDistance=0.5, threshAngle=30):
x, y = startPosition
t = startOrientation
x = round(x / threshDistance) * threshDistance
y = round(y / threshDistance) * threshDistance
t = round(t / threshAngle) * threshAngle
return [x, y, t]
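# Example: normalize([3.7, 5.2], 40, threshDistance=0.5, threshAngle=30)
# snaps the pose to [3.5, 5.0, 30].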
# Calculating the Euclidean distance
def distance(startPosition, goalPosition):
sx, sy = startPosition
gx, gy = goalPosition
return math.sqrt((gx - sx) ** 2 + (gy - sy) ** 2)
# generates optimal path for robot
def Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale, threshDistance=0.5, threshAngle=30,
radiusClearance=0):
# normalize goal and start positions
sx, sy, st = normalize(startPosition, startOrientation, threshDistance, threshAngle)
gx, gy, gt = normalize(goalPosition, 0, threshDistance, threshAngle)
# Initializing root node
key = str(sx) + str(sy) + str(st)
root = Node(np.array([sx, sy, st]), 0.0, 0.0, None)
if key not in nodesExplored:
nodesExplored[key] = root
count = 1
heapq.heappush(q, (root.cost, count, root))
while len(q) > 0:
_, _, currentNode = heapq.heappop(q)
if distance(currentNode.state[0:2], goalPosition) <= 3 * 1.5:
sol = printPath(currentNode)
return [True, sol]
angle = 360 // threshAngle
for theta in range(angle):
x, y, t = currentNode.state
newOrientation = math.radians((threshAngle * theta + t) % 360)
newPosX = threshDistance * math.cos(newOrientation) + x
newPosY = threshDistance * math.sin(newOrientation) + y
newState = np.array(normalize([newPosX, newPosY], newOrientation, threshDistance, threshAngle))
s = str(newState[0]) + str(newState[1]) + str(newState[2])
if s not in nodesExplored:
if isSafe(newState, scale, 1, radiusClearance):
newCostToCome = currentNode.costToCome + distance([newState[0], newState[1]], [x, y])
newCost = newCostToCome + distance([newState[0], newState[1]], [gx, gy])
newNode = Node(state=newState, cost=newCost, costToCome=newCostToCome, parent=currentNode)
nodesExplored[s] = newNode
heapq.heappush(q, (newNode.cost, count, newNode))
count += 1
else:
if nodesExplored[s].collision is None or (
isinstance(nodesExplored[s].collision, list) and len(nodesExplored[s].collision) == 0):
if (nodesExplored[s].cost > currentNode.costToCome + distance([newState[0], newState[1]],
[x, y]) + distance(
[newState[0], newState[1]], [gx, gy])):
nodesExplored[s].costToCome = currentNode.costToCome + distance([newState[0], newState[1]],
[x, y])
nodesExplored[s].cost = nodesExplored[s].costToCome + distance([newState[0], newState[1]],
[gx, gy])
nodesExplored[s].parent = currentNode
    return [False, None]
def determineCollision(robotPosition):
collisionSet = []
for i in range(len(robotPosition) - 1):
collision = []
for j in range(i + 1, len(robotPosition)):
if list(robotPosition[i]) == list(robotPosition[j]):
collision.append(i)
collision.append(j)
collision = list(set(collision))
if collision:
collisionSet.append(collision)
return collisionSet
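# Example: determineCollision([[2, 3], [2, 3], [5, 6]]) returns [[0, 1]],
# because robots 0 and 1 occupy the same cell while robot 2 does not.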
def coupledPlanner(collision, startPosition, startOrientation, goalPosition, coupledNodesExplored, nodesExplored,
solPaths1,
iterateSolPaths1, scale, threshDistance=0.5, threshAngle=30, radiusClearance=0):
nonCollisionRobots = np.array([Node()] * len(startPosition))
goalChecker = {}
solution = {}
solution1 = {}
nodeE = {}
co = 0
currentPos = startPosition.copy()
count = [0] * len(startPosition)
q = {}
col = []
for i in range(len(startPosition)):
q[i] = []
if i not in collision:
s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
solPaths1[i][iterateSolPaths1[i]][2])
nonCollisionRobots[i] = coupledNodesExplored[s]
iterateSolPaths1[i] -= 1
else:
goalChecker[i] = False
nodeE[i] = {}
root = Node(startPosition[i], 0.0, 0.0, None)
s = str(startPosition[i][0]) + str(startPosition[i][1]) + str(startPosition[i][2])
nodeE[i][s] = root
count[i] += 1
heapq.heappush(q[i], (root.cost, count[i], root))
while not all(ele for ele in goalChecker.values()):
co += 1
# print(currentPos, determineCollision(currentPos.copy()), len(currentPos), co)
if determineCollision(currentPos.copy()):
col = determineCollision(currentPos.copy())
for i in col[0]:
s = str(currentPos[i][0]) + str(currentPos[i][1]) + str(currentPos[i][2])
q[i].clear()
nodesExplored[i][s].collision = col
heapq.heappush(q[i], (nodesExplored[i][s].parent.cost, count[i], nodesExplored[i][s].parent))
nodesExplored[i][s].parent = None
nodeE[i].clear()
# collision = list(set(collision + col[0]))
# print(collision)
for i in range(len(startPosition)):
if i in collision:
if not goalChecker[i]:
_, _, currentNode = heapq.heappop(q[i])
currentPos[i] = currentNode.state
if distance(currentNode.state[0:2], goalPosition[i][0:2]) <= 3 * 1.5:
solution[i] = printPath(currentNode)
goalChecker[i] = True
continue
angle = 360 // threshAngle
for theta in range(angle):
x, y, t = currentNode.state
newOrientation = math.radians((threshAngle * theta + t) % 360)
newPosX = threshDistance * math.cos(newOrientation) + x
newPosY = threshDistance * math.sin(newOrientation) + y
newState = np.array(normalize([newPosX, newPosY], newOrientation, threshDistance, threshAngle))
s = str(newState[0]) + str(newState[1]) + str(newState[2])
if s not in nodeE[i]:
if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
s not in nodesExplored[i]):
if isSafe(newState, scale, 1, radiusClearance):
newCostToCome = currentNode.costToCome + distance([newState[0], newState[1]],
[x, y])
newCost = newCostToCome + distance([newState[0], newState[1]], goalPosition[i][0:2])
newNode = Node(state=newState, cost=newCost, costToCome=newCostToCome,
parent=currentNode)
nodesExplored[i][s] = newNode
nodeE[i][s] = newNode
heapq.heappush(q[i], (newNode.cost, count[i], newNode))
count[i] += 1
else:
if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
s not in nodesExplored[i]):
if (nodeE[i][s].cost > currentNode.costToCome + distance([newState[0], newState[1]],
[x, y]) + distance(
[newState[0], newState[1]], goalPosition[i][0:2])):
nodeE[i][s].costToCome = currentNode.costToCome + distance(
[newState[0], newState[1]], [x, y])
nodeE[i][s].cost = nodeE[i][s].costToCome + distance([newState[0], newState[1]],
goalPosition[i][0:2])
nodeE[i][s].parent = currentNode
# print(currentNode.state)
# print(currentPos[i])
else:
if iterateSolPaths1[i] > 0:
s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
solPaths1[i][iterateSolPaths1[i]][2])
nonCollisionRobots[i] = nodesExplored[i][s]
currentPos[i] = nonCollisionRobots[i].state.copy()
iterateSolPaths1[i] -= 1
else:
goalChecker[i] = True
return solution, nodesExplored
def updateCollisionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1):
    # Walk back along each robot's individual path and record the collision
    # set on every node, so the planners know these states are in conflict.
    for i, pos in enumerate(previousPos):
        s = str(pos[0]) + str(pos[1]) + str(pos[2])
        while nodesExplored1[i][s].parent is not None:
            for collision in colset:
                if i in collision:
                    if coupledNodesExplored[s].collision:
                        # merge robot i into each recorded collision group
                        coupledNodesExplored[s].collision = [
                            list(set(col + [i])) for col in coupledNodesExplored[s].collision]
                    else:
                        coupledNodesExplored[s].collision = colset
                    if nodesExplored[s].collision:
                        nodesExplored[s].collision = [
                            list(set(col + [i])) for col in nodesExplored[s].collision]
                    else:
                        nodesExplored[s].collision = colset
                    if nodesExplored1[i][s].collision:
                        nodesExplored1[i][s].collision = [
                            list(set(col + [i])) for col in nodesExplored1[i][s].collision]
                    else:
                        nodesExplored1[i][s].collision = colset
            st = nodesExplored1[i][s].parent.state
            s = str(st[0]) + str(st[1]) + str(st[2])
def subdimensionalExpansion(solPaths, nodesExplored, nodesExplored1, iterateSolPaths, scale, threshDistance,
threshAngle,
radiusClearance):
currentPos = []
sol = []
nodeE = []
startPosition = []
goalPosition = []
previousPos = []
colset = []
count = -1
exp = False
previousNode = [Node()] * len(solPaths)
node = [Node()] * len(solPaths)
coupledNodesExplored = {}
solPaths1 = solPaths.copy()
iterateSolPaths1 = iterateSolPaths.copy()
for index, path in enumerate(solPaths):
startPosition.append(list(path[iterateSolPaths[index]]))
goalPosition.append(list(path[0]))
while not all(ele == 0 for ele in iterateSolPaths):
previousPos = currentPos.copy()
currentPos.clear()
for index, path in enumerate(solPaths):
currentPos.append(list(path[iterateSolPaths[index]]))
colset = determineCollision(currentPos)
count += 1
if count == 0:
previousPos = currentPos
if not colset:
for i, pos in enumerate(currentPos):
s = str(pos[0]) + str(pos[1]) + str(pos[2])
if count == 0:
node[i] = Node(state=pos, collision=colset, parent=None)
previousNode[i] = node[i]
else:
previousNode[i] = node[i]
node[i] = Node(state=pos, collision=colset, parent=previousNode[i])
if s not in nodesExplored:
nodesExplored[s] = node[i]
coupledNodesExplored[s] = node[i]
if iterateSolPaths[i] > 0:
iterateSolPaths[i] -= 1
else:
exp = True
# print(currentPos)
break
for i, pos in enumerate(currentPos):
s = str(pos[0]) + str(pos[1]) + str(pos[2])
for collision in colset:
if i in collision:
node[i] = Node(state=pos, collision=colset, parent=None)
coupledNodesExplored[s] = node[i]
nodesExplored[s].collision = colset
nodesExplored1[i][s].collision = colset
else:
while iterateSolPaths[i] > 0:
s = str(pos[0]) + str(pos[1]) + str(pos[2])
previousNode[i] = node[i]
node[i] = Node(state=pos, collision=[], parent=previousNode[i])
coupledNodesExplored[s] = node[i]
iterateSolPaths[i] -= 1
pos = solPaths[i][iterateSolPaths[i]]
break
if exp:
print('Collision found')
# print(colset)
        updateCollisionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1)
for collision in colset:
a = time.time()
sol, nodeE = coupledPlanner(collision, startPosition, 0, goalPosition, coupledNodesExplored, nodesExplored1,
solPaths1,
iterateSolPaths1, scale, threshDistance,
30, radiusClearance)
b = time.time()
print(b - a)
return exp, sol, colset[0], nodeE, currentPos
return exp, sol, colset, nodeE, currentPos
def triangleCoordinates(start, end, triangleSize=5):
rotation = (math.atan2(start[1] - end[1], end[0] - start[0])) + math.pi / 2
rad = math.pi / 180
coordinateList = np.array([[end[0], end[1]],
[end[0] + triangleSize * math.sin(rotation - 165 * rad),
end[1] + triangleSize * math.cos(rotation - 165 * rad)],
[end[0] + triangleSize * math.sin(rotation + 165 * rad),
end[1] + triangleSize * math.cos(rotation + 165 * rad)]])
return coordinateList
def visualizeMStar():
###################################################
# Parameters
###################################################
clearance = 10
radius = 0
stepSize = 11
threshDistance = stepSize # Step size of movement
res = 1 # resolution of grid
scale = 40 # scale of grid
# 1 Robot
# start = [[1 * scale, 16 * scale]] # Starting position of the robots
# goal = [[16 * scale, 1 * scale]] # Goal position of the robots
# 2 Robots
# start = [[1 * scale, 6 * scale], [6 * scale, 1 * scale]] # Starting position of the robots
# goal = [[14 * scale, 10 * scale], [9 * scale, 14 * scale]] # Goal position of the robots
# 3 Robots
start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
[6 * scale, 1 * scale]] # Starting position of the robots
goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
[9 * scale, 14 * scale]] # Goal position of the robots
# 4 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale]] # Goal position of the robots
# 5 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale], [17 * scale, 4 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale], [19 * scale, 19 * scale]] # Goal position of the robots
# 6 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale], [17 * scale, 4 * scale], [1 * scale, 19 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale], [19 * scale, 19 * scale], [14 * scale, 16 * scale]] # Goal position of the robots
# 7 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale],
# [17 * scale, 4 * scale], [1 * scale, 19 * scale], [2 * scale, 2 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale],
# [19 * scale, 19 * scale], [14 * scale, 16 * scale], [14 * scale, 3 * scale]] # Goal position of the robots
drawing = True
threshAngle = 90 # Angle between actions
startOrientation = 0
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
lred = (255, 102, 102)
green = (0, 102, 0)
lgreen = (153, 255, 153)
orange = (255, 165, 0)
dorange = (240, 94, 35)
blue = (0, 0, 255)
lblue = (153, 204, 255)
purple = (75, 0, 130)
yellow = (255, 255, 0)
    pink = (255, 192, 203)
    dpink = (199, 21, 133)
    gray = (220, 220, 220)
    dgray = (105, 105, 105)
    cyan = (0, 255, 255)
    maroon = (255, 160, 122)
dmaroon = (128, 0, 0)
pathColours = [blue, red, green, dmaroon, orange, dpink, dgray]
colors = [lblue, lred, lgreen, maroon, dorange, pink, gray]
solutionPaths = []
size_x = 20
size_y = 20
TotalNodesExplored = {}
TotalNodesExplored1 = {}
totalTime = 0
if drawing:
pygame.init()
gameDisplay = pygame.display.set_mode((size_x * scale, size_y * scale))
gameDisplay.fill(white)
pygame.display.set_caption("M* Algorithm Implementation")
basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
############################################################
# Display Obstacles
############################################################
pygame.draw.rect(gameDisplay, black,
[int(scale * 8), int(scale * 9), int(scale * 4.5), int(scale * 0.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 10), int(scale * 7), int(scale * 0.5), int(scale * 4.5)]) # plus
pygame.draw.rect(gameDisplay, black, [int(scale * 4), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 1.5), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 16), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 17), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black, [int(scale * 9), int(scale * 3), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 0.8), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 15), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 16), int(scale * 0.25), int(scale * 1.5)]) # |
############################################################
# Display start and end points of the robots
############################################################
for i in range(len(start)):
pygame.draw.circle(gameDisplay, pathColours[i], start[i], 0.1 * scale)
pygame.draw.circle(gameDisplay, pathColours[i], goal[i], 0.1 * scale)
text = basicfont.render('s' + str(i + 1), False, pathColours[i])
text1 = basicfont.render('g' + str(i + 1), False, pathColours[i])
gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
pygame.display.update()
pygame.time.delay(500)
############################################################
# Draw Explored Nodes and solution path
############################################################
for i in range(len(start)):
nodesExplored = {}
q = []
startPosition = np.round((np.array(start[i])) / res)
goalPosition = np.round((np.array(goal[i])) / res)
if not isSafe(startPosition, scale, res, clearance + radius) or not isSafe(goalPosition, scale, res,
clearance + radius):
print('Start or goal configuration of robot ' + str(i + 1) + ' is not in a valid workspace')
else:
print('Exploring workspace for robot ' + str(i + 1))
startTime = time.time() # Start time of simulation
success, solution = Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale,
threshDistance,
threshAngle, clearance + radius)
endTime = time.time()
TotalNodesExplored.update(nodesExplored)
TotalNodesExplored1[i] = nodesExplored
#############################################
# Drawing
#############################################
if success:
solutionPaths.append(solution)
print('Optimal path found for robot ' + str(i + 1))
print("Total time taken for exploring nodes " + str(endTime - startTime) + " seconds.")
totalTime += endTime - startTime
print('-------------------------')
if drawing:
draw = True
while draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# draw nodesExplored
for s in nodesExplored:
if nodesExplored[s].parent:
pt = nodesExplored[s].state[0:2]
ptParent = nodesExplored[s].parent.state[0:2]
x, y = pt * res
x2, y2 = ptParent * res
# draw explored nodes
pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, colors[i],
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
# draw start and goal locations
pygame.draw.rect(gameDisplay, colors[i],
(int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.draw.circle(gameDisplay, colors[i],
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
math.floor(3 * 1.5 * res * scale))
pygame.draw.rect(gameDisplay, white,
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.display.update()
draw = False
else:
solutionPaths.append(success)
print("Total time " + str(totalTime))
print("solution Paths " + str(len(solutionPaths)))
print('Robots following their own individual optimal Paths')
print()
print()
iterateSolutionPaths = []
for i in range(len(solutionPaths)):
if solutionPaths[i]:
iterateSolutionPaths.append(len(solutionPaths[i]) - 1)
else:
iterateSolutionPaths.append(-1)
iterateSolutionPathsCopy = iterateSolutionPaths.copy()
iterateSolutionPathsCopy1 = iterateSolutionPaths.copy()
solutionPathsCopy = solutionPaths.copy()
failure, sol, collision, nodeE, currentPos = subdimensionalExpansion(solutionPathsCopy, TotalNodesExplored,
TotalNodesExplored1,
iterateSolutionPathsCopy,
scale,
threshDistance, 45,
radius + clearance)
if drawing:
temp = [True] * len(iterateSolutionPaths)
while not all(ele == -2 for ele in iterateSolutionPaths) and not all(not p for p in temp):
for i in range(len(solutionPaths)):
if list(solutionPaths[i][iterateSolutionPaths[i]]) == currentPos[i] and failure:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
pygame.display.update()
iterateSolutionPaths[i] -= 1
temp[i] = False
else:
if iterateSolutionPaths[i] != -2:
if iterateSolutionPaths[i] == -1:
print("There is no Path for Robot " + str(i + 1))
iterateSolutionPaths[i] = -2
elif iterateSolutionPaths[i] >= 0:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
iterateSolutionPaths[i] -= 1
if iterateSolutionPaths[i] == 0:
print("Robot " + str(i + 1) + " reached its goal")
iterateSolutionPaths[i] = -2
pygame.display.update()
pygame.time.delay(1000)
if failure:
s = ''
for i in collision:
s += str(i + 1) + ' '
print("--------------------")
print('Robot - Robot collision detected between robots ' + s)
            print('Starting subdimensional expansion')
            print('Running back-propagation and updating the collision list')
temp = []
for i in range(len(iterateSolutionPaths)):
if i in collision:
temp.append(False)
else:
temp.append(True)
if drawing:
while not all(ele for ele in temp):
for i in range(len(iterateSolutionPaths)):
if i in collision:
if iterateSolutionPaths[i] != iterateSolutionPathsCopy1[i]:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
iterateSolutionPaths[i] += 1
pygame.draw.circle(gameDisplay, yellow, (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
pygame.display.update()
else:
temp[i] = True
pygame.time.delay(500)
# pygame.quit()
print()
print('Implementing coupled planner for robots ' + s)
print()
print('Robots following collision free path')
if drawing:
gameDisplay.fill(white)
pygame.display.set_caption("M* Algorithm Implementation")
basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
############################################################
# Display Obstacles
############################################################
pygame.draw.rect(gameDisplay, black,
[int(scale * 8), int(scale * 9), int(scale * 4.5), int(scale * 0.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 10), int(scale * 7), int(scale * 0.5), int(scale * 4.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 4), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 1.5), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 16), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 17), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 3), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 0.8), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 15), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 16), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.display.update()
solutionPaths2 = solutionPathsCopy.copy()
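    # Note: the coupled-planner result is read back from 'sol.npy' here,
    # presumably a cached copy of the paths computed by coupledPlanner above.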
sol = list(np.load('sol.npy'))
sol.reverse()
for i in range(len(solutionPaths2)):
if i in collision:
solutionPaths2[i] = sol.pop(0)
iterateSolutionPaths2 = []
if drawing:
for i in range(len(start)):
pygame.draw.circle(gameDisplay, black, start[i], 0.1 * scale)
pygame.draw.circle(gameDisplay, black, goal[i], 0.1 * scale)
text = basicfont.render('s' + str(i + 1), False, black)
text1 = basicfont.render('g' + str(i + 1), False, black)
gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
pygame.display.update()
pygame.time.delay(500)
for i in range(len(start)):
# if i not in collision:
startPosition = np.round((np.array(start[i])) / res)
goalPosition = np.round((np.array(goal[i])) / res)
draw = True
while draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# draw nodesExplored
for s in nodeE[i]:
if nodeE[i][s].parent:
pt = nodeE[i][s].state[0:2]
ptParent = nodeE[i][s].parent.state[0:2]
x, y = pt * res
x2, y2 = ptParent * res
# draw explored nodes
if i in collision:
pygame.draw.line(gameDisplay, yellow, (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, yellow,
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
else:
pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, colors[i],
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
# draw start and goal locations
pygame.draw.rect(gameDisplay, colors[i],
(int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.draw.circle(gameDisplay, colors[i],
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
math.floor(3 * 1.5 * res * scale))
pygame.draw.rect(gameDisplay, white,
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.display.update()
draw = False
for i in range(len(solutionPaths2)):
iterateSolutionPaths2.append(len(solutionPaths2[i]) - 1)
print(iterateSolutionPaths2)
# draw solution path
while not all(ele == -2 for ele in iterateSolutionPaths2):
for i in range(len(solutionPaths2)):
if iterateSolutionPaths2[i] != -2:
if iterateSolutionPaths2[i] == -1:
print("There is no Path for Robot " + str(i + 1))
iterateSolutionPaths2[i] = -2
elif iterateSolutionPaths2[i] >= 0:
pt = solutionPaths2[i][iterateSolutionPaths2[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
iterateSolutionPaths2[i] -= 1
if iterateSolutionPaths2[i] == 0:
print("Robot " + str(i + 1) + " reached its goal")
iterateSolutionPaths2[i] = -2
pygame.display.update()
pygame.time.delay(4000)
pygame.quit()
def main():
visualizeMStar()
if __name__ == "__main__":
main()
| 3.0625 | 3 |
Problems/Miscellaneous/Medium/35_champagne_tower.py | andor2718/LeetCode | 1 | 12768525 | <reponame>andor2718/LeetCode
# https://leetcode.com/problems/champagne-tower/
from collections import namedtuple
Coord = namedtuple('Coord', ['row', 'col'])
Glass = namedtuple('Glass', ['in_glass', 'excess'])
class Solution:
def champagneTower(
self, poured: int, query_row: int, query_glass: int) -> float:
row, col = query_row, query_glass
glass_volume = 1.0
if not 0 <= col <= row or poured < glass_volume:
return float(poured) if row == col == 0 else 0.0
# If this is reached, then the top glass must be full!
memo = dict()
def _get_glass(_row: int, _col: int) -> Glass:
if _col == _row == 0:
return Glass(glass_volume, poured - glass_volume)
if not 0 <= _col <= _row:
return Glass(0.0, 0.0)
coord = Coord(_row, _col)
if coord not in memo:
_, top_left_excess = _get_glass(_row - 1, _col - 1)
_, top_right_excess = _get_glass(_row - 1, _col)
# Current bottle accumulates halves of top excesses.
accumulated_excess = (top_left_excess + top_right_excess) / 2
if accumulated_excess >= glass_volume:
memo[coord] = Glass(
glass_volume, accumulated_excess - glass_volume)
else:
memo[coord] = Glass(accumulated_excess, 0.0)
return memo[coord]
curr_glass = _get_glass(row, col)
return curr_glass.in_glass
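# A quick check of the recurrence: Solution().champagneTower(4, 1, 0) is 1.0,
# since the top glass keeps 1.0 and its excess of 3.0 splits into 1.5 per
# side, enough to fill each row-1 glass.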
| 3.6875 | 4 |
app/models/res.py | Hansybx/guohe3 | 1 | 12768526 | """
-*- coding: utf-8 -*-
Time : 2019/7/19 8:25
Author : Hansybx
"""
class Res:
code = 200
msg = ''
info = {}
def __init__(self, code, msg, info):
self.code = code
self.msg = msg
self.info = info
| 2.703125 | 3 |
tests/test_select.py | timgates42/goless | 266 | 12768527 | import goless
from goless.backends import current as be
from . import BaseTests
class RecvCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.ca = goless.rcase(self.ch)
def test_ready(self):
self.assertFalse(self.ca.ready())
be.run(self.ch.send, 1)
self.assertTrue(self.ca.ready())
be.run(self.ch.recv)
self.assertFalse(self.ca.ready())
def test_executes(self):
be.run(self.ch.send, 'a')
x = self.ca.exec_()
self.assertEqual(x, 'a')
def test_exec_with_no_body(self):
be.run(self.ch.send, 'a')
ca = goless.rcase(self.ch)
self.assertEqual(ca.exec_(), 'a')
class RecvCaseUnbufferedTests(RecvCaseTests):
chansize = 0
class SendCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.sendval = 1
self.ca = goless.scase(self.ch, self.sendval)
def test_ready(self):
def assert_default_readiness():
            self.assertEqual(self.ca.ready(), self.chansize > 0)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
def test_executes(self):
def recv():
a.append(self.ch.recv())
a = []
be.run(recv)
self.ca.exec_()
self.assertEqual(a, [self.sendval])
def test_exec_no_onselected(self):
be.run(self.ch.recv)
self.ca.exec_()
class SendCaseUnbufferedTests(SendCaseTests):
chansize = 0
class SelectTests(BaseTests):
def setUp(self):
BaseTests.setUp(self)
self.chan1 = goless.chan()
def test_select_uses_default(self):
cases = [goless.rcase(self.chan1), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertIsNone(val)
def test_select_chooses_ready_selection(self):
readychan = goless.chan(1)
notreadychan = goless.chan(1)
readychan.send(3)
cases = [goless.rcase(notreadychan), goless.rcase(readychan), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertEqual(val, 3)
def test_select_no_default_no_ready_blocks(self):
chan1 = goless.chan()
chan2 = goless.chan()
a = []
cases = [goless.rcase(chan2), goless.rcase(chan1)]
def sel():
a.append(goless.select(cases))
be.run(sel)
self.assertEqual(a, [])
chan1.send(5)
be.yield_()
self.assertEqual(len(a), 1)
chosen, val = a[0]
self.assertEqual(chosen, cases[1])
self.assertEqual(val, 5)
def test_main_tasklet_can_select(self):
chan1 = goless.chan(1)
cases = [goless.scase(chan1, 3)]
chosen, val = goless.select(cases)
self.assertIs(chosen, cases[0])
self.assertIsNone(val)
def test_raises_if_multiple_default_cases(self):
with self.assertRaises(AssertionError):
goless.select([goless.dcase(), goless.dcase()])
def test_select_accepts_args(self):
chan1 = goless.chan(1)
scase = goless.scase(chan1, 1)
chosen, val = goless.select(scase)
self.assertIs(chosen, scase)
self.assertIsNone(val)
def test_select_raises_for_list_and_args(self):
chan1 = goless.chan(1)
chan2 = goless.chan(1)
chan3 = goless.chan(1)
cases = [goless.scase(chan1, 1), goless.scase(chan2, 2)]
with self.assertRaises(TypeError):
goless.select(cases, chan3)
def test_select_with_no_args_should_do_nothing(self):
goless.select()
goless.select([])
def test_raises_deadlock_if_no_goroutines(self):
with self.assertRaises(goless.Deadlock):
goless.select(goless.rcase(goless.chan()))
| 2.21875 | 2 |
fetch.py | JonathanKryza/TankFetch | 2 | 12768528 | # Copyright 2013 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import telnetlib
import sys
import datetime
ip = '' #The IP address of the ATG you are polling. Must be in quotations.
port = 10001 #The default for most is 10001
command = 'i201' #Lowercase 'i' is used for the hexadecimal format. Uppercase 'I' for a human readable output.
command2 = 'I202' #Go to the docs folder for a full listing of commands or visit https://www.veeder.com
tank = '00' #'00' means ALL. For a specific tank, enter as follows: Tank 1 = "01", Tank 12 = "12", etc
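# Example configuration (hypothetical values -- substitute your own ATG's
# address): ip = '192.168.1.50', port = 10001, tank = '01' for tank 1 only.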
def fetch_tcpip():
tn = telnetlib.Telnet(ip, port)
tn.write('\x01' + str(command) + str(tank))
tn.read_until(str(command) + str(tank))
global capture1
capture1 = tn.read_until('\x03',10).replace('\x03','')
tn.write('\x01' + str(command2) + str(tank))
tn.read_until(str(command2) + str(tank))
global capture2
capture2 = tn.read_until('\x03',10).replace('\x03','')
tn.close()
def report_txt(): #Creating a simple txt file out of the output
now = datetime.datetime.now()
date = now.strftime('%m-%d-%Y') #Date format - visit https://docs.python.org/2/library/datetime.html
#Below is an example. State the directory and name of the file.
    open('\user\directory\\' + 'filename' + date + '.txt', 'w').write(capture2) #.write can be anything you capture or you can combine multiples. Ex. .write(capture1 + capture2)
if __name__=="__main__":
fetch_tcpip()
report_txt()
| 2.6875 | 3 |
cupy/cuda/cutensor.py | prkhrsrvstv1/cupy | 6,180 | 12768529 | """
cuTENSOR Wrapper
Use `cupy_backends.cuda.libs.cutensor` directly in CuPy codebase.
"""
available = True
try:
from cupy_backends.cuda.libs.cutensor import * # NOQA
except ImportError as e:
available = False
from cupy._environment import _preload_warning
_preload_warning('cutensor', e)
| 1.179688 | 1 |
src/spaceone/statistics/model/schedule_model.py | choonho/statistics | 0 | 12768530 | from mongoengine import *
from spaceone.core.model.mongo_model import MongoModel
class Scheduled(EmbeddedDocument):
cron = StringField(max_length=1024, default=None, null=True)
interval = IntField(min_value=1, max_value=60, default=None, null=True)
minutes = ListField(IntField(), default=None, null=True)
hours = ListField(IntField(), default=None, null=True)
def to_dict(self):
return self.to_mongo()
class JoinQuery(EmbeddedDocument):
keys = ListField(StringField(max_length=40))
type = StringField(max_length=20, default='LEFT', choices=('LEFT', 'RIGHT', 'OUTER', 'INNER'))
    resource_type = StringField(max_length=80)
    data_source_id = StringField(max_length=40, default=None, null=True)
query = DictField()
def to_dict(self):
return self.to_mongo()
class Formula(EmbeddedDocument):
name = StringField(max_length=40, default=None, null=True)
formula = StringField()
operator = StringField(max_length=40, default='EVAL', choices=('EVAL', 'QUERY'))
def to_dict(self):
return self.to_mongo()
class QueryOption(EmbeddedDocument):
data_source_id = StringField(max_length=40, default=None, null=True)
resource_type = StringField(max_length=80)
query = DictField()
join = ListField(EmbeddedDocumentField(JoinQuery))
formulas = ListField(EmbeddedDocumentField(Formula))
def to_dict(self):
return self.to_mongo()
class Schedule(MongoModel):
schedule_id = StringField(max_length=40, generate_id='sch', unique=True)
topic = StringField(max_length=255, unique_with='domain_id')
state = StringField(max_length=20, default='ENABLED', choices=('ENABLED', 'DISABLED'))
options = EmbeddedDocumentField(QueryOption, required=True)
schedule = EmbeddedDocumentField(Scheduled, default=Scheduled)
tags = DictField()
domain_id = StringField(max_length=255)
created_at = DateTimeField(auto_now_add=True)
last_scheduled_at = DateTimeField(default=None, null=True)
meta = {
'updatable_fields': [
'schedule',
'state',
'tags',
'last_scheduled_at'
],
'exact_fields': [
'schedule_id',
'state',
'options.data_source_id',
'domain_id'
],
'minimal_fields': [
'schedule_id',
'topic',
'state'
],
'ordering': [
'topic'
],
'indexes': [
'schedule_id',
'topic',
'state',
'options.data_source_id',
'options.resource_type',
'domain_id'
]
}
| 2.328125 | 2 |
pysaintcoinach/xiv/territory_type.py | icykoneko/saintcoinach-py | 7 | 12768531 | from ..ex.relational import IRelationalRow
from . import xivrow, XivRow, IXivSheet
@xivrow
class TerritoryType(XivRow):
_weather_groups = None
_maps_by_index = None
@property
def name(self): return self.as_string('Name')
@property
def bg(self): return self.as_string('Bg')
@property
def map(self):
from .map import Map
return self.as_T(Map)
@property
def place_name(self):
from .placename import PlaceName
return self.as_T(PlaceName, 'PlaceName')
@property
def region_place_name(self):
from .placename import PlaceName
return self.as_T(PlaceName, 'PlaceName{Region}')
@property
def zone_place_name(self):
from .placename import PlaceName
return self.as_T(PlaceName, 'PlaceName{Zone}')
@property
def weather_rate(self):
if self._weather_rate is not None:
return self._weather_rate
rate_key = self.as_int32('WeatherRate')
try:
self._weather_rate = self.sheet.collection.get_sheet('WeatherRate')[rate_key]
return self._weather_rate
except KeyError:
# Weather Groups appear to be deprecated.
# Just throw an error
raise
def __init__(self, sheet: IXivSheet, source_row: IRelationalRow):
super(TerritoryType, self).__init__(sheet, source_row)
self._weather_rate = None
self._maps_by_index = None
def get_related_map(self, index: int):
if self._maps_by_index is None:
self._maps_by_index = self._build_map_index()
_map = self._maps_by_index.get(index)
if _map is not None:
return _map
# Fallback to the default map. This may not be accurate.
return self.map
def _build_weather_groups(self):
_map = {}
for weather_group in self.sheet.collection.get_sheet('WeatherGroup'):
if weather_group.key != 0:
continue
_map[weather_group.parent_row.key] = weather_group['WeatherRate']
return _map
def _build_map_index(self):
_maps = filter(lambda m: m['TerritoryType'] == self.key,
self.sheet.collection.get_sheet('Map'))
_index = {}
for _map in _maps:
map_id = str(_map.as_string('Id'))
if map_id is None or map_id == '':
continue
map_index = map_id[map_id.index('/') + 1:]
converted_index = int(map_index)
if converted_index in _index:
continue # skip it for now
_index[converted_index] = _map
return _index
| 2.0625 | 2 |
Compressing data/functions_1.py | zhengzhang96/Hardware-efficient-MUA-compression | 0 | 12768532 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 9 20:00:55 2021
@author: oscar
"""
import numpy as np
import math
def bin_MUA_data(MUA,bin_res):
counter = 0
binned_MUA = np.zeros([math.ceil(len(MUA[:,1])/bin_res),len(MUA[1,:])])
for bin in range(math.ceil(len(MUA[:,1])/bin_res)):
if bin != math.ceil(len(MUA[:,1])/bin_res):
temp = np.sum(MUA[counter:counter+bin_res,:],0)
else:
temp = np.sum(MUA[counter:len(MUA[:,1]),:],0)
binned_MUA[bin,:] = temp
counter = counter + bin_res
binned_MUA = binned_MUA.astype(int)
return binned_MUA
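# Example for bin_MUA_data: a single channel with samples [1, 0, 2, 1, 3] and
# bin_res=2 is binned into [1, 3, 3] -- the last bin sums the leftover samples.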
def online_histogram_w_sat_based_nb_of_samples(data_in,sample_val_cutoff, max_firing_rate):
# We consider the histogram to be full when "sample_val_cutoff" values have
# been entered into it.
# Inputs:
# data_in = 1d vector of MUA data from 1 channel.
# sample_val_cutoff = how mnay values the histogram will measure until we
# consider the histogram training period to have ended.
# max_firing_rate: S-1, max value that we consider in the MUA data.
# Outputs:
# approx sorted histogram, how many samples we measure (just for testing purposes)
hist = {'0':0}
flag_1 = False
i = 0
while not flag_1: # the histogram isn't full yet
# Saturate the histogram at the max firing rate
if data_in[i] >= max_firing_rate:
data_in[i] = max_firing_rate
symbol = str(data_in[i])
if symbol in hist: # If this symbol is represented in the histogram
hist[symbol] += 1
else: # If this symbol is new in the histogram
hist[symbol] = 1
# If the histogram is full, end the while loop
hist_count = 0
for symbol_hist in hist:
hist_count += int(hist.get(str(symbol_hist)))
if hist_count > sample_val_cutoff-1:
flag_1 = True
# If we've exceeded the number of samples in the data, end the while loop
if i+1 == len(data_in):
flag_1 = True
i += 1 # Increment counter
return hist, i
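# Example for online_histogram_w_sat_based_nb_of_samples: with
# data_in = [0, 1, 1, 3, 0, 0], sample_val_cutoff = 4 and max_firing_rate = 2,
# the first four samples fill the histogram (the 3 saturates to 2) and the
# function returns ({'0': 1, '1': 2, '2': 1}, 4).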
# Approx sort used in the work, where the histogram is assumed to follow a
# unimodal distribution. The peak in the histogram is identified and given an
# index of 0, and values on either side are iteratively assigned the next
# indices.
def approx_sort(hist):
idx = np.arange(0,len(hist))
p_idx = np.argmax(hist)
if (p_idx>len(hist)/2): # peak shows on right half
right = np.arange(2,(len(hist)-1-p_idx)*2+1,2) #idx on the right (even or odd doesn't matter)
idx = np.delete(idx,right) # remove used idx
left = idx
else: # peak shows on left half
left = np.arange(1,(2*p_idx-1)+1,2)
idx = np.delete(idx,left)
right = idx
idx = np.hstack((np.flip(left),right))
idx = np.argsort(idx)
return idx.astype(int), hist[idx.astype(int)]
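# Example for approx_sort: hist = np.array([1, 4, 9, 5, 2]) peaks at index 2;
# the function returns indices [2, 1, 3, 0, 4] and the reordered histogram
# [9, 4, 5, 1, 2] -- the peak first, then alternating neighbours.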
| 2.8125 | 3 |
gridpath/project/operations/carbon_cap.py | nmgeek/gridpath | 0 | 12768533 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Carbon emissions from each carbonaceous project.
"""
import csv
import os.path
from pyomo.environ import Param, Set
from gridpath.auxiliary.auxiliary import cursor_to_df, subset_init_by_param_value
from gridpath.auxiliary.db_interface import (
update_prj_zone_column,
determine_table_subset_by_start_and_column,
)
from gridpath.auxiliary.validations import write_validation_to_database, validate_idxs
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
The following Pyomo model components are defined in this module:
+-------------------------------------------------------------------------+
| Sets |
+=========================================================================+
| | :code:`CRBN_PRJS` |
| | *Within*: :code:`PROJECTS` |
| |
| Two set of carbonaceous projects we need to track for the carbon cap. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Required Input Params |
+=========================================================================+
| | :code:`carbon_cap_zone` |
| | *Defined over*: :code:`CRBN_PRJS` |
| | *Within*: :code:`CARBON_CAP_ZONES` |
| |
| This param describes the carbon cap zone for each carbonaceous project. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Derived Sets |
+=========================================================================+
| | :code:`CRBN_PRJS_BY_CARBON_CAP_ZONE` |
| | *Defined over*: :code:`CARBON_CAP_ZONES` |
| | *Within*: :code:`CRBN_PRJS` |
| |
| Indexed set that describes the list of carbonaceous projects for each |
| carbon cap zone. |
+-------------------------------------------------------------------------+
| | :code:`CRBN_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| Two-dimensional set that defines all project-timepoint combinations |
| when a carbonaceous project can be operational. |
+-------------------------------------------------------------------------+
"""
# Sets
###########################################################################
m.CRBN_PRJS = Set(within=m.PROJECTS)
# Input Params
###########################################################################
m.carbon_cap_zone = Param(m.CRBN_PRJS, within=m.CARBON_CAP_ZONES)
# Derived Sets
###########################################################################
m.CRBN_PRJS_BY_CARBON_CAP_ZONE = Set(
m.CARBON_CAP_ZONES,
within=m.CRBN_PRJS,
initialize=lambda mod, co2_z: subset_init_by_param_value(
mod, "CRBN_PRJS", "carbon_cap_zone", co2_z
),
)
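    # Illustration (hypothetical data): if carbon_cap_zone maps
    # {"Gas_CCGT": "Zone1", "Coal": "Zone1", "Nuclear": "Zone2"}, then
    # CRBN_PRJS_BY_CARBON_CAP_ZONE["Zone1"] holds Gas_CCGT and Coal.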
m.CRBN_PRJ_OPR_TMPS = Set(
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [
(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS if p in mod.CRBN_PRJS
],
)
# Input-Output
###############################################################################
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:param data_portal:
:param scenario_directory:
:param subproblem:
:param stage:
:return:
"""
data_portal.load(
filename=os.path.join(
scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab"
),
select=("project", "carbon_cap_zone"),
param=(m.carbon_cap_zone,),
)
data_portal.data()["CRBN_PRJS"] = {
None: list(data_portal.data()["carbon_cap_zone"].keys())
}
# Database
###############################################################################
def get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn):
"""
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
subproblem = 1 if subproblem == "" else subproblem
stage = 1 if stage == "" else stage
c = conn.cursor()
project_zones = c.execute(
"""SELECT project, carbon_cap_zone
FROM
-- Get projects from portfolio only
(SELECT project
FROM inputs_project_portfolios
WHERE project_portfolio_scenario_id = {}
) as prj_tbl
LEFT OUTER JOIN
-- Get carbon cap zones for those projects
(SELECT project, carbon_cap_zone
FROM inputs_project_carbon_cap_zones
WHERE project_carbon_cap_zone_scenario_id = {}
) as prj_cc_zone_tbl
USING (project)
-- Filter out projects whose carbon cap zone is not one included in
-- our carbon_cap_zone_scenario_id
WHERE carbon_cap_zone in (
SELECT carbon_cap_zone
FROM inputs_geography_carbon_cap_zones
WHERE carbon_cap_zone_scenario_id = {}
);
""".format(
subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
subscenarios.PROJECT_CARBON_CAP_ZONE_SCENARIO_ID,
subscenarios.CARBON_CAP_ZONE_SCENARIO_ID,
)
)
return project_zones
def write_model_inputs(
scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
):
"""
Get inputs from database and write out the model input
projects.tab file (to be precise, amend it).
:param scenario_directory: string, the scenario directory
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
project_zones = get_inputs_from_database(
scenario_id, subscenarios, subproblem, stage, conn
)
# Make a dict for easy access
prj_zone_dict = dict()
for (prj, zone) in project_zones:
prj_zone_dict[str(prj)] = "." if zone is None else str(zone)
with open(
os.path.join(
scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab"
),
"r",
) as projects_file_in:
reader = csv.reader(projects_file_in, delimiter="\t", lineterminator="\n")
new_rows = list()
# Append column header
header = next(reader)
header.append("carbon_cap_zone")
new_rows.append(header)
# Append correct values
for row in reader:
# If project specified, check if BA specified or not
if row[0] in list(prj_zone_dict.keys()):
row.append(prj_zone_dict[row[0]])
new_rows.append(row)
# If project not specified, specify no BA
else:
row.append(".")
new_rows.append(row)
with open(
os.path.join(
scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab"
),
"w",
newline="",
) as projects_file_out:
writer = csv.writer(projects_file_out, delimiter="\t", lineterminator="\n")
writer.writerows(new_rows)
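# Illustrative sketch of the amendment above (hypothetical project names):
# with prj_zone_dict = {"Gas_CCGT": "Zone1"}, a row ["Gas_CCGT", ...] gains
# "Zone1" as its last column, while a row for a project absent from the dict
# gains ".", the placeholder for "no carbon cap zone".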
def process_results(db, c, scenario_id, subscenarios, quiet):
"""
:param db:
:param c:
:param subscenarios:
:param quiet:
:return:
"""
if not quiet:
print("update carbon cap zones")
tables_to_update = determine_table_subset_by_start_and_column(
conn=db, tbl_start="results_project_", cols=["carbon_cap_zone"]
)
for tbl in tables_to_update:
update_prj_zone_column(
conn=db,
scenario_id=scenario_id,
subscenarios=subscenarios,
subscenario="project_carbon_cap_zone_scenario_id",
subsc_tbl="inputs_project_carbon_cap_zones",
prj_tbl=tbl,
col="carbon_cap_zone",
)
# Validation
###############################################################################
def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn):
"""
Get inputs from database and validate the inputs
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
project_zones = get_inputs_from_database(
scenario_id, subscenarios, subproblem, stage, conn
)
# Convert input data into pandas DataFrame
df = cursor_to_df(project_zones)
zones_w_project = df["carbon_cap_zone"].unique()
# Get the required carbon cap zones
# TODO: make this into a function similar to get_projects()?
# could eventually centralize all these db query functions in one place
c = conn.cursor()
zones = c.execute(
"""SELECT carbon_cap_zone FROM inputs_geography_carbon_cap_zones
WHERE carbon_cap_zone_scenario_id = {}
""".format(
subscenarios.CARBON_CAP_ZONE_SCENARIO_ID
)
)
zones = [z[0] for z in zones] # convert to list
# Check that each carbon cap zone has at least one project assigned to it
write_validation_to_database(
conn=conn,
scenario_id=scenario_id,
subproblem_id=subproblem,
stage_id=stage,
gridpath_module=__name__,
db_table="inputs_project_carbon_cap_zones",
severity="High",
errors=validate_idxs(
actual_idxs=zones_w_project,
req_idxs=zones,
idx_label="carbon_cap_zone",
msg="Each carbon cap zone needs at least 1 " "project assigned to it.",
),
)
# TODO: need validation that projects with carbon cap zones also have fuels
| 1.851563 | 2 |
webtopay/models.py | motiejus/django-webtopay | 2 | 12768534 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from webtopay.signals import payment_was_successful, payment_was_flagged
class WebToPayResponse(models.Model):
# Non-webtopay params
def __unicode__(self):
        # Use float division so cents survive under Python 2.
        amount = self.amount / 100.0 if self.amount else 0
return "%s %.2f" % (self.currency, amount)
query = models.TextField(blank=True)
ipaddress = models.IPAddressField(blank=True)
flag = models.BooleanField(blank=True, default=False)
flag_info = models.TextField(blank=True)
    # Fields received from the server
    projectid = models.BigIntegerField(null=True,
            help_text="Unique project number. "+\
                    "Only approved projects can accept payments")
    orderid = models.CharField(max_length=40,
            help_text="Order number from your system")
    lang = models.CharField(max_length=3, blank=True,
            help_text="The user's language may be specified; if mokejimai.lt "+\
                    "does not support it, the language is chosen from the "+\
                    "visitor's IP address, falling back to English. "+\
                    "(LIT, LAV, EST, RUS, ENG, GER, POL)")
    amount = models.BigIntegerField(null=True,
            help_text="Amount in cents that the client has to pay")
    currency = models.CharField(max_length=3,
            help_text="Payment currency (LTL, USD, EUR) in which you want "+\
                    "the client to pay. If the chosen payment method cannot "+\
                    "accept the specified currency, the system automatically "+\
                    "converts it to a supported currency at the daily rate. "+\
                    "The response to your site will carry payamount and "+\
                    "paycurrency")
    payment = models.CharField(max_length=20,
            help_text="Payment method. Normally nothing is specified here "+\
                    "(left empty) and the user is shown a table with the "+\
                    "list of payment methods to choose from. Use it only "+\
                    "when the payment must go through one specific "+\
                    "payment method")
    country = models.CharField(max_length=2,
            help_text="Payer's country (LT, EE, LV, GB, PL, DE). If a "+\
                    "country is given, the payer is immediately shown the "+\
                    "payment methods available in it. If not, the system "+\
                    "determines the country from the payer's IP address. "+\
                    "The payer can still change the country")
    paytext = models.TextField(
            help_text="Payment purpose, visible when the transfer is made.")
    _ss2 = models.CharField(blank=True, max_length=255,
            help_text="Parameter used to check that the response really "+\
                    "came from our server. This is the highest-reliability "+\
                    "verification method. Download the example script")
    _ss1 = models.CharField(blank=True, max_length=64,
            help_text="Parameter used to check that the response really "+\
                    "came from our server. This verification method is "+\
                    "less reliable than _ss2. Download the example")
    name = models.CharField(max_length=255, blank=True,
            help_text="Payer's first name received from the payment system. "+\
                    "Sent only if the payment system provides it")
    surename = models.CharField(max_length=255, blank=True,
            help_text="Payer's last name received from the payment system. "+\
                    "Sent only if the payment system provides it")
    status = models.IntegerField(max_length=255, help_text="Payment status: "+\
            "0 - payment did not occur, "+\
            "1 - paid successfully, "+\
            "2 - payment order accepted but not yet executed",
choices=((0, _('payment did not succeed')),
(1, _('payment succeeded')),
(2, _('payment accepted, but not yet processed'))),
default=0
)
# Error codes are stored separately
    error = models.CharField(max_length=20, blank=True,
            help_text="Error code")
    test = models.SmallIntegerField(choices=((0, 'Production'), (1, 'Test')),
            null=True,
            help_text="Parameter that enables testing the integration: the "+\
                    "payment is not executed and the result is returned "+\
                    "immediately, as if it had been paid. To test, test "+\
                    "mode must be enabled for the specific project after "+\
                    "logging in: \"Service management\" -> \"Payment "+\
                    "collection\" (for the specific project) -> \"Allow "+\
                    "test payments\" (check it)")
# In API 1.4, this field has zero allowed length, therefore textfield...
    p_email = models.TextField(
            help_text="Buyer's email is required. If the address is not "+\
                    "received, the client will be asked to enter it. The "+\
                    "mokejimai.lt system uses this address to inform the "+\
                    "payer about the payment status")
    requestid = models.CharField(max_length=40, blank=True,
            help_text="Request number that we receive when the user clicks "+\
                    "on a bank; it is passed to the URL given in the "+\
                    "\"callbackurl\" field")
    payamount = models.IntegerField(null=True,
            help_text="Amount in cents that was transferred. May differ if "+\
                    "it was converted to another currency")
# This is suspicious. According to the spec, its max length is 0. So in
# theory, it should be a text field.
    paycurrency = models.CharField(max_length=10,
            help_text="Payment currency (LTL, USD, EUR) that was "+\
                    "transferred. May differ from the one you requested if "+\
                    "the chosen payment method could not accept it")
    version = models.CharField(max_length=9,
            help_text="Version number of the mokejimai.lt payment system "+\
                    "specification (API)")
"""
0x1 - Mokėjimo suma per maža
0x2 - Mokėjimo suma per didelė
0x3 - Nurodyta valiuta neaptarnaujama
0x4 - Nėra sumos arba valiutos
0x6 - Neįrašytas projectID arba tokio ID nėra
0x7 - Išjungtas testavimo rėžimas, tačiau mėginote atlikti testinį mokėjimą
0x8 - Jūs uždraudėte šį mokėjimo būdą
0x9 - Blogas "paytext" kintamojo kodavimas (turi būti utf-8)
0x10 - Tuščias arba neteisingai užpildytas "orderID"
0x11xError - Toks projektas neegzistuoja
0x11x0 - Projektas nėra patikrintas mūsų administratoriaus
0x11x2 - Projektas yra sustabdytas kliento
0x11x4 - Projektas yra blokuotas mūsų administratoriaus
0x11x5 - Projektas yra ištrintas iš mūsų sistemos
0x12 - Negautas projectid (projekto numeris) parametras, nors jis yra privalomas
0x13 - Accepturl, cancellurl, callbacurl arba referer bazinis adresas skiriasi nuo projekte patvirtintų adresų
0x14 - Klaidingas "sign" parametras
0x15 - Klaidingi kai kurie iš perduotų parametrų
0x15x0 - Neteisingas vienas iš šių parametrų: cancelurl, accepturl, callbackurl
"""
def set_flag(self, info):
self.flag = True
self.flag_info += info
def send_signals(self):
if self.flag:
payment_was_flagged.send(sender=self)
else:
payment_was_successful.send(sender=self)
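# A minimal usage sketch (illustrative values; `signature_ok` is hypothetical):
#
#     response = WebToPayResponse(orderid="42", status=1, amount=1000,
#                                 currency="LTL", p_email="buyer@example.com")
#     if not signature_ok:
#         response.set_flag("signature mismatch")
#     response.save()
#     response.send_signals()  # fires payment_was_successful or payment_was_flagged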
| 1.976563 | 2 |
code_week6_61_614/ping_heng_er_cha_shu_lcof.py | dylanlee101/leetcode | 0 | 12768535 | '''
Given the root node of a binary tree, determine whether the tree is a balanced
binary tree. A binary tree is balanced if, for every node, the depths of its
left and right subtrees differ by at most 1.

Example 1:

Given the binary tree [3,9,20,null,null,15,7]

    3
   / \
  9  20
    /  \
   15   7

returns true.

Example 2:

Given the binary tree [1,2,2,3,3,null,null,4,4]

       1
      / \
     2   2
    / \
   3   3
  / \
 4   4

returns false.

Source: LeetCode
Link: https://leetcode-cn.com/problems/ping-heng-er-cha-shu-lcof
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
        def recur(root):
            if not root:
                return 0
            left = recur(root.left)
            if left == -1:
                return -1
            right = recur(root.right)
            if right == -1:
                return -1
            return max(left, right) + 1 if abs(left - right) <= 1 else -1
return recur(root) != -1
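# A small self-check sketch (uses the TreeNode definition commented above):
#
#     root = TreeNode(3)
#     root.left = TreeNode(9)
#     root.right = TreeNode(20)
#     root.right.left = TreeNode(15)
#     root.right.right = TreeNode(7)
#     print(Solution().isBalanced(root))  # -> True, as in Example 1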
| 3.53125 | 4 |
flask_cognito_auth/decorators.py | ankit-shrivastava/flask-cognito-auth | 1 | 12768536 | #!/usr/bin/env python3
"""
This module provides the decorators for the AWS Cognito login / logout
features. On successful login, it adds "groups" to the session object if the
user belongs to any AWS Cognito group. This helps the application with
authorization.
"""
import logging
import json
import requests
from requests.auth import HTTPBasicAuth
from functools import wraps
from flask import redirect
from flask import request
from jose import jwt
from .config import Config
from flask import session
from flask import url_for
logger = logging.getLogger(__name__)
config = Config()
def login_handler(fn):
"""
A decorator to redirect users to AWS Cognito login if they aren't already.
If already logged in user will redirect redirect uri.
Use this decorator on the login endpoint.
This handle will not return to handle the respose rather redirect to
redirect uri.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
aws_cognito_login = config.login_uri
# https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
res = redirect(aws_cognito_login)
logger.info("Got Cognito Login, redirecting to AWS Cognito for Auth")
return res
return wrapper
def callback_handler(fn):
"""
    A decorator that handles redirects from AWS Cognito login and signup. It
    verifies the CSRF state and exchanges the authorization code for tokens.
    This decorator also stores basic information in the Flask session:
    * username
    * groups (list of Cognito groups, if any)
    * id
    * email
    * expires
    * refresh_token
    Use this decorator on the redirect endpoint of your application.
    """
@wraps(fn)
def wrapper(*args, **kwargs):
auth_success = False
logger.info("Login is successfull from AWS Cognito.")
logger.info(
"Authenticating AWS Cognito application / client, with code exchange.")
csrf_token = config.state
csrf_state = None
if csrf_token:
csrf_state = request.args.get('state')
code = request.args.get('code')
request_parameters = {'grant_type': 'authorization_code',
'client_id': config.client_id,
'code': code,
"redirect_uri": config.redirect_uri}
response = requests.post(config.jwt_code_exchange_uri,
data=request_parameters,
auth=HTTPBasicAuth(config.client_id,
config.client_secret))
# the response:
# http://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
if response.status_code == requests.codes.ok:
logger.info("Code exchange is successfull.")
logger.info("Validating CSRF state exchange of AWS Cognito")
if csrf_state == csrf_token:
auth_success = True
if csrf_token:
logger.info(
"CSRF state validation successfull. Login is successfull for AWS Cognito")
logger.info("Decode the access token from response.")
verify(response.json()["access_token"])
id_token = verify(
response.json()["id_token"], response.json()["access_token"])
username = id_token["cognito:username"]
groups = None
if "cognito:groups" in id_token:
groups = id_token['cognito:groups']
update_session(username=username,
id=id_token["sub"],
groups=groups,
email=id_token["email"],
expires=id_token["exp"],
refresh_token=response.json()["refresh_token"])
if not auth_success:
error_uri = config.redirect_error_uri
if error_uri:
resp = redirect(url_for(error_uri))
return resp
else:
msg = f"Somthing went wrong during authentication"
return json.dumps({'Error': msg}), 500
return fn(*args, **kwargs)
return wrapper
def update_session(username: str, id, groups, email: str, expires, refresh_token):
"""
    Update the Flask session object with information gathered after a
    successful login.
    :param username (str): AWS Cognito authenticated user.
    :param id (str): ID of the AWS Cognito authenticated user.
    :param groups (list): List of AWS Cognito groups the authenticated
                          user belongs to, if any.
    :param email (str): Email of the AWS Cognito authenticated user.
    :param expires (str): AWS Cognito session timeout.
    :param refresh_token (str): JWT refresh token received in the response.
    """
session['username'] = username
session['id'] = id
session['groups'] = groups
session['email'] = email
session['expires'] = expires
session['refresh_token'] = refresh_token
def verify(token: str, access_token: str = None):
"""
    Verify a JWT string's signature and validate reserved claims.
    Get the key id from the header, locate it in the Cognito keys and verify
    the signature with that key.
    :param token (str): A signed JWS to be verified.
    :param access_token (str): An access token string. If the "at_hash" claim
                               is included in the claims set, the access
                               token is required to validate it.
    :return id_token (dict): The dict representation of the claims set,
                             assuming the signature is valid and all
                             requested data validation passes.
    """
header = jwt.get_unverified_header(token)
key = [k for k in config.jwt_cognito_key if k["kid"] == header['kid']][0]
id_token = jwt.decode(token,
key,
audience=config.client_id,
access_token=access_token)
return id_token
def logout_handler(fn):
"""
    A decorator that logs out from AWS Cognito and returns to the sign-out URI.
    Use this decorator on the Cognito logout endpoint.
    This handler does not build a response itself; it redirects to the
    sign-out URI.
    This decorator also clears the basic information from the Flask session:
    * username
    * groups (list of Cognito groups, if any)
    * id
    * email
    * expires
    * refresh_token
    """
@wraps(fn)
def wrapper(*args, **kwargs):
update_session(username=None,
id=None,
groups=None,
email=None,
expires=None,
refresh_token=None)
        logger.info(
            "Redirecting to AWS Cognito for logout and terminating the session")
aws_cognito_logout = config.logout_uri
# https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
res = redirect(aws_cognito_logout)
return res
return wrapper
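# A minimal wiring sketch (hypothetical app and endpoint names; the real
# endpoint names and Config values depend on your application):
#
#     from flask import Flask, session
#
#     app = Flask(__name__)
#
#     @app.route("/login")
#     @login_handler
#     def login():
#         pass  # never reached; the decorator redirects to Cognito
#
#     @app.route("/callback")
#     @callback_handler
#     def callback():
#         return "Logged in as %s" % session["username"]
#
#     @app.route("/logout")
#     @logout_handler
#     def logout():
#         pass  # never reached; the decorator redirects to Cognito logout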
| 2.953125 | 3 |
challenges/array_shift/test_array_shift.py | ravewillow6383/data-structures-and-algorithms-python | 0 | 12768537 | from array_shift import insert_shift_array
# def test_insert_to_middle():
# expected = [1, 2, 3]
# actual = insert_shift_array([1, 3], 2)
# assert expected == actual
def test_insert_to_middle():
expected = [1, 2, 3, 4, 5, 6]
actual = insert_shift_array([1, 2, 3, 5, 6], 4)
assert expected == actual
| 3.328125 | 3 |
tests/pages/login_page.py | Schveitzer/selenium-python-bdd-behave-example | 0 | 12768538 | from selenium.webdriver.common.by import By
from browser import Driver
class LoginPage(Driver):
instance = None
@classmethod
def get_instance(cls):
if cls.instance is None:
cls.instance = LoginPage()
return cls.instance
def open(self, context):
self.driver.get(context.baseUrl)
def button_login(self):
return self.driver.find_element(By.CLASS_NAME, "login")
def input_email(self):
return self.driver.find_element(By.ID, "email")
def input_password(self):
return self.driver.find_element(By.ID, "passwd")
def button_submit_login(self):
return self.driver.find_element(By.ID, "SubmitLogin")
def welcome_message(self):
return self.driver.find_element(By.CLASS_NAME, "info-account")
def invalid_credentials_message(self):
return self.driver.find_element(By.XPATH, "//li[text() ='Authentication failed.']")
def button_sign_in_out(self):
return self.driver.find_element(By.XPATH, "//a[contains(@class, 'log')]")
def user_name(self):
return self.driver.find_element(By.XPATH, "//div[@class='header_user_info']//span")
def login(self, user, password):
self.button_login().click()
self.input_email().send_keys(user)
self.input_password().send_keys(password)
self.button_submit_login().click()
LOGIN_PAGE = LoginPage.get_instance()
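# Step definitions can then share the singleton, e.g. (import path assumed):
#
#     from pages.login_page import LOGIN_PAGE
#     LOGIN_PAGE.open(context)
#     LOGIN_PAGE.login("user@example.com", "secret")
#     assert "user" in LOGIN_PAGE.user_name().text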
| 2.9375 | 3 |
rlkit/torch/irl/fetch_task_design.py | yifan-you-37/rl_swiss | 56 | 12768539 | from collections import OrderedDict
import math
from numbers import Number  # math and Number are needed by log_sum_exp below
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from rlkit.core import logger
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_meta_irl_algorithm import TorchMetaIRLAlgorithm
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.core.train_util import linear_schedule
from rlkit.torch.core import PyTorchModule
from rlkit.torch.sac.policies import PostCondMLPPolicyWrapper
from rlkit.data_management.path_builder import PathBuilder
from gym.spaces import Dict
from rlkit.torch.irl.encoders.aggregators import sum_aggregator
from rlkit.torch.distributions import ReparamMultivariateNormalDiag
OUTER_RADIUS = 2.0
TASK_RADIUS = 2.0
SAME_COLOUR_RADIUS = 1.0
def concat_trajs(trajs):
new_dict = {}
for k in trajs[0].keys():
if isinstance(trajs[0][k], dict):
new_dict[k] = concat_trajs([t[k] for t in trajs])
else:
new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0)
return new_dict
def subsample_traj(traj, num_samples):
traj_len = traj['observations'].shape[0]
idxs = np.random.choice(traj_len, size=num_samples, replace=traj_len<num_samples)
new_traj = {k: traj[k][idxs,...] for k in traj}
return new_traj
class R2ZMap(PyTorchModule):
def __init__(
self,
r_dim,
z_dim,
hid_dim,
# this makes it be closer to deterministic, makes it easier to train
# before we turn on the KL regularization
LOG_STD_SUBTRACT_VALUE=2.0
):
self.save_init_params(locals())
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(r_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Linear(hid_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
)
self.mean_fc = nn.Linear(hid_dim, z_dim)
self.log_sig_fc = nn.Linear(hid_dim, z_dim)
self.LOG_STD_SUBTRACT_VALUE = LOG_STD_SUBTRACT_VALUE
print('LOG STD SUBTRACT VALUE IS FOR APPROX POSTERIOR IS %f' % LOG_STD_SUBTRACT_VALUE)
def forward(self, r):
trunk_output = self.trunk(r)
mean = self.mean_fc(trunk_output)
log_sig = self.log_sig_fc(trunk_output) - self.LOG_STD_SUBTRACT_VALUE
return mean, log_sig
class Encoder(PyTorchModule):
def __init__(self, z_dim):
self.save_init_params(locals())
super().__init__()
HID_DIM = 64
self.encoder_mlp = nn.Sequential(
nn.Linear(6, HID_DIM),
nn.BatchNorm1d(HID_DIM),
nn.ReLU(),
nn.Linear(HID_DIM, HID_DIM),
nn.BatchNorm1d(HID_DIM),
nn.ReLU(),
nn.Linear(HID_DIM, HID_DIM)
)
self.agg = sum_aggregator
self.r2z_map = R2ZMap(HID_DIM, z_dim, HID_DIM)
def forward(self, context, mask):
N_tasks, N_max_cont, N_dim = context.size(0), context.size(1), context.size(2)
context = context.view(-1, N_dim)
embedded_context = self.encoder_mlp(context)
embed_dim = embedded_context.size(1)
embedded_context = embedded_context.view(N_tasks, N_max_cont, embed_dim)
agg = self.agg(embedded_context, mask)
post_mean, post_log_sig = self.r2z_map(agg)
return ReparamMultivariateNormalDiag(post_mean, post_log_sig)
class FetchTaskDesign():
def __init__(
self,
mlp,
num_tasks_used_per_update=5,
min_context_size=1,
max_context_size=5,
classification_batch_size_per_task=32,
encoder_lr=1e-3,
encoder_optimizer_class=optim.Adam,
mlp_lr=1e-3,
mlp_optimizer_class=optim.Adam,
num_update_loops_per_train_call=1000,
num_epochs=10000,
z_dim=16,
**kwargs
):
self.mlp = mlp
self.encoder = Encoder(z_dim)
self.num_tasks_used_per_update = num_tasks_used_per_update
self.min_context_size = min_context_size
self.max_context_size = max_context_size
self.classification_batch_size_per_task = classification_batch_size_per_task
self.encoder_optimizer = encoder_optimizer_class(
self.encoder.parameters(),
lr=encoder_lr,
betas=(0.9, 0.999)
)
self.mlp_optimizer = mlp_optimizer_class(
self.mlp.parameters(),
lr=mlp_lr,
betas=(0.9, 0.999)
)
self.bce = nn.BCEWithLogitsLoss()
self.num_update_loops_per_train_call = num_update_loops_per_train_call
self.num_epochs = num_epochs
def _sample_color_within_radius(self, center, radius):
new_color = self._uniform_sample_from_sphere(radius) + center
while np.linalg.norm(new_color) > OUTER_RADIUS:
new_color = self._uniform_sample_from_sphere(radius) + center
return new_color
def _uniform_sample_from_sphere(self, radius):
x = np.random.normal(size=3)
x /= np.linalg.norm(x, axis=-1)
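        # x is now uniform on the unit sphere; below, scaling the radius by
        # u**(1/3) makes the point uniform over the ball's volume, since the
        # radius CDF inside a 3-ball is (r/R)**3.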
r = radius
u = np.random.uniform()
sampled_color = r * (u**(1.0/3.0)) * x
return sampled_color
def _sample_color_with_min_dist(self, color, min_dist):
new_color = self._uniform_sample_from_sphere(OUTER_RADIUS)
while np.linalg.norm(new_color - color, axis=-1) < min_dist:
new_color = self._uniform_sample_from_sphere(OUTER_RADIUS)
return new_color
def _get_training_batch(self):
task_colors = []
for _ in range(self.num_tasks_used_per_update):
task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS))
task_colors = np.array(task_colors)
input_batch = []
labels = []
for task in task_colors:
for _ in range(self.classification_batch_size_per_task):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
if np.random.uniform() > 0.5:
input_batch.append(np.concatenate((good, bad)))
labels.append([1.0])
else:
input_batch.append(np.concatenate((bad, good)))
labels.append([0.0])
input_batch = Variable(ptu.from_numpy(np.array(input_batch)))
labels = Variable(ptu.from_numpy(np.array(labels)))
context = []
mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1))))
for task_num, task in enumerate(task_colors):
task_context = []
for _ in range(self.max_context_size):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
# always the same order because it's the context
task_context.append(np.concatenate((good, bad)))
context.append(task_context)
con_size = np.random.randint(self.min_context_size, self.max_context_size+1)
mask[task_num,:con_size,:] = 1.0
context = Variable(ptu.from_numpy(np.array(context)))
return context, mask, input_batch, labels
def _get_eval_batch(self):
task_colors = []
for _ in range(self.num_tasks_used_per_update):
task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS))
task_colors = np.array(task_colors)
# task_colors = np.zeros((self.num_tasks_used_per_update, 3)) # THIS
# task_colors[:,0] = -1.0 # THIS
input_batch = []
labels = []
for task in task_colors:
for _ in range(self.classification_batch_size_per_task):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, SAME_COLOUR_RADIUS)
if np.random.uniform() > 0.5:
input_batch.append(np.concatenate((good, bad)))
labels.append([1.0])
else:
input_batch.append(np.concatenate((bad, good)))
labels.append([0.0])
input_batch = Variable(ptu.from_numpy(np.array(input_batch)))
labels = Variable(ptu.from_numpy(np.array(labels)))
context = []
mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1))))
for task_num, task in enumerate(task_colors):
task_context = []
for _ in range(self.max_context_size):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
# good = np.zeros(3) # THIS
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
# bad = np.array([2.0, 0.0, 0.0]) # THIS
# always the same order because it's the context
task_context.append(np.concatenate((good, bad)))
context.append(task_context)
con_size = np.random.randint(self.min_context_size, self.max_context_size+1)
mask[task_num,:con_size,:] = 1.0
context = Variable(ptu.from_numpy(np.array(context)))
return context, mask, input_batch, labels
def train(self):
for e in range(self.num_epochs):
self._do_training(e, self.num_update_loops_per_train_call)
self.evaluate()
def _do_training(self, epoch, num_updates):
'''
        Train the encoder and the classification MLP
'''
self.mlp.train()
self.encoder.train()
for _ in range(num_updates):
self.encoder_optimizer.zero_grad()
self.mlp_optimizer.zero_grad()
# prep the batches
context, mask, input_batch, labels = self._get_training_batch()
post_dist = self.encoder(context, mask)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1))
mlp_input = torch.cat([input_batch, repeated_z], dim=-1)
preds = self.mlp(mlp_input)
loss = self.bce(preds, labels)
loss.backward()
self.mlp_optimizer.step()
self.encoder_optimizer.step()
def evaluate(self):
eval_statistics = OrderedDict()
self.mlp.eval()
self.encoder.eval()
for i in range(1, 12):
# prep the batches
# context, mask, input_batch, labels = self._get_training_batch()
context, mask, input_batch, labels = self._get_eval_batch()
post_dist = self.encoder(context, mask)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1))
mlp_input = torch.cat([input_batch, repeated_z], dim=-1)
preds = self.mlp(mlp_input)
class_preds = (preds > 0).type(preds.data.type())
accuracy = (class_preds == labels).type(torch.FloatTensor).mean()
eval_statistics['Acc for %d' % i] = np.mean(ptu.get_numpy(accuracy))
# for key, value in eval_statistics.items():
# logger.record_tabular(key, value)
# logger.dump_tabular(with_prefix=False, with_timestamp=False)
print(np.mean(list(eval_statistics.values())))
def cuda(self):
self.encoder.cuda()
self.mlp.cuda()
def cpu(self):
self.encoder.cpu()
self.mlp.cpu()
def _elem_or_tuple_to_variable(elem_or_tuple):
if isinstance(elem_or_tuple, tuple):
return tuple(
_elem_or_tuple_to_variable(e) for e in elem_or_tuple
)
return Variable(ptu.from_numpy(elem_or_tuple).float(), requires_grad=False)
def _filter_batch(np_batch):
for k, v in np_batch.items():
if v.dtype == np.bool:
yield k, v.astype(int)
else:
yield k, v
def np_to_pytorch_batch(np_batch):
return {
k: _elem_or_tuple_to_variable(x)
for k, x in _filter_batch(np_batch)
if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)
}
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
if isinstance(sum_exp, Number):
return m + math.log(sum_exp)
else:
return m + torch.log(sum_exp)
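# Quick sanity sketch (illustrative):
#
#     v = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
#     log_sum_exp(v, dim=1)                       # == v.exp().sum(1).log(), stably
#     log_sum_exp(v, dim=1, keepdim=True).shape   # torch.Size([2, 1])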
| 2.015625 | 2 |
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/forward-decl/TestForwardDecl.py | Polidea/SiriusObfuscator | 427 | 12768540 | """Test that a forward-declared class works when its complete definition is in a library"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ForwardDeclTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.m'
self.line = line_number(self.source, '// Set breakpoint 0 here.')
self.shlib_names = ["Container"]
@skipUnlessDarwin
def test_expr(self):
self.build()
# Create a target by the debugger.
target = self.dbg.CreateTarget("a.out")
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Register our shared libraries for remote targets so they get
# automatically uploaded
environment = self.registerSharedLibrariesWithTarget(
target, self.shlib_names)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
self.expect("expression [j getMember]", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 0x"])
| 2.078125 | 2 |
OOP/Classes and objectives excercise/To-do-list/project/task.py | petel3/Softuni_education | 2 | 12768541 | class Task:
    def __init__(self, name, due_date):
        self.name = name
        self.due_date = due_date
        self.comments = []
        self.completed = False

    def change_name(self, new_name: str):
        if self.name == new_name:
            return "Name cannot be the same."
        self.name = new_name
        return self.name

    def change_due_date(self, new_date: str):
        if self.due_date == new_date:
            return "Date cannot be the same."
        self.due_date = new_date
        return self.due_date

    def add_comment(self, comment: str):
        self.comments.append(comment)

    def edit_comment(self, comment_number: int, new_comment: str):
        # Valid indexes run from 0 to len(self.comments) - 1.
        if comment_number < 0 or comment_number >= len(self.comments):
            return "Cannot find comment."
        self.comments[comment_number] = new_comment
        return ", ".join(self.comments)

    def details(self):
        return f"Name: {self.name} - Due Date: {self.due_date}"
| 3.546875 | 4 |
whisperbackup/gcs.py | asvyatov/whisper-backup | 44 | 12768542 | #!/usr/bin/env python
#
# Copyright 2019 42 Lines, Inc.
# Original Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import __main__
import logging
from google.cloud import storage
logger = logging.getLogger(__main__.__name__)
# Google Cloud Storage
class GCS(object):
def __init__(self, bucket, project="", region="us", noop=False):
"""Setup the GCS storage backend with the bucket we will use and
optional region."""
if project == "":
self.client = storage.Client()
else:
self.client = storage.Client(project)
self.noop = noop
self.bucket = storage.Bucket(self.client, bucket)
self.bucket.location = region
self.bucket.storage_class = "STANDARD"
# Create the bucket if it doesn't exist
if not self.bucket.exists():
if not noop:
self.bucket.create()
else:
logger.info("No-Op: Create bucket: %s" % bucket)
def list(self, prefix=""):
"""Return all keys in this bucket."""
for i in self.client.list_blobs(self.bucket, prefix=prefix):
yield i.name
def get(self, src):
"""Return the contents of src from this bucket as a string."""
obj = storage.blob.Blob(src, self.bucket)
if not obj.exists():
return None
return obj.download_as_string()
def put(self, dst, data):
"""Store the contents of the string data at a key named by dst
in GCS."""
if self.noop:
logger.info("No-Op Put: %s" % dst)
else:
obj = storage.blob.Blob(dst, self.bucket)
obj.upload_from_string(data, content_type="application/octet-stream")
    def delete(self, src):
        """Delete the object in GCS referenced by the key name src."""
        if self.noop:
            logger.info("No-Op Delete: %s" % src)
        else:
            obj = storage.blob.Blob(src, self.bucket)
            obj.delete()
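# A minimal usage sketch (bucket and key names are illustrative; GCP
# credentials must be available in the environment):
#
#     store = GCS("my-whisper-backups", project="my-project")
#     store.put("graphite/host1.wsp.gz", data)
#     for key in store.list(prefix="graphite/"):
#         print(key)
#     blob = store.get("graphite/host1.wsp.gz")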
| 2.484375 | 2 |
ParsingProject/movie/migrations/0007_alter_movie_photo.py | rzhvn1/Parsing-APi | 0 | 12768543 | # Generated by Django 3.2 on 2021-04-12 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0006_alter_movie_rating'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='photo',
field=models.ImageField(upload_to='media'),
),
]
| 1.53125 | 2 |
src/pyinfrabox/testresult/__init__.py | agu3rra/InfraBox | 265 | 12768544 | from builtins import int, range
from pyinfrabox import ValidationError
from pyinfrabox.utils import *
def check_version(v, path):
if not isinstance(v, int):
raise ValidationError(path, "must be an int")
if v != 1:
raise ValidationError(path, "unsupported version")
def parse_measurement(d, path):
check_allowed_properties(d, path, ("name", "unit", "value"))
check_required_properties(d, path, ("name", "unit", "value"))
check_text(d['unit'], path + ".unit")
check_text(d['name'], path + ".name")
check_text(d['value'], path + ".value")
def parse_measurements(e, path):
if not isinstance(e, list):
raise ValidationError(path, "must be an array")
    for i in range(0, len(e)):
        elem = e[i]
        # Build a fresh path per element; do not accumulate into `path`.
        p = "%s[%s]" % (path, i)
        parse_measurement(elem, p)
def parse_t(d, path):
check_allowed_properties(d, path,
("suite", "name", "status", "duration", "message",
"stack", "measurements"))
check_required_properties(d, path, ("suite", "name", "status", "duration"))
check_text(d['suite'], path + ".suite")
check_text(d['name'], path + ".name")
check_text(d['status'], path + ".status")
check_number(d['duration'], path + ".duration")
if 'message' in d:
check_text(d['message'], path + ".message")
if 'stack' in d:
check_text(d['stack'], path + ".stack")
if 'measurements' in d:
parse_measurements(d['measurements'], path + ".measurements")
def parse_ts(e, path):
if not isinstance(e, list):
raise ValidationError(path, "must be an array")
if not e:
raise ValidationError(path, "must not be empty")
for i in range(0, len(e)):
elem = e[i]
p = "%s[%s]" % (path, i)
parse_t(elem, p)
def parse_document(d):
check_allowed_properties(d, "#", ("version", "tests"))
check_required_properties(d, "#", ("version", "tests"))
check_version(d['version'], "#version")
parse_ts(d['tests'], "#tests")
def validate_result(d):
parse_document(d)
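# A minimal document accepted by this schema (illustrative):
#
#     validate_result({
#         "version": 1,
#         "tests": [{
#             "suite": "unit",
#             "name": "test_sum",
#             "status": "ok",
#             "duration": 0.01,
#             "measurements": [
#                 {"name": "rss", "unit": "kb", "value": "1024"},
#             ],
#         }],
#     })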
| 2.90625 | 3 |
arrow/__init__.py | galaxy-genome-annotation/python-apollo | 5 | 12768545 | __version__ = '4.2.13'
| 1.039063 | 1 |
webware/Request.py | PeaceWorksTechnologySolutions/w4py3 | 11 | 12768546 | """An abstract request"""
from MiscUtils import AbstractError
from MiscUtils.Funcs import asclocaltime
class Request:
"""The abstract request class.
Request is a base class that offers the following:
* A time stamp (indicating when the request was made)
* An input stream
* Remote request information (address, name)
* Local host information (address, name, port)
* A security indicator
Request is an abstract class; developers typically use HTTPRequest.
"""
# region Init
def __init__(self):
"""Initialize the request.
Subclasses are responsible for invoking super
and initializing self._time.
"""
self._transaction = None
# endregion Init
# region Access
def time(self):
return self._time # pylint: disable=no-member
def timeStamp(self):
"""Return time() as human readable string for logging and debugging."""
return asclocaltime(self.time())
# endregion Access
# region Input
def input(self):
"""Return a file-style object that the contents can be read from."""
# This is bogus. Disregard for now.
# endregion Input
# region Remote info
def remoteAddress(self):
"""Get the remote address.
Returns a string containing the Internet Protocol (IP) address
of the client that sent the request.
"""
raise AbstractError(self.__class__)
def remoteName(self):
"""Get the remote name.
Returns the fully qualified name of the client that sent the request,
or the IP address of the client if the name cannot be determined.
"""
raise AbstractError(self.__class__)
# endregion Remote info
# region Local info
def localAddress(self):
"""Get local address.
Returns a string containing the Internet Protocol (IP) address
of the local host (e.g., the server) that received the request.
"""
raise AbstractError(self.__class__)
@staticmethod
def localName():
"""Get local name.
Returns the fully qualified name of the local host (e.g., the server)
that received the request.
"""
return 'localhost'
def localPort(self):
"""Get local port.
Returns the port of the local host (e.g., the server)
that received the request.
"""
raise AbstractError(self.__class__)
# endregion Local info
# region Security
@staticmethod
def isSecure():
"""Check whether this is a secure channel.
Returns true if request was made using a secure channel,
such as HTTPS. This currently always returns false,
since secure channels are not yet supported.
"""
return False
# endregion Security
# region Transactions
def responseClass(self):
"""Get the corresponding response class."""
raise AbstractError(self.__class__)
def setTransaction(self, trans):
"""Set a transaction container."""
self._transaction = trans
def transaction(self):
"""Get the transaction container."""
return self._transaction
# endregion Transactions
# region Cleanup
def clearTransaction(self):
del self._transaction
# endregion Cleanup
# region Exception reports
_exceptionReportAttrNames = []
def writeExceptionReport(self, handler):
handler.writeTitle(self.__class__.__name__)
handler.writeAttrs(self, self._exceptionReportAttrNames)
# endregion Exception reports
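# A minimal concrete sketch (hypothetical; a real subclass such as
# HTTPRequest also implements the remaining abstract accessors):
#
#     import time
#
#     class StubRequest(Request):
#         def __init__(self):
#             Request.__init__(self)
#             self._time = time.time()
#
#         def remoteAddress(self):
#             return '127.0.0.1'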
| 3.46875 | 3 |
tests/plantcv/visualize/test_colorize_label_img.py | ygarrot/plantcv | 1 | 12768547 | import numpy as np
from plantcv.plantcv.visualize import colorize_label_img
def test_colorize_label_img():
"""Test for PlantCV."""
label_img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
colored_img = colorize_label_img(label_img)
assert (colored_img.shape[0:-1] == label_img.shape) and colored_img.shape[-1] == 3
| 2.9375 | 3 |
setup.py | stakelink/substrate-utils | 0 | 12768548 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
NAME = 'substrate-utils'
VERSION = '0.1'
DESCRIPTION = ''
URL = 'https://github.com/stakelink/substrate-utils'
EMAIL = '<EMAIL>'
AUTHOR = 'STAKELINK'
REQUIRES_PYTHON = '>=3.6.0'
LICENSE = 'MIT'
REQUIRED = [
'substrate-interface>=0.13',
'cachetools'
]
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r", encoding="utf-8") as fh:
LONG_DESCRIPTION = fh.read()
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=['substrateutils'],
entry_points={},
install_requires=REQUIRED,
license=LICENSE,
project_urls={
'Bug Reports': 'https://github.com/stakelink/substrate-utils/issues',
'Source': 'https://github.com/stakelink/substrate-utils',
},
)
| 1.585938 | 2 |
geomagio/adjusted/transform/ZRotationHScale.py | usgs/geomag-algorithms | 49 | 12768549 | import numpy as np
from typing import List, Optional, Tuple
from .LeastSq import LeastSq
class ZRotationHscale(LeastSq):
"""Calculates affine using least squares, constrained to rotate about the Z axis
and apply uniform horizontal scaling."""
def get_matrix(
self,
        matrix: List[float],
absolutes: Optional[Tuple[List[float], List[float], List[float]]] = None,
ordinates: Optional[Tuple[List[float], List[float], List[float]]] = None,
weights: Optional[List[float]] = None,
) -> np.array:
return [
[matrix[0], matrix[1], 0.0, matrix[2]],
[-matrix[1], matrix[0], 0.0, matrix[3]],
[0.0, 0.0, matrix[4], matrix[5]],
[0.0, 0.0, 0.0, 1.0],
]
def get_stacked_ordinates(
self, ordinates: Tuple[List[float], List[float], List[float]]
) -> List[List[float]]:
# (reduces degrees of freedom by 10:
# - 2 for making x,y independent of z;
# - 2 for making z independent of x,y
# - 2 for not allowing shear in x,y; and
# - 4 for the last row of zeros and a one)
ord_stacked = np.zeros((6, len(ordinates[0]) * 3))
ord_stacked[0, 0::3] = ordinates[0]
ord_stacked[0, 1::3] = ordinates[1]
ord_stacked[1, 0::3] = ordinates[1]
ord_stacked[1, 1::3] = -ordinates[0]
ord_stacked[2, 0::3] = 1.0
ord_stacked[3, 1::3] = 1.0
ord_stacked[4, 2::3] = ordinates[2]
ord_stacked[5, 2::3] = 1.0
return ord_stacked
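# Sketch of how the flat least-squares solution maps into the affine
# (values illustrative): with matrix = [a, b, tx, ty, sz, tz], get_matrix
# builds [[a, b], [-b, a]] in the horizontal plane -- a rotation about Z with
# uniform horizontal scale sqrt(a**2 + b**2) -- plus translations tx/ty, and
# an independent scale sz and offset tz on the Z axis.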
| 3.078125 | 3 |
src/scipp/core/shape.py | mlund/scipp | 0 | 12768550 | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
# flake8: noqa: E501
import numpy as np
from typing import Dict, List, Optional, Sequence, Tuple, Union
from .._scipp import core as _cpp
from ._cpp_wrapper_util import call_func as _call_cpp_func
from ..typing import VariableLike
def broadcast(x: _cpp.Variable, dims: Union[List[str], Tuple[str, ...]],
shape: Sequence[int]) -> _cpp.Variable:
"""Broadcast a variable.
Note that scipp operations broadcast automatically, so using this function
directly is rarely required.
:param x: Variable to broadcast.
:param dims: List of new dimensions.
:param shape: New extents in each dimension.
:return: New variable with requested dimension labels and shape.
"""
return _call_cpp_func(_cpp.broadcast, x, dims, shape)
def concat(x: Sequence[VariableLike], dim: str) -> VariableLike:
"""Concatenate input arrays along the given dimension.
Concatenation can happen in two ways:
- Along an existing dimension, yielding a new dimension extent
given by the sum of the input's extents.
- Along a new dimension that is not contained in either of the inputs,
      yielding an output with one extra dimension.
In the case of a data array or dataset, the coords and masks are also
concatenated.
Coords and masks for any but the given dimension are required to match
and are copied to the output without changes.
    :param x: Sequence of input variables, data arrays, or datasets.
:param dim: Dimension along which to concatenate.
:raises: If the dtype or unit does not match, or if the
dimensions and shapes are incompatible.
:return: Concatenation of the inputs.
Examples:
>>> a = sc.arange('x', 3)
>>> b = 100 * sc.arange('x', 3)
>>> c = sc.concat([a, b], dim='x')
>>> c
<scipp.Variable> (x: 6) int64 [dimensionless] [0, 1, ..., 100, 200]
>>> c.values
array([ 0, 1, 2, 0, 100, 200])
>>> d = sc.concat([a, b], dim='y')
>>> d
<scipp.Variable> (y: 2, x: 3) int64 [dimensionless] [0, 1, ..., 100, 200]
>>> d.values
array([[ 0, 1, 2],
[ 0, 100, 200]])
>>> x = sc.DataArray(sc.arange('x', 3), coords={'x': sc.arange('x', 3)})
>>> y = sc.DataArray(100 * sc.arange('x', 3), coords={'x': 100 * sc.arange('x', 3)})
>>> z = sc.concat([x, y], dim='x')
>>> z
<scipp.DataArray>
Dimensions: Sizes[x:6, ]
Coordinates:
x int64 [dimensionless] (x) [0, 1, ..., 100, 200]
Data:
int64 [dimensionless] (x) [0, 1, ..., 100, 200]
>>> z.values
array([ 0, 1, 2, 0, 100, 200])
"""
return _call_cpp_func(_cpp.concat, x, dim)
def fold(x: VariableLike,
dim: str,
sizes: Optional[Dict[str, int]] = None,
dims: Optional[Union[List[str], Tuple[str, ...]]] = None,
shape: Optional[Sequence[int]] = None) -> VariableLike:
"""Fold a single dimension of a variable or data array into multiple dims.
:param x: Variable or DataArray to fold.
:param dim: A single dim label that will be folded into more dims.
:param sizes: A dict mapping new dims to new shapes.
:param dims: A list of new dims labels.
:param shape: A list of new dim shapes.
:raises: If the volume of the old shape is not equal to the
volume of the new shape.
:return: Variable or DataArray with requested dimension labels and shape.
Examples:
>>> v = sc.arange('x', 6)
>>> v
<scipp.Variable> (x: 6) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> sc.fold(v, dim='x', sizes={'y': 2, 'z': 3})
<scipp.Variable> (y: 2, z: 3) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> sc.fold(v, dim='x', sizes={'y': 2, 'z': 3}).values
array([[0, 1, 2],
[3, 4, 5]])
>>> sc.fold(v, dim='x', dims=['y', 'z'], shape=[2, 3])
<scipp.Variable> (y: 2, z: 3) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> sc.fold(v, dim='x', sizes={'y': 2, 'z': -1})
<scipp.Variable> (y: 2, z: 3) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> a = sc.DataArray(0.1 * sc.arange('x', 6), coords={'x': sc.arange('x', 6)})
>>> sc.fold(a, dim='x', sizes={'y': 2, 'z': 3})
<scipp.DataArray>
Dimensions: Sizes[y:2, z:3, ]
Coordinates:
x int64 [dimensionless] (y, z) [0, 1, ..., 4, 5]
Data:
float64 [dimensionless] (y, z) [0, 0.1, ..., 0.4, 0.5]
>>> sc.fold(a, dim='x', sizes={'y': 2, 'z': 3}).data.values
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5]])
>>> sc.fold(a, dim='x', sizes={'y': 2, 'z': 3}).coords['x'].values
array([[0, 1, 2],
[3, 4, 5]])
"""
if sizes is not None:
if (dims is not None) or (shape is not None):
raise RuntimeError(
"If sizes is defined, dims and shape must be None in fold.")
sizes = sizes.copy()
else:
if (dims is None) or (shape is None):
raise RuntimeError("Both dims and shape must be defined in fold.")
sizes = dict(zip(dims, shape))
# Handle potential size of -1.
# Note that we implement this here on the Python layer, because one cannot create
# a C++ Dimensions object with negative sizes.
new_shape = list(sizes.values())
minus_one_count = new_shape.count(-1)
if minus_one_count > 1:
raise _cpp.DimensionError(
"Can only have a single -1 in the new requested shape.")
if minus_one_count == 1:
ind = new_shape.index(-1)
del new_shape[ind]
new_volume = np.prod(new_shape)
dim_size = x.sizes[dim] // new_volume
if x.sizes[dim] % new_volume != 0:
raise ValueError("-1 in new shape was computed to be {}, but the original "
"shape {} cannot be divided by {}.".format(
dim_size, x.sizes[dim], dim_size))
sizes[list(sizes.keys())[ind]] = dim_size
return _call_cpp_func(_cpp.fold, x, dim, sizes)
def flatten(x: VariableLike,
dims: Optional[Union[List[str], Tuple[str, ...]]] = None,
to: Optional[str] = None) -> VariableLike:
"""Flatten multiple dimensions of a variable or data array into a single
    dimension. If dims is omitted, all of the input's dimensions are flattened
    into a single dim.
:param x: Variable or DataArray to flatten.
:param dims: A list of dim labels that will be flattened.
:param to: A single dim label for the resulting flattened dim.
:raises: If the bin edge coordinates cannot be stitched back together.
:return: Variable or DataArray with requested dimension labels and shape.
Examples:
>>> v = sc.array(dims=['x', 'y'], values=np.arange(6).reshape(2, 3))
>>> v
<scipp.Variable> (x: 2, y: 3) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> sc.flatten(v, to='u')
<scipp.Variable> (u: 6) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> sc.flatten(v, dims=['x', 'y'], to='u')
<scipp.Variable> (u: 6) int64 [dimensionless] [0, 1, ..., 4, 5]
>>> v = sc.array(dims=['x', 'y', 'z'], values=np.arange(24).reshape(2, 3, 4))
>>> v
<scipp.Variable> (x: 2, y: 3, z: 4) int64 [dimensionless] [0, 1, ..., 22, 23]
>>> sc.flatten(v, to='u')
<scipp.Variable> (u: 24) int64 [dimensionless] [0, 1, ..., 22, 23]
>>> sc.flatten(v, dims=['x', 'y'], to='u')
<scipp.Variable> (u: 6, z: 4) int64 [dimensionless] [0, 1, ..., 22, 23]
>>> sc.flatten(v, dims=['y', 'z'], to='u')
<scipp.Variable> (x: 2, u: 12) int64 [dimensionless] [0, 1, ..., 22, 23]
>>> a = sc.DataArray(0.1 * sc.array(dims=['x', 'y'], values=np.arange(6).reshape(2, 3)),
... coords={'x': sc.arange('x', 2),
... 'y': sc.arange('y', 3),
... 'xy': sc.array(dims=['x', 'y'],
... values=np.arange(6).reshape(2, 3))})
>>> a
<scipp.DataArray>
Dimensions: Sizes[x:2, y:3, ]
Coordinates:
x int64 [dimensionless] (x) [0, 1]
xy int64 [dimensionless] (x, y) [0, 1, ..., 4, 5]
y int64 [dimensionless] (y) [0, 1, 2]
Data:
float64 [dimensionless] (x, y) [0, 0.1, ..., 0.4, 0.5]
>>> sc.flatten(a, to='u')
<scipp.DataArray>
Dimensions: Sizes[u:6, ]
Coordinates:
x int64 [dimensionless] (u) [0, 0, ..., 1, 1]
xy int64 [dimensionless] (u) [0, 1, ..., 4, 5]
y int64 [dimensionless] (u) [0, 1, ..., 1, 2]
Data:
float64 [dimensionless] (u) [0, 0.1, ..., 0.4, 0.5]
"""
if to is None:
# Note that this is a result of the fact that we want to support
# calling flatten without kwargs, and that in this case it semantically
# makes more sense for the dims that we want to flatten to come first
# in the argument list.
raise RuntimeError("The final flattened dimension is required.")
if dims is None:
dims = x.dims
return _call_cpp_func(_cpp.flatten, x, dims, to)
def transpose(x: VariableLike,
dims: Optional[Union[List[str], Tuple[str, ...]]] = None) -> VariableLike:
"""Transpose dimensions of a variable, a data array, or a dataset.
:param x: Object to transpose.
:param dims: List of dimensions in desired order. If default,
reverses existing order.
    :raises: If `dims` does not match the dimensions of the input.
    :return: The input with its dimensions transposed.
"""
return _call_cpp_func(_cpp.transpose, x, dims if dims is not None else [])
def squeeze(
x: VariableLike,
dim: Optional[Union[str, List[str], Tuple[str, ...]]] = None) -> VariableLike:
"""Remove dimensions of length 1.
This is equivalent to indexing the squeezed dimensions with index 0, that is
``squeeze(x, ['x', 'y'])`` is equivalent to ``x['x', 0]['y', 0]``.
:param x: Object to remove dimensions from.
:param dim: If given, the dimension(s) to squeeze.
If ``None``, all length-1 dimensions are squeezed.
:raises: If a dimension in `dim` does not have length 1.
:return: `x` with dimensions squeezed out.
:seealso: :py:func:`scipp.Variable.squeeze`
:py:func:`scipp.DataArray.squeeze`
:py:func:`scipp.Dataset.squeeze`
:py:func:`numpy.squeeze`
Examples:
>>> v = sc.arange('a', 3).fold('a', {'x': 1, 'y': 3, 'z': 1})
>>> v
<scipp.Variable> (x: 1, y: 3, z: 1) int64 [dimensionless] [0, 1, 2]
>>> sc.squeeze(v)
<scipp.Variable> (y: 3) int64 [dimensionless] [0, 1, 2]
>>> sc.squeeze(v, 'z')
<scipp.Variable> (x: 1, y: 3) int64 [dimensionless] [0, 1, 2]
>>> sc.squeeze(v, ['x', 'z'])
<scipp.Variable> (y: 3) int64 [dimensionless] [0, 1, 2]
Coordinates for squeezed dimensions are turned into attributes:
>>> da = sc.DataArray(v, coords={'x': sc.arange('x', 1),
... 'y': sc.arange('y', 3)})
>>> da
<scipp.DataArray>
Dimensions: Sizes[x:1, y:3, z:1, ]
Coordinates:
x int64 [dimensionless] (x) [0]
y int64 [dimensionless] (y) [0, 1, 2]
Data:
int64 [dimensionless] (x, y, z) [0, 1, 2]
>>> sc.squeeze(da)
<scipp.DataArray>
Dimensions: Sizes[y:3, ]
Coordinates:
y int64 [dimensionless] (y) [0, 1, 2]
Data:
int64 [dimensionless] (y) [0, 1, 2]
Attributes:
x int64 [dimensionless] () [0]
"""
return _call_cpp_func(_cpp.squeeze, x, (dim, ) if isinstance(dim, str) else dim)
| 2.796875 | 3 |